1
0
Fork 0

drm/mm: cleanup and improve next_hole_*_addr()

Skipping just one branch of the tree is not the most
effective approach.

Instead use a macro to define the traversal functions and
sort out both branch sides.

This improves the performance of the unit tests by
a factor of more than 4.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nirmoy Das <nirmoy.das@amd.com>
Link: https://patchwork.freedesktop.org/patch/370298/
zero-sugar-mainline-defconfig
Christian König 2020-06-15 16:16:42 +02:00
parent 271e7decd7
commit 5fad79fd66
1 changed file with 34 additions and 72 deletions

View File

@ -325,6 +325,11 @@ static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
return best; return best;
} }
/*
 * usable_hole_addr - test whether an addr-rbtree branch is worth visiting
 * @rb: root of the subtree to test (may be NULL)
 * @size: minimum hole size the caller is searching for
 *
 * Returns true only when @rb exists and its subtree_max_hole shows the
 * subtree can possibly contain a hole of at least @size bytes.
 */
static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
	if (!rb)
		return false;

	return rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}
static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size) static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{ {
struct rb_node *rb = mm->holes_addr.rb_node; struct rb_node *rb = mm->holes_addr.rb_node;
@ -333,7 +338,7 @@ static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
while (rb) { while (rb) {
u64 hole_start; u64 hole_start;
if (rb_hole_addr_to_node(rb)->subtree_max_hole < size) if (!usable_hole_addr(rb, size))
break; break;
node = rb_hole_addr_to_node(rb); node = rb_hole_addr_to_node(rb);
@ -374,82 +379,39 @@ first_hole(struct drm_mm *mm,
} }
/** /**
* next_hole_high_addr - returns next hole for a DRM_MM_INSERT_HIGH mode request * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
* @entry: previously selected drm_mm_node * @name: name of function to declare
* @size: size of the a hole needed for the request * @first: first rb member to traverse (either rb_left or rb_right).
* @last: last rb member to traverse (either rb_right or rb_left).
* *
* This function will verify whether left subtree of @entry has hole big enough * This macro declares a function to return the next hole of the addr rb tree.
* to fit the requested size. If so, it will return previous node of @entry or * While traversing the tree we take the searched size into account and only
* else it will return parent node of @entry * visit branches with potential big enough holes.
*
* It will also skip the complete left subtree if subtree_max_hole of that
* subtree is same as the subtree_max_hole of the @entry.
*
* Returns:
* previous node of @entry if left subtree of @entry can serve the request or
* else return parent of @entry
*/ */
static struct drm_mm_node *
next_hole_high_addr(struct drm_mm_node *entry, u64 size)
{
struct rb_node *rb_node, *left_rb_node, *parent_rb_node;
struct drm_mm_node *left_node;
if (!entry) #define DECLARE_NEXT_HOLE_ADDR(name, first, last) \
return NULL; static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size) \
{ \
rb_node = &entry->rb_hole_addr; struct rb_node *parent, *node = &entry->rb_hole_addr; \
if (rb_node->rb_left) { \
left_rb_node = rb_node->rb_left; if (!entry || RB_EMPTY_NODE(node)) \
parent_rb_node = rb_parent(rb_node); return NULL; \
left_node = rb_entry(left_rb_node, \
struct drm_mm_node, rb_hole_addr); if (usable_hole_addr(node->first, size)) { \
if (left_node->subtree_max_hole < size && node = node->first; \
parent_rb_node && parent_rb_node->rb_left != rb_node) while (usable_hole_addr(node->last, size)) \
return rb_hole_addr_to_node(parent_rb_node); node = node->last; \
return rb_hole_addr_to_node(node); \
} \
\
while ((parent = rb_parent(node)) && node == parent->first) \
node = parent; \
\
return rb_hole_addr_to_node(parent); \
} }
return rb_hole_addr_to_node(rb_prev(rb_node)); DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
} DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)
/**
* next_hole_low_addr - returns next hole for a DRM_MM_INSERT_LOW mode request
* @entry: previously selected drm_mm_node
* @size: size of the a hole needed for the request
*
* This function will verify whether right subtree of @entry has hole big enough
* to fit the requested size. If so, it will return next node of @entry or * visit branches with potential big enough holes.
* else it will return parent node of @entry
*
* It will also skip the complete right subtree if subtree_max_hole of that
* subtree is same as the subtree_max_hole of the @entry.
*
* Returns:
* next node of @entry if right subtree of @entry can serve the request or
* else return parent of @entry
*/
static struct drm_mm_node *
next_hole_low_addr(struct drm_mm_node *entry, u64 size)
{
struct rb_node *rb_node, *right_rb_node, *parent_rb_node;
struct drm_mm_node *right_node;
if (!entry)
return NULL;
rb_node = &entry->rb_hole_addr;
if (rb_node->rb_right) {
right_rb_node = rb_node->rb_right;
parent_rb_node = rb_parent(rb_node);
right_node = rb_entry(right_rb_node,
struct drm_mm_node, rb_hole_addr);
if (right_node->subtree_max_hole < size &&
parent_rb_node && parent_rb_node->rb_right != rb_node)
return rb_hole_addr_to_node(parent_rb_node);
}
return rb_hole_addr_to_node(rb_next(rb_node));
}
static struct drm_mm_node * static struct drm_mm_node *
next_hole(struct drm_mm *mm, next_hole(struct drm_mm *mm,