Diffstat (limited to 'drivers/gpu/drm/drm_mm.c')
-rw-r--r--	drivers/gpu/drm/drm_mm.c	179
1 file changed, 52 insertions(+), 127 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index f4ca1ff80af9..6692abe564d3 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,6 +49,7 @@
#include <linux/stacktrace.h>
#include <drm/drm_mm.h>
+#include <drm/drm_print.h>
/**
* DOC: Overview
@@ -118,8 +119,6 @@ static noinline void save_stack(struct drm_mm_node *node)
static void show_leaks(struct drm_mm *mm)
{
struct drm_mm_node *node;
- unsigned long *entries;
- unsigned int nr_entries;
char *buf;
buf = kmalloc(BUFSZ, GFP_KERNEL);
@@ -133,8 +132,7 @@ static void show_leaks(struct drm_mm *mm)
continue;
}
- nr_entries = stack_depot_fetch(node->stack, &entries);
- stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
+ stack_depot_snprint(node->stack, buf, BUFSZ, 0);
DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
node->start, node->size, buf);
}
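
For context, stack_depot_snprint() folds the old fetch-then-format pair into a
single call. The save/print pairing then looks roughly like this (STACKDEPTH,
node->stack and BUFSZ are as defined in this file; the snippet is illustrative):

	/* save side (cf. save_stack() above): capture a backtrace and keep
	 * only the compact depot handle on the node */
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);

	/* print side: one call fetches the depot entry and formats it */
	stack_depot_snprint(node->stack, buf, BUFSZ, 0);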
@@ -154,7 +152,7 @@ static void show_leaks(struct drm_mm *mm) { }
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
u64, __subtree_last,
- START, LAST, static inline, drm_mm_interval_tree)
+ START, LAST, static inline __maybe_unused, drm_mm_interval_tree)
struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
@@ -305,11 +303,6 @@ static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}
-static inline u64 rb_hole_size(struct rb_node *rb)
-{
- return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
-}
-
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
struct rb_node *rb = mm->holes_size.rb_root.rb_node;
@@ -330,7 +323,12 @@ static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
return best;
}
-static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
+static bool usable_hole_addr(struct rb_node *rb, u64 size)
+{
+ return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
+}
+
+static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
struct rb_node *rb = mm->holes_addr.rb_node;
struct drm_mm_node *node = NULL;
@@ -338,6 +336,9 @@ static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
while (rb) {
u64 hole_start;
+ if (!usable_hole_addr(rb, size))
+ break;
+
node = rb_hole_addr_to_node(rb);
hole_start = __drm_mm_hole_node_start(node);
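
To see the pruning at work: usable_hole_addr() consults subtree_max_hole, the
augmented per-node maximum over a node's own hole and both its subtrees, so a
single comparison can rule out an entire branch. A toy example (made-up values):

	          [hole 4K, subtree_max_hole 64K]
	          /                            \
	[hole 8K, s_m_h 8K]          [hole 64K, s_m_h 64K]

A lookup with size 16K tests usable_hole_addr() on the left child, sees
subtree_max_hole == 8K < 16K, and never descends into that subtree.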
@@ -363,10 +364,10 @@ first_hole(struct drm_mm *mm,
return best_hole(mm, size);
case DRM_MM_INSERT_LOW:
- return find_hole(mm, start);
+ return find_hole_addr(mm, start, size);
case DRM_MM_INSERT_HIGH:
- return find_hole(mm, end);
+ return find_hole_addr(mm, end, size);
case DRM_MM_INSERT_EVICT:
return list_first_entry_or_null(&mm->hole_stack,
@@ -376,84 +377,39 @@ first_hole(struct drm_mm *mm,
}
/**
- * next_hole_high_addr - returns next hole for a DRM_MM_INSERT_HIGH mode request
- * @entry: previously selected drm_mm_node
- * @size: size of the a hole needed for the request
- *
- * This function will verify whether left subtree of @entry has hole big enough
- * to fit the requtested size. If so, it will return previous node of @entry or
- * else it will return parent node of @entry
- *
- * It will also skip the complete left subtree if subtree_max_hole of that
- * subtree is same as the subtree_max_hole of the @entry.
- *
- * Returns:
- * previous node of @entry if left subtree of @entry can serve the request or
- * else return parent of @entry
- */
-static struct drm_mm_node *
-next_hole_high_addr(struct drm_mm_node *entry, u64 size)
-{
- struct rb_node *rb_node, *left_rb_node, *parent_rb_node;
- struct drm_mm_node *left_node;
-
- if (!entry)
- return NULL;
-
- rb_node = &entry->rb_hole_addr;
- if (rb_node->rb_left) {
- left_rb_node = rb_node->rb_left;
- parent_rb_node = rb_parent(rb_node);
- left_node = rb_entry(left_rb_node,
- struct drm_mm_node, rb_hole_addr);
- if ((left_node->subtree_max_hole < size ||
- entry->size == entry->subtree_max_hole) &&
- parent_rb_node && parent_rb_node->rb_left != rb_node)
- return rb_hole_addr_to_node(parent_rb_node);
- }
-
- return rb_hole_addr_to_node(rb_prev(rb_node));
-}
-
-/**
- * next_hole_low_addr - returns next hole for a DRM_MM_INSERT_LOW mode request
- * @entry: previously selected drm_mm_node
- * @size: size of the a hole needed for the request
- *
- * This function will verify whether right subtree of @entry has hole big enough
- * to fit the requtested size. If so, it will return next node of @entry or
- * else it will return parent node of @entry
- *
- * It will also skip the complete right subtree if subtree_max_hole of that
- * subtree is same as the subtree_max_hole of the @entry.
- *
- * Returns:
- * next node of @entry if right subtree of @entry can serve the request or
- * else return parent of @entry
+ * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
+ * @name: name of function to declare
+ * @first: first rb member to traverse (either rb_left or rb_right).
+ * @last: last rb member to traverse (either rb_right or rb_left).
+ *
+ * This macro declares a function to return the next hole of the addr rb tree.
+ * While traversing the tree we take the searched size into account and only
+ * visit branches with potentially big enough holes.
*/
-static struct drm_mm_node *
-next_hole_low_addr(struct drm_mm_node *entry, u64 size)
-{
- struct rb_node *rb_node, *right_rb_node, *parent_rb_node;
- struct drm_mm_node *right_node;
- if (!entry)
- return NULL;
-
- rb_node = &entry->rb_hole_addr;
- if (rb_node->rb_right) {
- right_rb_node = rb_node->rb_right;
- parent_rb_node = rb_parent(rb_node);
- right_node = rb_entry(right_rb_node,
- struct drm_mm_node, rb_hole_addr);
- if ((right_node->subtree_max_hole < size ||
- entry->size == entry->subtree_max_hole) &&
- parent_rb_node && parent_rb_node->rb_right != rb_node)
- return rb_hole_addr_to_node(parent_rb_node);
- }
-
- return rb_hole_addr_to_node(rb_next(rb_node));
-}
+#define DECLARE_NEXT_HOLE_ADDR(name, first, last) \
+static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size) \
+{ \
+ struct rb_node *parent, *node = &entry->rb_hole_addr; \
+ \
+ if (!entry || RB_EMPTY_NODE(node)) \
+ return NULL; \
+ \
+ if (usable_hole_addr(node->first, size)) { \
+ node = node->first; \
+ while (usable_hole_addr(node->last, size)) \
+ node = node->last; \
+ return rb_hole_addr_to_node(node); \
+ } \
+ \
+ while ((parent = rb_parent(node)) && node == parent->first) \
+ node = parent; \
+ \
+ return rb_hole_addr_to_node(parent); \
+}
+
+DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
+DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)
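
For readability, here is roughly what the macro expands to for
next_hole_low_addr() (@first = rb_right, @last = rb_left), i.e. a size-pruned
variant of rb_next():

	static struct drm_mm_node *
	next_hole_low_addr(struct drm_mm_node *entry, u64 size)
	{
		struct rb_node *parent, *node = &entry->rb_hole_addr;

		if (!entry || RB_EMPTY_NODE(node))
			return NULL;

		/* successor lives in the right subtree: step right once,
		 * then left as far as the pruning check allows */
		if (usable_hole_addr(node->rb_right, size)) {
			node = node->rb_right;
			while (usable_hole_addr(node->rb_left, size))
				node = node->rb_left;
			return rb_hole_addr_to_node(node);
		}

		/* otherwise climb until we leave a right subtree */
		while ((parent = rb_parent(node)) && node == parent->rb_right)
			node = parent;

		return rb_hole_addr_to_node(parent);
	}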
static struct drm_mm_node *
next_hole(struct drm_mm *mm,
@@ -504,7 +460,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
return -ENOSPC;
/* Find the relevant hole to add our node to */
- hole = find_hole(mm, node->start);
+ hole = find_hole_addr(mm, node->start, 0);
if (!hole)
return -ENOSPC;
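
Note that drm_mm_reserve_node() passes size 0 here, for which
usable_hole_addr() is trivially true on any non-empty subtree, so the
reservation path keeps the old exhaustive address walk.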
@@ -656,7 +612,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
-static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
+static inline __maybe_unused bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}
@@ -694,41 +650,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
EXPORT_SYMBOL(drm_mm_remove_node);
/**
- * drm_mm_replace_node - move an allocation from @old to @new
- * @old: drm_mm_node to remove from the allocator
- * @new: drm_mm_node which should inherit @old's allocation
- *
- * This is useful for when drivers embed the drm_mm_node structure and hence
- * can't move allocations by reassigning pointers. It's a combination of remove
- * and insert with the guarantee that the allocation start will match.
- */
-void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
-{
- struct drm_mm *mm = old->mm;
-
- DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
-
- *new = *old;
-
- __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
- list_replace(&old->node_list, &new->node_list);
- rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
-
- if (drm_mm_hole_follows(old)) {
- list_replace(&old->hole_stack, &new->hole_stack);
- rb_replace_node_cached(&old->rb_hole_size,
- &new->rb_hole_size,
- &mm->holes_size);
- rb_replace_node(&old->rb_hole_addr,
- &new->rb_hole_addr,
- &mm->holes_addr);
- }
-
- clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
-}
-EXPORT_SYMBOL(drm_mm_replace_node);
-
-/**
* DOC: lru scan roster
*
* Very often GPUs need to have continuous allocations for a given object. When
@@ -742,7 +663,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
* interfaces. First a scan operation needs to be initialized with
* drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
* objects to the roster, probably by walking an LRU list, but this can be
- * freely implemented. Eviction candiates are added using
+ * freely implemented. Eviction candidates are added using
* drm_mm_scan_add_block() until a suitable hole is found or there are no
* further evictable objects. Eviction roster metadata is tracked in &struct
* drm_mm_scan.
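
The scan flow described in this comment looks roughly like the following
driver-side sketch (obj, lru and the scan_link/node members are hypothetical
driver bookkeeping, not part of this file):

	struct drm_mm_scan scan;
	struct my_obj *obj, *next;
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_BEST);
	list_for_each_entry(obj, &lru, lru_link) {
		list_add(&obj->scan_link, &evict_list); /* LIFO */
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true; /* a suitable hole was assembled */
			break;
		}
	}

	/* every scanned block must leave the roster again, in reverse
	 * order of addition, before the mm is modified */
	list_for_each_entry_safe(obj, next, &evict_list, scan_link) {
		if (!drm_mm_scan_remove_block(&scan, &obj->node) || !found)
			list_del(&obj->scan_link); /* not part of the hole */
	}

	/* whatever remains on evict_list must now be unbound and its node
	 * freed with drm_mm_remove_node() to open up the hole */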
@@ -1025,6 +946,10 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
add_hole(&mm->head_node);
mm->scan_active = 0;
+
+#ifdef CONFIG_DRM_DEBUG_MM
+ stack_depot_init();
+#endif
}
EXPORT_SYMBOL(drm_mm_init);
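
Taken together, a minimal caller of the touched entry points could look like
this (range and sizes are arbitrary):

	struct drm_mm mm;
	struct drm_mm_node node = {};
	int err;

	drm_mm_init(&mm, 0, SZ_4G); /* now also initializes the stack depot
				     * when CONFIG_DRM_DEBUG_MM is set */

	/* DRM_MM_INSERT_LOW goes through find_hole_addr() and
	 * next_hole_low_addr(), i.e. the size-pruned walk added above */
	err = drm_mm_insert_node_in_range(&mm, &node, SZ_64K, 0, 0,
					  0, SZ_4G, DRM_MM_INSERT_LOW);
	if (!err)
		drm_mm_remove_node(&node);

	drm_mm_takedown(&mm);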