Diffstat (limited to 'drivers/gpu/drm/drm_mm.c'):
 drivers/gpu/drm/drm_mm.c | 220
 1 file changed, 118 insertions(+), 102 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 3cc5fbd78ee2..6692abe564d3 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -42,12 +42,14 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_mm.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+
+#include <drm/drm_mm.h>
+#include <drm/drm_print.h>
/**
* DOC: Overview
@@ -106,25 +108,17 @@
static noinline void save_stack(struct drm_mm_node *node)
{
unsigned long entries[STACKDEPTH];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = STACKDEPTH,
- .skip = 1
- };
+ unsigned int n;
- save_stack_trace(&trace);
- if (trace.nr_entries != 0 &&
- trace.entries[trace.nr_entries-1] == ULONG_MAX)
- trace.nr_entries--;
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
/* May be called under spinlock, so avoid sleeping */
- node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+ node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}
static void show_leaks(struct drm_mm *mm)
{
struct drm_mm_node *node;
- unsigned long entries[STACKDEPTH];
char *buf;
buf = kmalloc(BUFSZ, GFP_KERNEL);
@@ -132,19 +126,13 @@ static void show_leaks(struct drm_mm *mm)
return;
list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = STACKDEPTH
- };
-
if (!node->stack) {
DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
node->start, node->size);
continue;
}
- depot_fetch_stack(node->stack, &trace);
- snprint_stack_trace(buf, BUFSZ, &trace, 0);
+ stack_depot_snprint(node->stack, buf, BUFSZ, 0);
DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
node->start, node->size, buf);
}
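
The two hunks above replace the old struct stack_trace plumbing with the
current stacktrace/stackdepot helpers: capture with stack_trace_save(),
deduplicate with stack_depot_save(), print with stack_depot_snprint(). A
minimal sketch of the same save-and-report pattern outside drm_mm (my_obj and
both helpers are made up for illustration):

#include <linux/gfp.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

struct my_obj {
        depot_stack_handle_t stack;     /* hypothetical owner-tracking field */
};

static void my_obj_save_owner(struct my_obj *obj)
{
        unsigned long entries[16];
        unsigned int n;

        /* Capture the current call chain, skipping this helper's own frame. */
        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

        /* Deduplicate and stash it; GFP_NOWAIT keeps this atomic-safe. */
        obj->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void my_obj_print_owner(struct my_obj *obj, char *buf, size_t len)
{
        /* Render the stored trace into buf with no extra indentation. */
        stack_depot_snprint(obj->stack, buf, len, 0);
}

stack_depot_save() expects the depot to have been initialised, which is what
the stack_depot_init() call added to drm_mm_init() further down provides for
CONFIG_DRM_DEBUG_MM builds.
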
@@ -164,7 +152,7 @@ static void show_leaks(struct drm_mm *mm) { }
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
u64, __subtree_last,
- START, LAST, static inline, drm_mm_interval_tree)
+ START, LAST, static inline __maybe_unused, drm_mm_interval_tree)
struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
@@ -184,7 +172,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
node->__subtree_last = LAST(node);
- if (hole_node->allocated) {
+ if (drm_mm_node_allocated(hole_node)) {
rb = &hole_node->rb;
while (rb) {
parent = rb_entry(rb, struct drm_mm_node, rb);
@@ -222,20 +210,6 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
&drm_mm_interval_tree_augment);
}
-#define RB_INSERT(root, member, expr) do { \
- struct rb_node **link = &root.rb_node, *rb = NULL; \
- u64 x = expr(node); \
- while (*link) { \
- rb = *link; \
- if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
- link = &rb->rb_left; \
- else \
- link = &rb->rb_right; \
- } \
- rb_link_node(&node->member, rb, link); \
- rb_insert_color(&node->member, &root); \
-} while (0)
-
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
@@ -265,16 +239,42 @@ static void insert_hole_size(struct rb_root_cached *root,
rb_insert_color_cached(&node->rb_hole_size, root, first);
}
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+ struct drm_mm_node, rb_hole_addr,
+ u64, subtree_max_hole, HOLE_SIZE)
+
+static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
+{
+ struct rb_node **link = &root->rb_node, *rb_parent = NULL;
+ u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
+ struct drm_mm_node *parent;
+
+ while (*link) {
+ rb_parent = *link;
+ parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
+ if (parent->subtree_max_hole < subtree_max_hole)
+ parent->subtree_max_hole = subtree_max_hole;
+ if (start < HOLE_ADDR(parent))
+ link = &parent->rb_hole_addr.rb_left;
+ else
+ link = &parent->rb_hole_addr.rb_right;
+ }
+
+ rb_link_node(&node->rb_hole_addr, rb_parent, link);
+ rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
+}
+
static void add_hole(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
node->hole_size =
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
+ node->subtree_max_hole = node->hole_size;
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
insert_hole_size(&mm->holes_size, node);
- RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
+ insert_hole_addr(&mm->holes_addr, node);
list_add(&node->hole_stack, &mm->hole_stack);
}
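
RB_DECLARE_CALLBACKS_MAX() generates the propagate/copy/rotate callbacks that
keep subtree_max_hole valid while the rb_hole_addr tree is rebalanced, and
insert_hole_addr() updates the ancestors' values on the way down. The
invariant amounts to the recomputation sketched below (illustrative only, not
code from the patch; the real helper is generated by the macro):

/* subtree_max_hole is the largest hole_size of the node and both subtrees. */
static u64 compute_subtree_max_hole(struct drm_mm_node *node)
{
        u64 max = HOLE_SIZE(node);
        struct drm_mm_node *child;

        if (node->rb_hole_addr.rb_left) {
                child = rb_entry(node->rb_hole_addr.rb_left,
                                 struct drm_mm_node, rb_hole_addr);
                max = max_t(u64, max, child->subtree_max_hole);
        }
        if (node->rb_hole_addr.rb_right) {
                child = rb_entry(node->rb_hole_addr.rb_right,
                                 struct drm_mm_node, rb_hole_addr);
                max = max_t(u64, max, child->subtree_max_hole);
        }
        return max;
}
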
@@ -285,8 +285,10 @@ static void rm_hole(struct drm_mm_node *node)
list_del(&node->hole_stack);
rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
- rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+ rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
+ &augment_callbacks);
node->hole_size = 0;
+ node->subtree_max_hole = 0;
DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}
@@ -301,11 +303,6 @@ static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}
-static inline u64 rb_hole_size(struct rb_node *rb)
-{
- return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
-}
-
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
struct rb_node *rb = mm->holes_size.rb_root.rb_node;
@@ -326,7 +323,12 @@ static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
return best;
}
-static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
+static bool usable_hole_addr(struct rb_node *rb, u64 size)
+{
+ return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
+}
+
+static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
struct rb_node *rb = mm->holes_addr.rb_node;
struct drm_mm_node *node = NULL;
@@ -334,6 +336,9 @@ static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
while (rb) {
u64 hole_start;
+ if (!usable_hole_addr(rb, size))
+ break;
+
node = rb_hole_addr_to_node(rb);
hole_start = __drm_mm_hole_node_start(node);
@@ -359,10 +364,10 @@ first_hole(struct drm_mm *mm,
return best_hole(mm, size);
case DRM_MM_INSERT_LOW:
- return find_hole(mm, start);
+ return find_hole_addr(mm, start, size);
case DRM_MM_INSERT_HIGH:
- return find_hole(mm, end);
+ return find_hole_addr(mm, end, size);
case DRM_MM_INSERT_EVICT:
return list_first_entry_or_null(&mm->hole_stack,
@@ -371,9 +376,45 @@ first_hole(struct drm_mm *mm,
}
}
+/**
+ * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
+ * @name: name of function to declare
+ * @first: first rb member to traverse (either rb_left or rb_right).
+ * @last: last rb member to traverse (either rb_right or rb_left).
+ *
+ * This macro declares a function to return the next hole of the addr rb tree.
+ * While traversing the tree we take the searched size into account and only
+ * visit branches with potentially big enough holes.
+ */
+
+#define DECLARE_NEXT_HOLE_ADDR(name, first, last) \
+static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size) \
+{ \
+ struct rb_node *parent, *node = &entry->rb_hole_addr; \
+ \
+ if (!entry || RB_EMPTY_NODE(node)) \
+ return NULL; \
+ \
+ if (usable_hole_addr(node->first, size)) { \
+ node = node->first; \
+ while (usable_hole_addr(node->last, size)) \
+ node = node->last; \
+ return rb_hole_addr_to_node(node); \
+ } \
+ \
+ while ((parent = rb_parent(node)) && node == parent->first) \
+ node = parent; \
+ \
+ return rb_hole_addr_to_node(parent); \
+}
+
+DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
+DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)
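
Hand-expanding the second instantiation may help when reading the traversal;
with first = rb_right and last = rb_left it comes out roughly as below, and
next_hole_high_addr is the mirror image with the two members swapped:

static struct drm_mm_node *next_hole_low_addr(struct drm_mm_node *entry, u64 size)
{
        struct rb_node *parent, *node = &entry->rb_hole_addr;

        if (!entry || RB_EMPTY_NODE(node))
                return NULL;

        /* Successor direction: step right once, then keep left while the
         * subtree can still hold a hole of at least @size bytes. */
        if (usable_hole_addr(node->rb_right, size)) {
                node = node->rb_right;
                while (usable_hole_addr(node->rb_left, size))
                        node = node->rb_left;
                return rb_hole_addr_to_node(node);
        }

        /* Otherwise climb until we leave a right subtree behind. */
        while ((parent = rb_parent(node)) && node == parent->rb_right)
                node = parent;

        return rb_hole_addr_to_node(parent);
}
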
+
static struct drm_mm_node *
next_hole(struct drm_mm *mm,
struct drm_mm_node *node,
+ u64 size,
enum drm_mm_insert_mode mode)
{
switch (mode) {
@@ -382,10 +423,10 @@ next_hole(struct drm_mm *mm,
return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
case DRM_MM_INSERT_LOW:
- return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
+ return next_hole_low_addr(node, size);
case DRM_MM_INSERT_HIGH:
- return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
+ return next_hole_high_addr(node, size);
case DRM_MM_INSERT_EVICT:
node = list_next_entry(node, hole_stack);
@@ -409,17 +450,17 @@ next_hole(struct drm_mm *mm,
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
- u64 end = node->start + node->size;
struct drm_mm_node *hole;
u64 hole_start, hole_end;
u64 adj_start, adj_end;
+ u64 end;
end = node->start + node->size;
if (unlikely(end <= node->start))
return -ENOSPC;
/* Find the relevant hole to add our node to */
- hole = find_hole(mm, node->start);
+ hole = find_hole_addr(mm, node->start, 0);
if (!hole)
return -ENOSPC;
@@ -434,9 +475,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->mm = mm;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
node->hole_size = 0;
rm_hole(hole);
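
For context, drm_mm_reserve_node() is the path a driver uses to carve out a
range it has already decided on; the caller fills in start and size (and
optionally color) before the call. A hypothetical caller:

#include <linux/sizes.h>
#include <linux/string.h>
#include <drm/drm_mm.h>

/* Hypothetical: pin a firmware-reserved 64 KiB range at offset 0x10000. */
static int reserve_fw_range(struct drm_mm *mm, struct drm_mm_node *node)
{
        memset(node, 0, sizeof(*node));
        node->start = 0x10000;
        node->size = SZ_64K;

        /* Returns -ENOSPC if no hole covers the requested range. */
        return drm_mm_reserve_node(mm, node);
}
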
@@ -482,7 +523,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
u64 remainder_mask;
bool once;
- DRM_MM_BUG_ON(range_start >= range_end);
+ DRM_MM_BUG_ON(range_start > range_end);
if (unlikely(size == 0 || range_end - range_start < size))
return -ENOSPC;
@@ -499,7 +540,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
for (hole = first_hole(mm, range_start, range_end, size, mode);
hole;
- hole = once ? NULL : next_hole(mm, hole, mode)) {
+ hole = once ? NULL : next_hole(mm, hole, size, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
@@ -553,9 +594,9 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
node->color = color;
node->hole_size = 0;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
rm_hole(hole);
if (adj_start > hole_start)
@@ -571,6 +612,11 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
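
The size now passed down through first_hole() and next_hole() originates from
drm_mm_insert_node_in_range() callers such as the one sketched here (size,
alignment and range are made-up values):

#include <linux/sizes.h>
#include <drm/drm_mm.h>

/* Hypothetical: place a 1 MiB, 64 KiB-aligned buffer below 4 GiB, preferring
 * the lowest suitable address. */
static int place_buffer(struct drm_mm *mm, struct drm_mm_node *node)
{
        return drm_mm_insert_node_in_range(mm, node,
                                           SZ_1M,       /* size */
                                           SZ_64K,      /* alignment */
                                           0,           /* color */
                                           0, SZ_4G,    /* search range */
                                           DRM_MM_INSERT_LOW);
}
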
+static inline __maybe_unused bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
+{
+ return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
+}
+
/**
* drm_mm_remove_node - Remove a memory node from the allocator.
* @node: drm_mm_node to remove
@@ -584,8 +630,8 @@ void drm_mm_remove_node(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
prev_node = list_prev_entry(node, node_list);
@@ -594,48 +640,14 @@ void drm_mm_remove_node(struct drm_mm_node *node)
drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
- node->allocated = false;
if (drm_mm_hole_follows(prev_node))
rm_hole(prev_node);
add_hole(prev_node);
-}
-EXPORT_SYMBOL(drm_mm_remove_node);
-/**
- * drm_mm_replace_node - move an allocation from @old to @new
- * @old: drm_mm_node to remove from the allocator
- * @new: drm_mm_node which should inherit @old's allocation
- *
- * This is useful for when drivers embed the drm_mm_node structure and hence
- * can't move allocations by reassigning pointers. It's a combination of remove
- * and insert with the guarantee that the allocation start will match.
- */
-void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
-{
- struct drm_mm *mm = old->mm;
-
- DRM_MM_BUG_ON(!old->allocated);
-
- *new = *old;
-
- list_replace(&old->node_list, &new->node_list);
- rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
-
- if (drm_mm_hole_follows(old)) {
- list_replace(&old->hole_stack, &new->hole_stack);
- rb_replace_node_cached(&old->rb_hole_size,
- &new->rb_hole_size,
- &mm->holes_size);
- rb_replace_node(&old->rb_hole_addr,
- &new->rb_hole_addr,
- &mm->holes_addr);
- }
-
- old->allocated = false;
- new->allocated = true;
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
-EXPORT_SYMBOL(drm_mm_replace_node);
+EXPORT_SYMBOL(drm_mm_remove_node);
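
drm_mm_node_allocated() tests DRM_MM_NODE_ALLOCATED_BIT in node->flags, and
clear_bit_unlock() above has release semantics, so the list and interval-tree
updates are visible before the node reads as free. A common caller-side idiom
(hypothetical teardown helper):

/* Hypothetical: tear down an embedded node only if it was ever inserted. */
static void release_buffer_node(struct drm_mm_node *node)
{
        if (drm_mm_node_allocated(node))
                drm_mm_remove_node(node);
}
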
/**
* DOC: lru scan roster
@@ -651,7 +663,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
* interfaces. First a scan operation needs to be initialized with
* drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
* objects to the roster, probably by walking an LRU list, but this can be
- * freely implemented. Eviction candiates are added using
+ * freely implemented. Eviction candidates are added using
* drm_mm_scan_add_block() until a suitable hole is found or there are no
* further evictable objects. Eviction roster metadata is tracked in &struct
* drm_mm_scan.
@@ -741,9 +753,9 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
u64 adj_start, adj_end;
DRM_MM_BUG_ON(node->mm != mm);
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
- node->scanned_block = true;
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
+ __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
mm->scan_active++;
/* Remove this block from the node_list so that we enlarge the hole
@@ -816,7 +828,7 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
* When the scan list is empty, the selected memory nodes can be freed. An
* immediately following drm_mm_insert_node_in_range_generic() or one of the
* simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
- * the just freed block (because its at the top of the free_stack list).
+ * the just freed block (because it's at the top of the free_stack list).
*
* Returns:
* True if this block should be evicted, false otherwise. Will always
@@ -828,8 +840,8 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
struct drm_mm_node *prev_node;
DRM_MM_BUG_ON(node->mm != scan->mm);
- DRM_MM_BUG_ON(!node->scanned_block);
- node->scanned_block = false;
+ DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
+ __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
DRM_MM_BUG_ON(!node->mm->scan_active);
node->mm->scan_active--;
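
The scanned-block bit only has meaning inside an eviction roster walk. A
condensed sketch of that walk, following the DOC comment above (my_buffer,
its lists and the trivial error handling are made up; a real driver would
unbind an object before removing its node):

#include <linux/list.h>
#include <drm/drm_mm.h>

struct my_buffer {
        struct drm_mm_node node;
        struct list_head lru_link;      /* driver-wide LRU */
        struct list_head evict_link;    /* scratch list for one scan */
};

static int evict_for(struct drm_mm *mm, struct list_head *lru,
                     u64 size, u64 alignment)
{
        struct my_buffer *buf, *next;
        struct drm_mm_scan scan;
        LIST_HEAD(evict_list);
        bool found = false;

        drm_mm_scan_init(&scan, mm, size, alignment, 0, DRM_MM_INSERT_BEST);

        /* Walk the LRU, adding blocks until a large enough hole appears. */
        list_for_each_entry(buf, lru, lru_link) {
                list_add(&buf->evict_link, &evict_list);
                if (drm_mm_scan_add_block(&scan, &buf->node)) {
                        found = true;
                        break;
                }
        }

        /* Every added block must be removed again, in reverse order of
         * addition; list_add() prepends, so this walk is already reversed.
         * Blocks the scan does not want evicted drop off the scratch list. */
        list_for_each_entry_safe(buf, next, &evict_list, evict_link)
                if (!drm_mm_scan_remove_block(&scan, &buf->node))
                        list_del_init(&buf->evict_link);

        if (!found) {
                list_for_each_entry_safe(buf, next, &evict_list, evict_link)
                        list_del_init(&buf->evict_link);
                return -ENOSPC;
        }

        /* What survived is the set of nodes to free to open up the hole. */
        list_for_each_entry_safe(buf, next, &evict_list, evict_link) {
                list_del_init(&buf->evict_link);
                drm_mm_remove_node(&buf->node);
        }

        return 0;
}
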
@@ -927,13 +939,17 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
- mm->head_node.allocated = false;
+ mm->head_node.flags = 0;
mm->head_node.mm = mm;
mm->head_node.start = start + size;
mm->head_node.size = -size;
add_hole(&mm->head_node);
mm->scan_active = 0;
+
+#ifdef CONFIG_DRM_DEBUG_MM
+ stack_depot_init();
+#endif
}
EXPORT_SYMBOL(drm_mm_init);
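
Finally, the stack_depot_init() call only matters for CONFIG_DRM_DEBUG_MM
builds; setup and teardown of the allocator itself are unchanged. A minimal,
hypothetical lifecycle:

#include <linux/sizes.h>
#include <drm/drm_mm.h>

struct my_vram {
        struct drm_mm mm;
};

/* Hypothetical: manage a 256 MiB aperture starting at offset 0. */
static void my_vram_init(struct my_vram *vram)
{
        drm_mm_init(&vram->mm, 0, SZ_256M);
}

static void my_vram_fini(struct my_vram *vram)
{
        /* All nodes must have been removed first; takedown warns otherwise. */
        drm_mm_takedown(&vram->mm);
}
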