author    Chris Wilson <chris@chris-wilson.co.uk>  2019-06-12 11:57:20 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>  2019-06-12 13:36:43 +0100
commit    ecab9be174d98ffbc69d614978f2372ca2ef54c9 (patch)
tree      193e1c61beac6c95ce163c74fd563fbbb5115d9b /drivers/gpu/drm/i915/i915_vma.c
parent    6ce1c33d6c36fb3858e8e956d72586f7a024ed3a (diff)
drm/i915: Combine unbound/bound list tracking for objects
With async binding, we don't want to manage a bound/unbound list as we
may end up running before we even acquire the pages. All that is
required is keeping track of shrinkable objects, so reduce it to the
minimum list.

Fixes: 6951e5893b48 ("drm/i915: Move GEM object domain management from struct_mutex to local")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190612105720.30310-1-chris@chris-wilson.co.uk
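As orientation before the diff, here is a minimal sketch of the tracking
model the patch moves to, in self-contained kernel-style C. The demo_mm
and demo_obj types and every demo_* helper are hypothetical stand-ins,
not the real drm_i915_private and drm_i915_gem_object structures; only
the two ideas they illustrate (an atomic bind_count, and one shrink list
in place of the bound/unbound pair) mirror the change below.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical, trimmed-down stand-ins for the i915 bookkeeping. */
struct demo_mm {
        spinlock_t obj_lock;
        struct list_head shrink_list;   /* the one remaining list */
};

struct demo_obj {
        struct list_head link;          /* position on shrink_list */
        atomic_t bind_count;            /* no longer guarded by obj_lock */
};

static void demo_mm_init(struct demo_mm *mm)
{
        spin_lock_init(&mm->obj_lock);
        INIT_LIST_HEAD(&mm->shrink_list);
}

/*
 * Bind/unbind reduce to bare atomic ops: no spinlock, and no moving
 * the object between bound and unbound lists.
 */
static void demo_obj_bind(struct demo_obj *obj)
{
        atomic_inc(&obj->bind_count);
}

static void demo_obj_unbind(struct demo_obj *obj)
{
        atomic_dec(&obj->bind_count);
}

/*
 * Bumping the MRU position still takes obj_lock, but now moves the
 * object unconditionally to the tail of the single shrink list.
 */
static void demo_obj_bump_mru(struct demo_mm *mm, struct demo_obj *obj)
{
        unsigned long flags;

        spin_lock_irqsave(&mm->obj_lock, flags);
        list_move_tail(&obj->link, &mm->shrink_list);
        spin_unlock_irqrestore(&mm->obj_lock, flags);
}

The shape matters more than the names: list membership now tracks only
whether the object is shrinkable, independent of bind state, so an async
bind that runs before the pages are acquired has no list to reorder, and
because bind_count is an atomic_t the bind/unbind fast path never touches
obj_lock at all.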
Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 34 +++++-----------------------------
1 file changed, 5 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 80050f6a0893..cb341e4acf99 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -83,10 +83,7 @@ static void obj_bump_mru(struct drm_i915_gem_object *obj)
 	unsigned long flags;
 
 	spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
-	if (obj->bind_count)
-		list_move_tail(&obj->mm.link, &i915->mm.bound_list);
-
+	list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
 	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
 	obj->mm.dirty = true; /* be paranoid */
@@ -538,7 +535,7 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
 	 * assume that no one else is pinning the pages, but as a rough assertion
 	 * that we will not run into problems later, this will do!)
 	 */
-	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
 }
 
 /**
@@ -680,18 +677,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	mutex_unlock(&vma->vm->mutex);
 
 	if (vma->obj) {
-		struct drm_i915_gem_object *obj = vma->obj;
-		unsigned long flags;
-
-		spin_lock_irqsave(&dev_priv->mm.obj_lock, flags);
-
-		if (i915_gem_object_is_shrinkable(obj))
-			list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
-
-		obj->bind_count++;
-		assert_bind_count(obj);
-
-		spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags);
+		atomic_inc(&vma->obj->bind_count);
+		assert_bind_count(vma->obj);
 	}
 
 	return 0;
@@ -707,8 +694,6 @@ err_unpin:
 static void
 i915_vma_remove(struct i915_vma *vma)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
-
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
@@ -725,17 +710,8 @@ i915_vma_remove(struct i915_vma *vma)
 	 */
 	if (vma->obj) {
 		struct drm_i915_gem_object *obj = vma->obj;
-		unsigned long flags;
-
-		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
-		GEM_BUG_ON(obj->bind_count == 0);
-		if (--obj->bind_count == 0 &&
-		    i915_gem_object_is_shrinkable(obj) &&
-		    obj->mm.madv == I915_MADV_WILLNEED)
-			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
-		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+		atomic_dec(&obj->bind_count);
 
 		/*
 		 * And finally now the object is completely decoupled from this