author    Chris Wilson <chris@chris-wilson.co.uk>  2019-05-30 21:34:59 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>  2019-05-31 21:23:51 +0100
commit    3b4fa9640ccded07fff6d563d3ac1b2f3f111d97 (patch)
tree      69182acdcb860677b9482570363992ec49c504d3 /drivers/gpu/drm/i915/gem/i915_gem_domain.c
parent    7ef5ef5cdead61b8bc17493aae565962611a2918 (diff)
drm/i915: Track the purgeable objects on a separate eviction list
Currently the purgeable objects, I915_MADV_DONTNEED, are mixed in with the normal bound/unbound lists. Every shrinker pass starts with an attempt to purge from this set of unneeded objects, which entails walking both lists looking for any candidates. If there are none (and since we are shrinking, we can reasonably assume the lists are full!), this becomes a very slow, futile walk.

If we separate out the purgeable objects into their own list, this search becomes its own phase that is preferentially handled during shrinking. The cost instead becomes that we need to filter the purgeable list if we want to distinguish between bound and unbound objects.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190530203500.26272-1-chris@chris-wilson.co.uk
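The standalone sketch below illustrates the bookkeeping described above: objects marked DONTNEED live on a dedicated purge list that the shrinker drains before it ever walks the bound/unbound lists. The struct, enum, and function names here are invented for the example and do not correspond to the actual i915 code or this patch.

#include <stdio.h>

/* Illustrative madvise states, mirroring the names in the commit message. */
enum obj_madv { OBJ_MADV_WILLNEED, OBJ_MADV_DONTNEED };

/* Hypothetical object: it sits on exactly one of the eviction lists below. */
struct object {
	enum obj_madv	 madv;
	int		 bound;	/* non-zero if the object has a binding */
	struct object	*next;	/* singly linked for brevity */
};

/* Hypothetical per-device bookkeeping with the extra purge list. */
struct mm_lists {
	struct object *bound;
	struct object *unbound;
	struct object *purge;	/* DONTNEED objects, reclaimed first */
};

/* Place an object on the list it belongs to after a madvise change. */
static void track(struct mm_lists *mm, struct object *obj)
{
	struct object **head;

	if (obj->madv == OBJ_MADV_DONTNEED)
		head = &mm->purge;	/* the shrinker scans this list first */
	else if (obj->bound)
		head = &mm->bound;
	else
		head = &mm->unbound;

	obj->next = *head;
	*head = obj;
}

/* Shrinker sketch: drain the purge list before touching the other lists. */
static int shrink_purgeable(struct mm_lists *mm)
{
	int freed = 0;

	while (mm->purge) {
		mm->purge = mm->purge->next;
		freed++;	/* pretend the object's pages were reaped */
	}
	return freed;
}

int main(void)
{
	struct mm_lists mm = { 0 };
	struct object a = { OBJ_MADV_DONTNEED, 0, NULL };
	struct object b = { OBJ_MADV_WILLNEED, 1, NULL };

	track(&mm, &a);
	track(&mm, &b);
	printf("purged %d object(s)\n", shrink_purgeable(&mm));	/* prints 1 */
	return 0;
}

With a dedicated purge list, the "find something purgeable" phase no longer scans the (presumably full) bound/unbound lists; the trade-off, as the message notes, is that any pass over purgeable objects must now filter for bound versus unbound itself.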
Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_domain.c')
 drivers/gpu/drm/i915/gem/i915_gem_domain.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index cce96e6c6e52..52b73e90c9f4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -462,7 +462,6 @@ err_unpin_global:
 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct list_head *list;
 	struct i915_vma *vma;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -476,10 +475,15 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	}
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
-	spin_lock(&i915->mm.obj_lock);
-	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
-	list_move_tail(&obj->mm.link, list);
-	spin_unlock(&i915->mm.obj_lock);
+	if (obj->mm.madv == I915_MADV_WILLNEED) {
+		struct list_head *list;
+
+		spin_lock(&i915->mm.obj_lock);
+		list = obj->bind_count ?
+			&i915->mm.bound_list : &i915->mm.unbound_list;
+		list_move_tail(&obj->mm.link, list);
+		spin_unlock(&i915->mm.obj_lock);
+	}
 }
 
 void