author    Chris Wilson <chris@chris-wilson.co.uk>    2016-08-04 07:52:26 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>    2016-08-04 08:09:17 +0100
commit    15717de219f2ea4792d27ff62c29d08e46cda7f8
tree      bddf0f5e92ec4833db4d549e635fa3081bec6b6a
parent    2bfa996e031bdc6de1567ee05438f8a310fa7a4c
drm/i915: Count how many VMA are bound for an object
Since we may have VMA allocated for an object, but we interrupted their
binding, there is a disparity between having elements on the obj->vma_list
and being bound. i915_gem_obj_bound_any() does this check, but this is not
rigorously observed - add an explicit count to make it easier.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-7-git-send-email-chris@chris-wilson.co.uk
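For context, below is a minimal standalone sketch of the check this patch introduces in the shrinker. The stand-in types (struct vma_stub, struct obj_stub), the plain array used in place of obj->vma_list, and the scenario in main() are assumptions for illustration only, not the real i915 structures; only the field names pages_pin_count, bind_count and pin_count mirror the patch.

#include <stdbool.h>
#include <stddef.h>

/* Stand-in for i915_vma: only the pin count matters for this sketch. */
struct vma_stub {
        unsigned int pin_count;
};

/* Stand-in for drm_i915_gem_object with just the fields used here. */
struct obj_stub {
        unsigned int pages_pin_count;   /* pins held on the backing pages */
        unsigned int bind_count;        /* VMA actually bound (new counter) */
        struct vma_stub *vmas;          /* stand-in for obj->vma_list */
        size_t nr_vmas;
};

static bool any_vma_pinned(const struct obj_stub *obj)
{
        for (size_t i = 0; i < obj->nr_vmas; i++)
                if (obj->vmas[i].pin_count)
                        return true;
        return false;
}

static bool can_release_pages(const struct obj_stub *obj)
{
        /*
         * Pins on the pages beyond those held by bound VMA mean that
         * unbinding alone cannot release the pages, so skip the object.
         */
        if (obj->pages_pin_count > obj->bind_count)
                return false;

        /* A pinned VMA cannot be unbound at all. */
        if (any_vma_pinned(obj))
                return false;

        return true;
}

int main(void)
{
        struct vma_stub vma = { .pin_count = 0 };
        struct obj_stub obj = {
                .pages_pin_count = 1,
                .bind_count = 1,
                .vmas = &vma,
                .nr_vmas = 1,
        };

        /* One bound, unpinned VMA holding the only page pin: releasable. */
        return can_release_pages(&obj) ? 0 : 1;
}

Compared with counting bound VMA on every shrinker pass (the old num_vma_bound()), keeping bind_count up to date at bind/unbind time reduces the check to a simple integer comparison.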
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_shrinker.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 17
 1 file changed, 8 insertions, 9 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 5d4772c146b1..b95cd9f404f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -48,19 +48,15 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }

-static int num_vma_bound(struct drm_i915_gem_object *obj)
+static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
         struct i915_vma *vma;
-        int count = 0;

-        list_for_each_entry(vma, &obj->vma_list, obj_link) {
-                if (drm_mm_node_allocated(&vma->node))
-                        count++;
+        list_for_each_entry(vma, &obj->vma_list, obj_link)
                 if (vma->pin_count)
-                        count++;
-        }
+                        return true;

-        return count;
+        return false;
 }

 static bool swap_available(void)
@@ -82,7 +78,10 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
          * to the GPU, simply unbinding from the GPU is not going to succeed
          * in releasing our pin count on the pages themselves.
          */
-        if (obj->pages_pin_count != num_vma_bound(obj))
+        if (obj->pages_pin_count > obj->bind_count)
+                return false;
+
+        if (any_vma_pinned(obj))
                 return false;

         /* We can only return physical pages to the system if we can either