Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_evict.c')
 -rw-r--r--   drivers/gpu/drm/i915/i915_gem_evict.c | 174
 1 file changed, 151 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 2b73ddb11c66..c02ebd6900ae 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -31,26 +31,70 @@
 #include "gt/intel_gt_requests.h"
 
 #include "i915_drv.h"
+#include "i915_gem_evict.h"
 #include "i915_trace.h"
 
 I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
         bool fail_if_busy:1;
 } igt_evict_ctl;)
 
-static int ggtt_flush(struct intel_gt *gt)
+static bool dying_vma(struct i915_vma *vma)
+{
+        return !kref_read(&vma->obj->base.refcount);
+}
+
+static int ggtt_flush(struct i915_address_space *vm)
+{
+        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+        struct intel_gt *gt;
+        int ret = 0;
+
+        list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
+                /*
+                 * Not everything in the GGTT is tracked via vma (otherwise we
+                 * could evict as required with minimal stalling) so we are forced
+                 * to idle the GPU and explicitly retire outstanding requests in
+                 * the hopes that we can then remove contexts and the like only
+                 * bound by their active reference.
+                 */
+                ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+                if (ret)
+                        return ret;
+        }
+        return ret;
+}
+
+static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
 {
         /*
-         * Not everything in the GGTT is tracked via vma (otherwise we
-         * could evict as required with minimal stalling) so we are forced
-         * to idle the GPU and explicitly retire outstanding requests in
-         * the hopes that we can then remove contexts and the like only
-         * bound by their active reference.
+         * We add the extra refcount so the object doesn't drop to zero until
+         * after ungrab_vma(), this way trylock is always paired with unlock.
         */
-        return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+        if (i915_gem_object_get_rcu(vma->obj)) {
+                if (!i915_gem_object_trylock(vma->obj, ww)) {
+                        i915_gem_object_put(vma->obj);
+                        return false;
+                }
+        } else {
+                /* Dead objects don't need pins */
+                atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+        }
+
+        return true;
+}
+
+static void ungrab_vma(struct i915_vma *vma)
+{
+        if (dying_vma(vma))
+                return;
+
+        i915_gem_object_unlock(vma->obj);
+        i915_gem_object_put(vma->obj);
 }
 
 static bool
 mark_free(struct drm_mm_scan *scan,
+          struct i915_gem_ww_ctx *ww,
           struct i915_vma *vma,
           unsigned int flags,
           struct list_head *unwind)
@@ -58,6 +102,9 @@ mark_free(struct drm_mm_scan *scan,
         if (i915_vma_is_pinned(vma))
                 return false;
 
+        if (!grab_vma(vma, ww))
+                return false;
+
         list_add(&vma->evict_link, unwind);
         return drm_mm_scan_add_block(scan, &vma->node);
 }
@@ -76,6 +123,7 @@ static bool defer_evict(struct i915_vma *vma)
 /**
  * i915_gem_evict_something - Evict vmas to make room for binding a new one
  * @vm: address space to evict from
+ * @ww: An optional struct i915_gem_ww_ctx.
  * @min_size: size of the desired free space
  * @alignment: alignment constraint of the desired free space
  * @color: color for the desired space
@@ -98,6 +146,7 @@ static bool defer_evict(struct i915_vma *vma)
  */
 int
 i915_gem_evict_something(struct i915_address_space *vm,
+                         struct i915_gem_ww_ctx *ww,
                          u64 min_size, u64 alignment,
                          unsigned long color,
                          u64 start, u64 end,
@@ -109,6 +158,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
         struct drm_mm_node *node;
         enum drm_mm_insert_mode mode;
         struct i915_vma *active;
+        struct intel_gt *gt;
         int ret;
 
         lockdep_assert_held(&vm->mutex);
@@ -134,7 +184,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
                                     min_size, alignment, color,
                                     start, end, mode);
 
-        intel_gt_retire_requests(vm->gt);
+        if (i915_is_ggtt(vm)) {
+                struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+                list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+                        intel_gt_retire_requests(gt);
+        } else {
+                intel_gt_retire_requests(vm->gt);
+        }
 
 search_again:
         active = NULL;
@@ -170,7 +227,7 @@ search_again:
                         continue;
                 }
 
-                if (mark_free(&scan, vma, flags, &eviction_list))
+                if (mark_free(&scan, ww, vma, flags, &eviction_list))
                         goto found;
         }
 
@@ -178,6 +235,7 @@ search_again:
         list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                 ret = drm_mm_scan_remove_block(&scan, &vma->node);
                 BUG_ON(ret);
+                ungrab_vma(vma);
         }
 
         /*
@@ -205,7 +263,7 @@ search_again:
         if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
                 return -EBUSY;
 
-        ret = ggtt_flush(vm->gt);
+        ret = ggtt_flush(vm);
         if (ret)
                 return ret;
 
@@ -222,10 +280,12 @@ found:
          * of any of our objects, thus corrupting the list).
          */
         list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
-                if (drm_mm_scan_remove_block(&scan, &vma->node))
+                if (drm_mm_scan_remove_block(&scan, &vma->node)) {
                         __i915_vma_pin(vma);
-                else
+                } else {
                         list_del(&vma->evict_link);
+                        ungrab_vma(vma);
+                }
         }
 
         /* Unbinding will emit any required flushes */
@@ -234,16 +294,20 @@ found:
                 __i915_vma_unpin(vma);
                 if (ret == 0)
                         ret = __i915_vma_unbind(vma);
+                ungrab_vma(vma);
         }
 
         while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
                 vma = container_of(node, struct i915_vma, node);
 
                 /* If we find any non-objects (!vma), we cannot evict them */
-                if (vma->node.color != I915_COLOR_UNEVICTABLE)
+                if (vma->node.color != I915_COLOR_UNEVICTABLE &&
+                    grab_vma(vma, ww)) {
                         ret = __i915_vma_unbind(vma);
-                else
-                        ret = -ENOSPC; /* XXX search failed, try again? */
+                        ungrab_vma(vma);
+                } else {
+                        ret = -ENOSPC;
+                }
         }
 
         return ret;
@@ -252,6 +316,7 @@ found:
 /**
  * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
  * @vm: address space to evict from
+ * @ww: An optional struct i915_gem_ww_ctx.
  * @target: range (and color) to evict for
  * @flags: additional flags to control the eviction algorithm
  *
@@ -261,6 +326,7 @@ found:
  * memory in e.g. the shrinker.
  */
 int i915_gem_evict_for_node(struct i915_address_space *vm,
+                            struct i915_gem_ww_ctx *ww,
                             struct drm_mm_node *target,
                             unsigned int flags)
 {
@@ -283,7 +349,15 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
          * a stray pin (preventing eviction) that can only be resolved by
          * retiring.
          */
-        intel_gt_retire_requests(vm->gt);
+        if (i915_is_ggtt(vm)) {
+                struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+                struct intel_gt *gt;
+
+                list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+                        intel_gt_retire_requests(gt);
+        } else {
+                intel_gt_retire_requests(vm->gt);
+        }
 
         if (i915_vm_has_cache_coloring(vm)) {
                 /* Expand search to cover neighbouring guard pages (or lack!) */
@@ -333,6 +407,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                         break;
                 }
 
+                if (!grab_vma(vma, ww)) {
+                        ret = -ENOSPC;
+                        break;
+                }
+
                 /*
                  * Never show fear in the face of dragons!
                  *
@@ -350,6 +429,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                 __i915_vma_unpin(vma);
                 if (ret == 0)
                         ret = __i915_vma_unbind(vma);
+
+                ungrab_vma(vma);
         }
 
         return ret;
@@ -358,6 +439,13 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 /**
  * i915_gem_evict_vm - Evict all idle vmas from a vm
  * @vm: Address space to cleanse
+ * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
+ * will be able to evict vma's locked by the ww as well.
+ * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL, then
+ * in the event i915_gem_evict_vm() is unable to trylock an object for eviction,
+ * then @busy_bo will point to it. -EBUSY is also returned. The caller must drop
+ * the vm->mutex, before trying again to acquire the contended lock. The caller
+ * also owns a reference to the object.
  *
  * This function evicts all vmas from a vm.
  *
@@ -367,7 +455,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_vm(struct i915_address_space *vm)
+int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
+                      struct drm_i915_gem_object **busy_bo)
 {
         int ret = 0;
 
@@ -380,7 +469,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
          * switch otherwise is ineffective.
          */
         if (i915_is_ggtt(vm)) {
-                ret = ggtt_flush(vm->gt);
+                ret = ggtt_flush(vm);
                 if (ret)
                         return ret;
         }
@@ -388,24 +477,63 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
         do {
                 struct i915_vma *vma, *vn;
                 LIST_HEAD(eviction_list);
+                LIST_HEAD(locked_eviction_list);
 
                 list_for_each_entry(vma, &vm->bound_list, vm_link) {
                         if (i915_vma_is_pinned(vma))
                                 continue;
 
+                        /*
+                         * If we already own the lock, trylock fails. In case
+                         * the resv is shared among multiple objects, we still
+                         * need the object ref.
+                         */
+                        if (!i915_gem_object_get_rcu(vma->obj) ||
+                            (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
+                                __i915_vma_pin(vma);
+                                list_add(&vma->evict_link, &locked_eviction_list);
+                                continue;
+                        }
+
+                        if (!i915_gem_object_trylock(vma->obj, ww)) {
+                                if (busy_bo) {
+                                        *busy_bo = vma->obj; /* holds ref */
+                                        ret = -EBUSY;
+                                        break;
+                                }
+                                i915_gem_object_put(vma->obj);
+                                continue;
+                        }
+
                         __i915_vma_pin(vma);
                         list_add(&vma->evict_link, &eviction_list);
                 }
-                if (list_empty(&eviction_list))
+                if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
                         break;
 
-                ret = 0;
+                /* Unbind locked objects first, before unlocking the eviction_list */
+                list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
+                        __i915_vma_unpin(vma);
+
+                        if (ret == 0) {
+                                ret = __i915_vma_unbind(vma);
+                                if (ret != -EINTR) /* "Get me out of here!" */
+                                        ret = 0;
+                        }
+                        if (!dying_vma(vma))
+                                i915_gem_object_put(vma->obj);
+                }
+
                 list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
                         __i915_vma_unpin(vma);
-                        if (ret == 0)
+                        if (ret == 0) {
                                 ret = __i915_vma_unbind(vma);
-                        if (ret != -EINTR) /* "Get me out of here!" */
-                                ret = 0;
+                                if (ret != -EINTR) /* "Get me out of here!" */
+                                        ret = 0;
+                        }
+
+                        i915_gem_object_unlock(vma->obj);
+                        i915_gem_object_put(vma->obj);
                 }
         } while (ret == 0);
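
The grab/ungrab discipline above is the core of the patch: grab_vma() takes an
extra object reference before attempting the trylock, so a later ungrab_vma()
can always pair the unlock with a put, even when the object is already on its
way to destruction. A minimal sketch of how an eviction path uses the pair,
assuming the helpers as defined in this patch (evict_one_vma is a hypothetical
name for illustration, not part of the patch):

/* Hypothetical illustration: unbind one vma under the grab/ungrab
 * discipline. Caller holds vm->mutex, as in the eviction paths above. */
static int evict_one_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
        int err;

        /* Fails if the object lock is contended by another context. */
        if (!grab_vma(vma, ww))
                return -ENOSPC;

        err = __i915_vma_unbind(vma);

        /* Drops the object lock and the extra reference, unless the
         * object was already dying, in which case neither was taken. */
        ungrab_vma(vma);
        return err;
}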

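On the caller side, the new @busy_bo contract documented in the
i915_gem_evict_vm() kernel-doc implies a retry pattern: on -EBUSY the caller
owns a reference to the contended object and must drop vm->mutex before taking
its lock. A hedged sketch of such a caller (evict_vm_sync is a hypothetical
name; a real caller would additionally run inside an i915_gem_ww_ctx loop so
that -EDEADLK from the lock triggers ww backoff):

/* Hypothetical caller sketch based on the documented @busy_bo contract. */
static int evict_vm_sync(struct i915_address_space *vm,
                         struct i915_gem_ww_ctx *ww)
{
        struct drm_i915_gem_object *busy_bo;
        int err;

        for (;;) {
                busy_bo = NULL;

                mutex_lock(&vm->mutex);
                err = i915_gem_evict_vm(vm, ww, &busy_bo);
                mutex_unlock(&vm->mutex);

                if (err != -EBUSY || !busy_bo)
                        return err;

                /*
                 * We own a reference to busy_bo; acquire its lock outside
                 * vm->mutex (may sleep, may return -EDEADLK under ww
                 * contention), then retry the eviction.
                 */
                err = i915_gem_object_lock(busy_bo, ww);
                i915_gem_object_put(busy_bo);
                if (err)
                        return err;
        }
}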