Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_shrinker.c')
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_shrinker.c	90
1 file changed, 50 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index b1672e054b21..e0d1f369a163 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2008-2015 Intel Corporation
  */
 
@@ -13,7 +12,10 @@
 #include <linux/dma-buf.h>
 #include <linux/vmalloc.h>
 
+#include <drm/drm_print.h>
+
 #include "gt/intel_gt_requests.h"
+#include "gt/intel_gt.h"
 
 #include "i915_trace.h"
 
@@ -24,7 +26,7 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-	/* Consider only shrinkable ojects. */
+	/* Consider only shrinkable objects. */
 	if (!i915_gem_object_is_shrinkable(obj))
 		return false;
 
@@ -116,10 +118,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 		},
 		{ NULL, 0 },
 	}, *phase;
-	intel_wakeref_t wakeref = 0;
+	intel_wakeref_t wakeref = NULL;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
-	int err = 0;
+	int err = 0, i = 0;
+	struct intel_gt *gt;
 
 	/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
 	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -147,9 +150,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	 * what we can do is give them a kick so that we do not keep idle
 	 * contexts around longer than is necessary.
 	 */
-	if (shrink & I915_SHRINK_ACTIVE)
-		/* Retire requests to unpin all idle contexts */
-		intel_gt_retire_requests(to_gt(i915));
+	if (shrink & I915_SHRINK_ACTIVE) {
+		for_each_gt(gt, i915, i)
+			/* Retire requests to unpin all idle contexts */
+			intel_gt_retire_requests(gt);
+	}
 
 	/*
 	 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -167,7 +172,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	 * Also note that although these lists do not hold a reference to
 	 * the object we can safely grab one here: The final object
 	 * unreferencing and the bound_list are both protected by the
-	 * dev->struct_mutex and so we won't ever be able to observe an
+	 * i915->mm.obj_lock and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equals 0.
 	 */
 	for (phase = phases; phase->list; phase++) {
@@ -182,7 +187,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 
 		/*
 		 * We serialize our access to unreferenced objects through
-		 * the use of the struct_mutex. While the objects are not
+		 * the use of the obj_lock. While the objects are not
 		 * yet freed (due to RCU then a workqueue) we still want
 		 * to be able to shrink their pages, so they remain on
 		 * the unbound/bound list until actually freed.
@@ -257,7 +262,7 @@ skip:
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
- * This is a simple wraper around i915_gem_shrink() to aggressively shrink all
+ * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
@@ -284,8 +289,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *i915 =
-		container_of(shrinker, struct drm_i915_private, mm.shrinker);
+	struct drm_i915_private *i915 = shrinker->private_data;
 	unsigned long num_objects;
 	unsigned long count;
 
@@ -302,8 +306,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 	if (num_objects) {
 		unsigned long avg = 2 * count / num_objects;
 
-		i915->mm.shrinker.batch =
-			max((i915->mm.shrinker.batch + avg) >> 1,
+		i915->mm.shrinker->batch =
+			max((i915->mm.shrinker->batch + avg) >> 1,
 			    128ul /* default SHRINK_BATCH */);
 	}
 
@@ -313,8 +317,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 static unsigned long
 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *i915 =
-		container_of(shrinker, struct drm_i915_private, mm.shrinker);
+	struct drm_i915_private *i915 = shrinker->private_data;
 	unsigned long freed;
 
 	sc->nr_scanned = 0;
@@ -389,6 +392,8 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
 	intel_wakeref_t wakeref;
+	struct intel_gt *gt;
+	int i;
 
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
@@ -397,24 +402,26 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 					       I915_SHRINK_VMAPS);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
-	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
-	list_for_each_entry_safe(vma, next,
-				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
-		unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		if (!vma->iomap || i915_vma_is_active(vma))
-			continue;
+	for_each_gt(gt, i915, i) {
+		mutex_lock(&gt->ggtt->vm.mutex);
+		list_for_each_entry_safe(vma, next,
+					 &gt->ggtt->vm.bound_list, vm_link) {
+			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
+			struct drm_i915_gem_object *obj = vma->obj;
+
+			if (!vma->iomap || i915_vma_is_active(vma))
+				continue;
 
-		if (!i915_gem_object_trylock(obj, NULL))
-			continue;
+			if (!i915_gem_object_trylock(obj, NULL))
+				continue;
 
-		if (__i915_vma_unbind(vma) == 0)
-			freed_pages += count;
+			if (__i915_vma_unbind(vma) == 0)
+				freed_pages += count;
 
-		i915_gem_object_unlock(obj);
+			i915_gem_object_unlock(obj);
+		}
+		mutex_unlock(&gt->ggtt->vm.mutex);
 	}
-	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
 
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;
@@ -422,12 +429,17 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 
 void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
 {
-	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
-	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
-	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
-	i915->mm.shrinker.batch = 4096;
-	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
-						  "drm-i915_gem"));
+	i915->mm.shrinker = shrinker_alloc(0, "drm-i915_gem");
+	if (!i915->mm.shrinker) {
+		drm_WARN_ON(&i915->drm, 1);
+	} else {
+		i915->mm.shrinker->scan_objects = i915_gem_shrinker_scan;
+		i915->mm.shrinker->count_objects = i915_gem_shrinker_count;
+		i915->mm.shrinker->batch = 4096;
+		i915->mm.shrinker->private_data = i915;
+
+		shrinker_register(i915->mm.shrinker);
+	}
 
 	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
 	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
@@ -443,7 +455,7 @@ void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
 		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
 	drm_WARN_ON(&i915->drm,
 		    unregister_oom_notifier(&i915->mm.oom_notifier));
-	unregister_shrinker(&i915->mm.shrinker);
+	shrinker_free(i915->mm.shrinker);
 }
 
 void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
@@ -460,8 +472,6 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
 	fs_reclaim_release(GFP_KERNEL);
 }
 
-#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
-
 /**
 * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By
 * default all object types that support shrinking(see IS_SHRINKABLE), will also
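
A note on the shrinker API conversion in this patch: i915 moves off an embedded struct shrinker (registered with register_shrinker(), recovered in callbacks via container_of()) onto one allocated by shrinker_alloc(), registered with shrinker_register(), torn down with shrinker_free(), and tied back to the device through shrinker->private_data. The following is a minimal sketch of that registration pattern against the kernel's shrinker API (6.7+); my_dev, my_count and my_scan are hypothetical names, not i915 code.

#include <linux/shrinker.h>
#include <linux/atomic.h>

struct my_dev {
	struct shrinker *shrinker;
	atomic_long_t nr_cached;	/* freeable objects, maintained elsewhere */
};

static unsigned long my_count(struct shrinker *shrinker,
			      struct shrink_control *sc)
{
	struct my_dev *dev = shrinker->private_data;	/* no container_of() */

	return atomic_long_read(&dev->nr_cached);	/* 0 means "skip the scan" */
}

static unsigned long my_scan(struct shrinker *shrinker,
			     struct shrink_control *sc)
{
	struct my_dev *dev = shrinker->private_data;

	/* A real driver would free up to sc->nr_to_scan objects here and
	 * return how many it freed; this sketch frees nothing. */
	if (!atomic_long_read(&dev->nr_cached))
		return SHRINK_STOP;
	return 0;
}

static int my_dev_shrinker_init(struct my_dev *dev)
{
	dev->shrinker = shrinker_alloc(0, "my-dev");	/* may fail under memory pressure */
	if (!dev->shrinker)
		return -ENOMEM;

	dev->shrinker->count_objects = my_count;
	dev->shrinker->scan_objects = my_scan;
	dev->shrinker->private_data = dev;

	shrinker_register(dev->shrinker);	/* callbacks are live from here on */
	return 0;
}

static void my_dev_shrinker_fini(struct my_dev *dev)
{
	shrinker_free(dev->shrinker);	/* unregisters and frees in one call */
}

The old seeks = DEFAULT_SEEKS assignment disappears in the patch because shrinker_alloc() already initializes seeks to DEFAULT_SEEKS; i915 keeps hand-tuning only batch, seeding it at 4096 and re-averaging it against observed object counts in its count callback.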
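Similarly, the for_each_gt() conversions are what make the shrinker multi-GT aware: request retirement and the cached-iomap flush now visit every GT instead of only to_gt(i915), the primary one. The sketch below mirrors just the iteration shape under hypothetical my_* names; i915's real macro lives in gt/intel_gt.h and walks i915->gt[] up to I915_MAX_GT, skipping unpopulated slots.

#define MY_MAX_GT 2

struct my_gt {
	int id;
	/* per-GT state (requests, ggtt, ...) */
};

struct my_i915 {
	struct my_gt *gt[MY_MAX_GT];	/* unpopulated slots stay NULL */
};

/* Visit every populated GT; the "if (...) { } else" keeps NULL slots
 * skipped without creating a dangling-else, in the spirit of i915's
 * for_each_if() helper. */
#define my_for_each_gt(gt__, i915__, id__) \
	for ((id__) = 0; (id__) < MY_MAX_GT; (id__)++) \
		if (!((gt__) = (i915__)->gt[(id__)])) { } else

static void my_retire_requests(struct my_gt *gt)
{
	/* stand-in for intel_gt_retire_requests(gt) */
}

static void my_shrink_active(struct my_i915 *i915)
{
	struct my_gt *gt;
	int i;

	/* As in the patch: kick request retirement on every GT so idle
	 * contexts everywhere are unpinned, not just on the primary GT. */
	my_for_each_gt(gt, i915, i)
		my_retire_requests(gt);
}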
