author    Chris Wilson <chris@chris-wilson.co.uk>	2020-01-30 18:17:10 +0000
committer Chris Wilson <chris@chris-wilson.co.uk>	2020-01-30 21:35:43 +0000
commit    e3793468b4660a9825eb3a149aab1bcd0de7a4f2 (patch)
tree      548f103e1c902446cf136ce28f0cd550d5ab3661 /drivers/gpu/drm/i915/i915_active.c
parent    e986209c67024c7d1e7c4c9f0ac0d75ef9d968f5 (diff)
drm/i915: Use the async worker to avoid reclaim tainting the ggtt->mutex
On Braswell and Broxton (also known as Valleyview and Apollolake), we need to
serialise updates of the GGTT using the big stop_machine() hammer. This has
the side effect of appearing to lockdep as a possible reclaim (since it uses
the cpuhp mutex and that is tainted by per-cpu allocations). However, we want
to use vm->mutex (including ggtt->mutex) from within the shrinker and so must
avoid such possible taints. For this purpose, we introduced the asynchronous
vma binding and we can apply it to the PIN_GLOBAL so long as we take care to
add the necessary waits for the worker afterwards.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/211
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200130181710.2030251-3-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/i915_active.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_active.c	10
1 file changed, 8 insertions(+), 2 deletions(-)
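A minimal sketch of the pattern the commit message describes, using only
generic kernel workqueue APIs rather than the i915 vma-bind helpers this
series actually uses (struct bind_work, bind_fn, queue_bind and
wait_for_bind below are illustrative names, not from this patch): the GGTT
update, which may run stop_machine() on Braswell/Broxton, is pushed to a
worker so it never executes under vm->mutex, and callers that need the
binding complete flush the worker afterwards, outside the mutex.

#include <linux/workqueue.h>

struct bind_work {
	struct work_struct base;
	/* parameters describing the GGTT update to perform */
};

static void bind_fn(struct work_struct *wrk)
{
	struct bind_work *bw = container_of(wrk, struct bind_work, base);

	/*
	 * The heavyweight GGTT update, including the stop_machine()
	 * path on Braswell/Broxton, runs here, outside vm->mutex, so
	 * lockdep never sees it inside a shrinker-visible lock.
	 */
	(void)bw;
}

static void queue_bind(struct bind_work *bw)
{
	/* called under vm->mutex: queue instead of binding synchronously */
	INIT_WORK(&bw->base, bind_fn);
	queue_work(system_unbound_wq, &bw->base);
}

static void wait_for_bind(struct bind_work *bw)
{
	/* called after dropping the mutex, before the binding is used */
	flush_work(&bw->base);
}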
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 3d2e7cf55e52..da58e5d084f4 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -390,13 +390,19 @@ out:
 	return err;
 }
 
-void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
 {
+	struct dma_fence *prev;
+
 	/* We expect the caller to manage the exclusive timeline ordering */
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
-	if (!__i915_active_fence_set(&ref->excl, f))
+	prev = __i915_active_fence_set(&ref->excl, f);
+	if (!prev)
 		atomic_inc(&ref->count);
+
+	return prev;
 }
 
 bool i915_active_acquire_if_busy(struct i915_active *ref)
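With the hunk above, i915_active_set_exclusive() hands the previously
installed exclusive fence back to the caller instead of discarding it. A
hedged sketch of how a caller might consume that return value (the
order_after_previous() wrapper and its arguments are illustrative, and the
synchronous dma_fence_wait() stands in for whatever ordering the real
async-bind path arranges, such as making the bind worker await the fence):

#include <linux/dma-fence.h>

#include "i915_active.h"

static void order_after_previous(struct i915_active *active,
				 struct dma_fence *fence)
{
	struct dma_fence *prev;

	prev = i915_active_set_exclusive(active, fence);
	if (prev) {
		/*
		 * Assuming the returned fence carries a reference (as
		 * __i915_active_fence_set takes one on the old fence),
		 * the caller must order against it and then drop it;
		 * a plain wait is shown here for brevity.
		 */
		dma_fence_wait(prev, false);
		dma_fence_put(prev);
	}
}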