author	Matthew Brost <matthew.brost@intel.com>	2021-09-09 09:47:36 -0700
committer	John Harrison <John.C.Harrison@Intel.com>	2021-09-13 11:30:44 -0700
commit	b0d83888a32b30cb95bee7385151ac58d51a2340 (patch)
tree	8412aa7f7d633fde28bdd0b72101916210b5cfde /drivers/gpu/drm/i915
parent	ae36b62927f1cfe81095641d6279cbf23fb64b2a (diff)
drm/i915/guc: Release submit fence from an irq_work
A subsequent patch will flip the locking hierarchy from ce->guc_state.lock ->
sched_engine->lock to sched_engine->lock -> ce->guc_state.lock. As such we
need to release the submit fence for a request from an irq_work to break a
lock inversion - i.e. the fence must be released while holding
ce->guc_state.lock, and releasing the fence can acquire sched_engine->lock.

v2:
 (Daniele)
  - Delete request from list before calling irq_work_queue

Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-16-matthew.brost@intel.com
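
For illustration only (not part of the patch): a minimal sketch of the irq_work
deferral pattern this change relies on, using hypothetical names
(pending_release, release_work, release_cb, signal_fences_locked). The idea is
the same as described above - entries are unlinked while the owner's lock is
held and their release is queued to irq_work, so the actual completion runs
outside that lock and is free to take a lock that must nest above it.

    #include <linux/irq_work.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    /* Hypothetical entry whose "release" must not run under the list lock. */
    struct pending_release {
    	struct list_head link;		/* on the owner's fences list */
    	struct irq_work release_work;	/* defers the release out of the lock */
    };

    /*
     * irq_work callback: runs outside the lock that was held when it was
     * queued, so it may safely acquire locks that nest above that lock.
     */
    static void release_cb(struct irq_work *wrk)
    {
    	struct pending_release *pr =
    		container_of(wrk, typeof(*pr), release_work);

    	/* ...complete the fence / wake waiters for pr here... */
    	kfree(pr);
    }

    /* Allocation side, mirroring the init_irq_work() call added by the patch. */
    static struct pending_release *pending_release_create(void)
    {
    	struct pending_release *pr = kzalloc(sizeof(*pr), GFP_KERNEL);

    	if (pr)
    		init_irq_work(&pr->release_work, release_cb);
    	return pr;
    }

    /*
     * Called with the list's lock held (analogous to ce->guc_state.lock):
     * unlink each entry, then queue its irq_work; never release inline
     * while the lock is held.
     */
    static void signal_fences_locked(struct list_head *fences)
    {
    	struct pending_release *pr, *pn;

    	list_for_each_entry_safe(pr, pn, fences, link) {
    		/* Unlink before queuing: once queued, the callback may run
    		 * immediately on another CPU and must not race the list walk
    		 * (this is the v2 note above). */
    		list_del(&pr->link);
    		irq_work_queue(&pr->release_work);
    	}
    }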
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--	drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c	22
-rw-r--r--	drivers/gpu/drm/i915/i915_request.h	5
2 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index dcd7a09f8559..4b7ccf9730a3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2049,17 +2049,32 @@ static const struct intel_context_ops guc_context_ops = {
 	.create_virtual = guc_create_virtual,
 };
 
+static void submit_work_cb(struct irq_work *wrk)
+{
+	struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+	might_lock(&rq->engine->sched_engine->lock);
+	i915_sw_fence_complete(&rq->submit);
+}
+
 static void __guc_signal_context_fence(struct intel_context *ce)
 {
-	struct i915_request *rq;
+	struct i915_request *rq, *rn;
 
 	lockdep_assert_held(&ce->guc_state.lock);
 
 	if (!list_empty(&ce->guc_state.fences))
 		trace_intel_context_fence_release(ce);
 
-	list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
-		i915_sw_fence_complete(&rq->submit);
+	/*
+	 * Use an IRQ to ensure locking order of sched_engine->lock ->
+	 * ce->guc_state.lock is preserved.
+	 */
+	list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
+				 guc_fence_link) {
+		list_del(&rq->guc_fence_link);
+		irq_work_queue(&rq->submit_work);
+	}
 
 	INIT_LIST_HEAD(&ce->guc_state.fences);
 }
@@ -2169,6 +2184,7 @@ out:
 	spin_lock_irqsave(&ce->guc_state.lock, flags);
 	if (context_wait_for_deregister_to_register(ce) ||
 	    context_pending_disable(ce)) {
+		init_irq_work(&rq->submit_work, submit_work_cb);
 		i915_sw_fence_await(&rq->submit);
 
 		list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 1bc1349ba3c2..d818cfbfc41d 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -218,6 +218,11 @@ struct i915_request {
 	};
 	struct llist_head execute_cb;
 	struct i915_sw_fence semaphore;
+	/**
+	 * @submit_work: complete submit fence from an IRQ if needed for
+	 * locking hierarchy reasons.
+	 */
+	struct irq_work submit_work;
 
 	/*
 	 * A list of everyone we wait upon, and everyone who waits upon us.