author	Chris Wilson <chris@chris-wilson.co.uk>	2019-06-25 14:01:09 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-06-25 20:17:22 +0100
commit	07bfe6bf1052f074093cdea95d6041f48b994c4b (patch)
tree	c184d7d430b3ec222e91e2773290c722088844fa /drivers/gpu/drm/i915/gt
parent	9a6a644096a1066c2ef5c47db0db79ac72af7967 (diff)
drm/i915/execlists: Convert recursive defer_request() into iterative
As this engine owns the lock around rq->sched.link (for those waiters
submitted to this engine), we can use that link as an element in a local
list. We can thus replace the recursive algorithm with an iterative walk
over the ordered list of waiters.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-1-chris@chris-wilson.co.uk
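The recursion-to-iteration pattern used by the patch can be sketched in plain
user-space C. Everything below (struct req, defer(), the list helpers) is a
hypothetical stand-in for i915_request, defer_request() and the kernel's
list.h primitives, not the patched code itself: each node's own list link is
reused as the element of a local work list, so a deferred request queues its
eligible waiters on that list instead of recursing into them.

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins for the kernel's list.h primitives. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}
static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}
static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add_tail(e, h);
}
static int list_empty(const struct list_head *h) { return h->next == h; }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical request: the same link doubles as the work-list element. */
struct req {
	const char *name;
	struct list_head link;
	struct req *waiters[4];		/* NULL-terminated for brevity */
};

/*
 * Iterative defer: move rq to the tail of the priority list pl, collect its
 * waiters on a local list, then repeat with the next collected waiter until
 * none remain.  No recursion, so the stack usage stays bounded.
 */
static void defer(struct req *rq, struct list_head *pl)
{
	struct list_head local;

	list_init(&local);
	do {
		list_move_tail(&rq->link, pl);
		for (int i = 0; rq->waiters[i]; i++)
			list_move_tail(&rq->waiters[i]->link, &local);
		rq = list_empty(&local) ? NULL :
		     container_of(local.next, struct req, link);
	} while (rq);
}

int main(void)
{
	struct req a = { "A" },
		   b = { "B", .waiters = { &a } },
		   c = { "C", .waiters = { &b } },
		   d = { "D" };
	struct req *all[] = { &c, &b, &a, &d };
	struct list_head pl, *it;

	list_init(&pl);
	for (int i = 0; i < 4; i++)
		list_add_tail(&all[i]->link, &pl);

	defer(&c, &pl);		/* defer C and, transitively, B and A behind D */

	for (it = pl.next; it != &pl; it = it->next)
		printf("%s ", container_of(it, struct req, link)->name);
	printf("\n");		/* prints: D C B A */
	return 0;
}

Built with any C99 compiler, the example moves C and its transitive waiters B
and A behind the unrelated request D, mirroring what defer_request() does to
the engine's priority list.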
Diffstat (limited to 'drivers/gpu/drm/i915/gt')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_lrc.c	52
1 file changed, 27 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 28685ba91a2c..22afd2616d7f 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -833,10 +833,9 @@ last_active(const struct intel_engine_execlists *execlists)
return *last;
}
-static void
-defer_request(struct i915_request * const rq, struct list_head * const pl)
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
{
- struct i915_dependency *p;
+ LIST_HEAD(list);
/*
* We want to move the interrupted request to the back of
@@ -845,34 +844,37 @@ defer_request(struct i915_request * const rq, struct list_head * const pl)
* flight and were waiting for the interrupted request to
* be run after it again.
*/
- list_move_tail(&rq->sched.link, pl);
+ do {
+ struct i915_dependency *p;
- list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
- /* Leave semaphores spinning on the other engines */
- if (w->engine != rq->engine)
- continue;
+ list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
- /* No waiter should start before the active request completed */
- GEM_BUG_ON(i915_request_started(w));
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
- GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
- if (rq_prio(w) < rq_prio(rq))
- continue;
+ /* No waiter should start before its signaler */
+ GEM_BUG_ON(i915_request_started(w) &&
+ !i915_request_completed(rq));
- if (list_empty(&w->sched.link))
- continue; /* Not yet submitted; unready */
+ GEM_BUG_ON(i915_request_is_active(w));
+ if (list_empty(&w->sched.link))
+ continue; /* Not yet submitted; unready */
- /*
- * This should be very shallow as it is limited by the
- * number of requests that can fit in a ring (<64) and
- * the number of contexts that can be in flight on this
- * engine.
- */
- defer_request(w, pl);
- }
+ if (rq_prio(w) < rq_prio(rq))
+ continue;
+
+ GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
}
static void defer_active(struct intel_engine_cs *engine)