author    Chris Wilson <chris@chris-wilson.co.uk>  2016-10-28 13:58:53 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>  2016-10-28 20:53:54 +0100
commit    85e17f5974b357bc4a127be09de71b430be265e0 (patch)
tree      5039cbf409c2481f3a5bba1e76774b1b1d92e64b /drivers/gpu/drm/i915/i915_gem_request.c
parent    caddfe7192f5e74d65ebcfdae614f99e8fd87222 (diff)
drm/i915: Move the global sync optimisation to the timeline
Currently we try to reduce the number of synchronisations (now the number
of requests we need to wait upon) by noting that if we have earlier waited
upon a request, all subsequent requests in the timeline will be after the
wait. This only applies to requests in this timeline, as other timelines
will not be ordered by that waiter.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-30-chris@chris-wilson.co.uk
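In outline: each timeline caches, per source engine, the last global seqno it
has already waited upon, and a new inter-engine wait is emitted only when the
incoming request lies beyond that mark. The standalone C program below is an
illustrative sketch of that idea, not the i915 code; NUM_ENGINES, struct
timeline, struct request and await_needed() are assumed stand-in names, and
only the per-timeline sync_seqno indexing mirrors the patch.

/*
 * Illustrative sketch only -- models the per-timeline sync_seqno cache
 * that this patch introduces. Not the kernel definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_ENGINES 4 /* assumed engine count for the sketch */

struct timeline {
	/* last global seqno from each engine already waited upon */
	uint32_t sync_seqno[NUM_ENGINES];
};

struct request {
	int engine_id;          /* engine the request runs on */
	uint32_t global_seqno;  /* position in the global timeline */
	struct timeline *timeline;
};

/* Modular seqno comparison, as in the kernel's i915_seqno_passed() */
static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

/* Returns true if 'to' must actually emit a wait upon 'from'. */
static bool await_needed(struct request *to, const struct request *from)
{
	uint32_t *cached = &to->timeline->sync_seqno[from->engine_id];

	if (seqno_passed(*cached, from->global_seqno))
		return false; /* an earlier wait already orders us after 'from' */

	*cached = from->global_seqno; /* record the new high-water mark */
	return true;
}

int main(void)
{
	struct timeline tl = { { 0 } };
	struct request to = { 0, 10, &tl };
	struct request a = { 1, 5, NULL };
	struct request b = { 1, 3, NULL };

	printf("wait on a? %d\n", await_needed(&to, &a)); /* 1: first wait */
	printf("wait on b? %d\n", await_needed(&to, &b)); /* 0: covered by a */
	return 0;
}

Because requests within one timeline execute in order, the wait recorded for
request a (seqno 5) implicitly covers the older request b (seqno 3), so no
second wait is needed. The cache is only valid per timeline, which is why the
patch moves it from engine->semaphore.sync_seqno[] to
to->timeline->sync_seqno[], and why the first hunk clears every timeline's
cache when the global seqno is reset.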
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_request.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 06daa4d203a7..9c34a4c540b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -238,35 +238,41 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-static int i915_gem_init_global_seqno(struct drm_i915_private *dev_priv,
-				      u32 seqno)
+static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
-	struct i915_gem_timeline *timeline = &dev_priv->gt.global_timeline;
+	struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int ret;
 
 	/* Carefully retire all requests without writing to the rings */
-	ret = i915_gem_wait_for_idle(dev_priv,
+	ret = i915_gem_wait_for_idle(i915,
 				     I915_WAIT_INTERRUPTIBLE |
 				     I915_WAIT_LOCKED);
 	if (ret)
 		return ret;
 
-	i915_gem_retire_requests(dev_priv);
+	i915_gem_retire_requests(i915);
 
 	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
 	if (!i915_seqno_passed(seqno, timeline->next_seqno)) {
-		while (intel_kick_waiters(dev_priv) ||
-		       intel_kick_signalers(dev_priv))
+		while (intel_kick_waiters(i915) || intel_kick_signalers(i915))
 			yield();
 		yield();
 	}
 
 	/* Finally reset hw state */
-	for_each_engine(engine, dev_priv, id)
+	for_each_engine(engine, i915, id)
 		intel_engine_init_global_seqno(engine, seqno);
 
+	list_for_each_entry(timeline, &i915->gt.timelines, link) {
+		for_each_engine(engine, i915, id) {
+			struct intel_timeline *tl = &timeline->engine[id];
+
+			memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
+		}
+	}
+
 	return 0;
 }
 
@@ -462,7 +468,7 @@ static int
 i915_gem_request_await_request(struct drm_i915_gem_request *to,
 			       struct drm_i915_gem_request *from)
 {
-	int idx, ret;
+	int ret;
 
 	GEM_BUG_ON(to == from);
 
@@ -483,8 +489,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 		return ret < 0 ? ret : 0;
 	}
 
-	idx = intel_engine_sync_index(from->engine, to->engine);
-	if (from->global_seqno <= from->engine->semaphore.sync_seqno[idx])
+	if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
 		return 0;
 
 	trace_i915_gem_ring_sync_to(to, from);
@@ -502,7 +507,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 		return ret;
 	}
 
-	from->engine->semaphore.sync_seqno[idx] = from->global_seqno;
+	to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
 
 	return 0;
 }