Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 86
1 file changed, 48 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index be6c39adebdf..2a8160f603ab 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -154,9 +154,7 @@
#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
#define GEN8_CTX_STATUS_COMPLETED_MASK \
- (GEN8_CTX_STATUS_ACTIVE_IDLE | \
- GEN8_CTX_STATUS_PREEMPTED | \
- GEN8_CTX_STATUS_ELEMENT_SWITCH)
+ (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
#define CTX_LRI_HEADER_0 0x01
#define CTX_CONTEXT_CONTROL 0x02
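For reference, the mask above is built from the CSB status bits defined a few
lines earlier in this file; the change replaces the indirect
ACTIVE_IDLE | PREEMPTED | ELEMENT_SWITCH combination with the explicit
COMPLETE bit:

#define GEN8_CTX_STATUS_IDLE_ACTIVE    (1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED      (1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE    (1 << 3)
#define GEN8_CTX_STATUS_COMPLETE       (1 << 4)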
@@ -221,37 +219,6 @@ static void execlists_init_reg_state(u32 *reg_state,
struct intel_ring *ring);
/**
- * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
- * @dev_priv: i915 device private
- * @enable_execlists: value of i915.enable_execlists module parameter.
- *
- * Only certain platforms support Execlists (the prerequisites being
- * support for Logical Ring Contexts and Aliasing PPGTT or better).
- *
- * Return: 1 if Execlists is supported and has to be enabled.
- */
-int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
-{
- /* On platforms with execlist available, vGPU will only
- * support execlist mode, no ring buffer mode.
- */
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
- return 1;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return 1;
-
- if (enable_execlists == 0)
- return 0;
-
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
- USES_PPGTT(dev_priv))
- return 1;
-
- return 0;
-}
-
-/**
 * intel_lr_context_descriptor_update() - calculate & cache the
 * descriptor for a pinned context
* @ctx: Context to work on
@@ -412,6 +379,20 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
status, rq);
}
+static inline void
+execlists_context_schedule_in(struct drm_i915_gem_request *rq)
+{
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(rq->engine);
+}
+
+static inline void
+execlists_context_schedule_out(struct drm_i915_gem_request *rq)
+{
+ intel_engine_context_out(rq->engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+}
+
static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
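The new schedule_in/schedule_out helpers pair the context-status notification
with intel_engine_context_in()/intel_engine_context_out(), the entry points of
the engine busy-time accounting that the end of this patch advertises via
I915_ENGINE_SUPPORTS_STATS. A rough sketch of what that accounting is assumed
to do (the stats field names are modelled on the engine-stats code of this
series, not taken from this diff):

static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);
	if (engine->stats.enabled > 0) {
		/* start the busy clock when the first context enters the HW */
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
	}
	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

intel_engine_context_out() mirrors this: when the last context leaves the
hardware it accumulates ktime_get() - stats.start into a running busy total.
That is why the ordering of the helpers is asymmetric: schedule_in notifies
before the clock starts, and schedule_out stops the clock before notifying, so
the busy window sits strictly inside the SCHEDULE_IN/SCHEDULE_OUT pair.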
@@ -463,7 +444,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
if (rq) {
GEM_BUG_ON(count > !n);
if (!count++)
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ execlists_context_schedule_in(rq);
port_set(&port[n], port_pack(rq, count));
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
@@ -479,6 +460,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
elsp_write(desc, elsp);
}
+ execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
}
static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
@@ -531,6 +513,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
elsp_write(0, elsp);
elsp_write(ce->lrc_desc, elsp);
+ execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
}
static void execlists_dequeue(struct intel_engine_cs *engine)
@@ -577,9 +560,20 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* know the next preemption status we see corresponds
* to this ELSP update.
*/
+ GEM_BUG_ON(!port_count(&port[0]));
if (port_count(&port[0]) > 1)
goto unlock;
+ /*
+ * If we write to ELSP a second time before the HW has had
+ * a chance to respond to the previous write, we can confuse
+ * the HW and hit "undefined behaviour". After writing to ELSP,
+ * we must then wait until we see a context-switch event from
+ * the HW to indicate that it has had a chance to respond.
+ */
+ if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
+ goto unlock;
+
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
rb_entry(rb, struct i915_priolist, node)->priority >
max(last->priotree.priority, 0)) {
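Taken together, the EXECLISTS_ACTIVE_HWACK handling added by this patch forms
a small handshake built entirely from helpers visible in this diff: every ELSP
write clears the flag, the first context-switch event sets it again, and
dequeue refuses a second ELSP write while it is clear. In outline:

	/* submission side (execlists_submit_ports, inject_preempt_context) */
	elsp_write(desc, elsp);
	execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);

	/* interrupt side (execlists_submission_tasklet, below) */
	if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED))
		execlists_set_active(execlists, EXECLISTS_ACTIVE_HWACK);

	/* dequeue side: stall until the previous write is acknowledged */
	if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
		goto unlock;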
@@ -713,6 +707,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
struct drm_i915_gem_request *rq = port_request(port);
GEM_BUG_ON(!execlists->active);
+ intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
i915_gem_request_put(rq);
@@ -873,10 +868,22 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_TRACE("%s csb[%dd]: status=0x%08x:0x%08x\n",
engine->name, head,
status, buf[2*head + 1]);
+
+ if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
+ GEN8_CTX_STATUS_PREEMPTED))
+ execlists_set_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+ if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
+ execlists_clear_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+
if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
continue;
- if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
+ /* We should never get a COMPLETED | IDLE_ACTIVE! */
+ GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
+
+ if (status & GEN8_CTX_STATUS_COMPLETE &&
buf[2*head + 1] == PREEMPT_ID) {
execlists_cancel_port_requests(execlists);
execlists_unwind_incomplete_requests(execlists);
@@ -907,9 +914,10 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_BUG_ON(count == 0);
if (--count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+ GEM_BUG_ON(port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
GEM_BUG_ON(!i915_gem_request_completed(rq));
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
-
+ execlists_context_schedule_out(rq);
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
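The count consumed by the "if (--count == 0)" test above is not a separate
field: each execlist port is assumed to pack a small submission count (bumped
again on a lite-restore) into the low bits of the request pointer, along the
lines of the intel_ringbuffer.h helpers of this era:

struct execlist_port {
	struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2	/* count lives in the pointer's low bits */
#define port_request(p)      ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p)        ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
};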
@@ -1911,6 +1919,8 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->park = NULL;
engine->unpark = NULL;
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
}
static void
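Setting I915_ENGINE_SUPPORTS_STATS here advertises that this submission path
drives the context_in/context_out hooks, so engine busyness can be sampled. A
hypothetical consumer (the i915 PMU from the same series) would be expected to
use it roughly as follows; the helper names are assumptions, not part of this
patch:

	if (intel_enable_engine_stats(engine) == 0) {
		/* ...sample periodically... */
		ktime_t busy = intel_engine_get_busy_time(engine);

		intel_disable_engine_stats(engine);
	}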