path: root/drivers/gpu/drm/i915/gvt/scheduler.c
author		Dave Airlie <airlied@redhat.com>	2017-08-22 10:03:07 +1000
committer	Dave Airlie <airlied@redhat.com>	2017-08-22 10:03:07 +1000
commit		735f463af70e9601881ec879961ec42aef051733 (patch)
tree		766a5feeb05521fb2370a1b24c4d008c54532986 /drivers/gpu/drm/i915/gvt/scheduler.c
parent		3aadb888b1b62ba04798414cae431d3c3bd5f452 (diff)
parent		a42894ebb50d831ec0b7ee9bee7f5a5a37bad7e1 (diff)
Merge tag 'drm-intel-next-2017-08-18' of git://anongit.freedesktop.org/git/drm-intel into drm-next
Final pile of features for 4.14:

- New ioctl to change NOA configurations, plus prep (Lionel)
- CCS (color compression) scanout support, based on the fancy new modifier additions (Ville&Ben)
- Document i915 register macro style (Jani)
- Many more gen10/cnl patches (Rodrigo, Paulo, ...)
- More gpu reset vs. modeset duct-tape to restore the old way.
- prep work for cnl: hpd_pin reorg (Rodrigo), support for more power wells (Imre), i2c pin reorg (Anusha)
- drm_syncobj support (Jason Ekstrand)
- forcewake vs gpu reset fix (Chris)
- execbuf speedup for the no-relocs fastpath, anv/vk low-overhead ftw (Chris)
- switch to idr/radixtree instead of the resizing ht for execbuf id->vma lookups (Chris)

gvt:
- MMIO save/restore optimization (Changbin)
- Split workload scan vs. dispatch for more parallel exec (Ping)
- vGPU full 48bit ppgtt support (Joonas, Tina)
- vGPU hw id expose for perf (Zhenyu)

Bunch of work all over to make the igt CI runs more complete/stable. Watch https://intel-gfx-ci.01.org/tree/drm-tip/shards-all.html for progress in getting this ready. Next week we're going into production mode (i.e. will send results to intel-gfx) on hsw, more platforms to come.

Also, a new maintainer team, I'm stepping out. Huge thanks to Jani for being an awesome co-maintainer the past few years, and all the best for Jani, Joonas&Rodrigo as the new maintainers!

* tag 'drm-intel-next-2017-08-18' of git://anongit.freedesktop.org/git/drm-intel: (179 commits)
  drm/i915: Update DRIVER_DATE to 20170818
  drm/i915/bxt: use NULL for GPIO connection ID
  drm/i915: Mark the GT as busy before idling the previous request
  drm/i915: Trivial grammar fix s/opt of/opt out of/ in comment
  drm/i915: Replace execbuf vma ht with an idr
  drm/i915: Simplify eb_lookup_vmas()
  drm/i915: Convert execbuf to use struct-of-array packing for critical fields
  drm/i915: Check context status before looking up our obj/vma
  drm/i915: Don't use MI_STORE_DWORD_IMM on Sandybridge/vcs
  drm/i915: Stop touching forcewake following a gen6+ engine reset
  MAINTAINERS: drm/i915 has a new maintainer team
  drm/i915: Split pin mapping into per platform functions
  drm/i915/opregion: let user specify override VBT via firmware load
  drm/i915/cnl: Reuse skl_wm_get_hw_state on Cannonlake.
  drm/i915/gen10: implement gen 10 watermarks calculations
  drm/i915/cnl: Fix LSPCON support.
  drm/i915/vbt: ignore extraneous child devices for a port
  drm/i915/cnl: Setup PAT Index.
  drm/i915/edp: Allow alternate fixed mode for eDP if available.
  drm/i915: Add support for drm syncobjs
  ...
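The gvt item "Split workload scan vs. dispatch for more parallel exec (Ping)" is the change shown in the diff below: the scan/shadow step is pulled out of dispatch_workload() into intel_gvt_scan_and_shadow_workload(), guarded by a workload->shadowed flag so it runs at most once and can happen ahead of dispatch. Here is a minimal sketch of that pattern; the struct and function names are simplified stand-ins, not the real i915/GVT types.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct intel_vgpu_workload (hypothetical). */
struct workload {
	int ring_id;
	bool shadowed;	/* set once the scan/shadow step has run */
	int status;
};

/* Scan/audit and shadow the workload; a repeat call is a cheap no-op,
 * mirroring the workload->shadowed check in the patch. */
static int scan_and_shadow(struct workload *w)
{
	if (w->shadowed)
		return 0;
	printf("ring %d: scan and shadow\n", w->ring_id);
	/* ... ring buffer, wa_ctx and context shadowing would go here ... */
	w->shadowed = true;
	return 0;
}

/* Dispatch only needs to make sure the scan step has already happened. */
static int dispatch(struct workload *w)
{
	int ret = scan_and_shadow(w);

	if (ret) {
		w->status = ret;
		return ret;
	}
	printf("ring %d: submit to i915\n", w->ring_id);
	return 0;
}

int main(void)
{
	struct workload w = { .ring_id = 0 };

	scan_and_shadow(&w);	/* may run early, before dispatch */
	return dispatch(&w);	/* dispatch reuses the shadowed state */
}

In the actual patch, dispatch_workload() takes struct_mutex and calls intel_gvt_scan_and_shadow_workload(), which returns immediately when workload->shadowed is already set; the lockdep_assert_held() documents that any earlier caller must hold struct_mutex as well.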
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 111
1 file changed, 80 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c873136add97..391800d2067b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -184,41 +184,52 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int dispatch_workload(struct intel_vgpu_workload *workload)
+static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = &ctx->engine[engine->id];
+ u64 desc = 0;
+
+ desc = ce->lrc_desc;
+
+ /* Update bits 0-11 of the context descriptor which includes flags
+ * like GEN8_CTX_* cached in desc_template
+ */
+ desc &= U64_MAX << 12;
+ desc |= ctx->desc_template & ((1ULL << 12) - 1);
+
+ ce->lrc_desc = desc;
+}
+
+/**
+ * intel_gvt_scan_and_shadow_workload - audit the workload by scanning it and
+ * shadowing it as well, including the ring buffer, wa_ctx and ctx.
+ * @workload: an abstract entity for each execlist submission.
+ *
+ * This function is called before the workload is submitted to i915, to make
+ * sure the content of the workload is valid.
+ */
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_ring *ring;
int ret;
- gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
- ring_id, workload);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (workload->shadowed)
+ return 0;
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = engine->context_pin(engine, shadow_ctx);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
- workload->status = ret;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- return ret;
- }
+ if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(shadow_ctx,
+ dev_priv->engine[ring_id]);
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
@@ -231,7 +242,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
workload->req = i915_gem_request_get(rq);
- ret = intel_gvt_scan_and_shadow_workload(workload);
+ ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
goto out;
@@ -246,25 +257,61 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (ret)
goto out;
+ workload->shadowed = true;
+
+out:
+ return ret;
+}
+
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_ring *ring;
+ int ret = 0;
+
+ gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+ ring_id, workload);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ ret = intel_gvt_scan_and_shadow_workload(workload);
+ if (ret)
+ goto out;
+
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
goto out;
}
- gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
- ring_id, workload->req);
+ /* pin shadow context by gvt even the shadow context will be pinned
+ * when i915 alloc request. That is because gvt will update the guest
+ * context from shadow context when workload is completed, and at that
+ * moment, i915 may already unpined the shadow context to make the
+ * shadow_ctx pages invalid. So gvt need to pin itself. After update
+ * the guest context, gvt can unpin the shadow_ctx safely.
+ */
+ ring = engine->context_pin(engine, shadow_ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
+ gvt_vgpu_err("fail to pin shadow context\n");
+ goto out;
+ }
- ret = 0;
- workload->dispatched = true;
out:
if (ret)
workload->status = ret;
- if (!IS_ERR_OR_NULL(rq))
- i915_add_request(rq);
- else
- engine->context_unpin(engine, shadow_ctx);
+ if (!IS_ERR_OR_NULL(workload->req)) {
+ gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+ ring_id, workload->req);
+ i915_add_request(workload->req);
+ workload->dispatched = true;
+ }
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
@@ -631,5 +678,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
vgpu->shadow_ctx->engine[RCS].initialised = true;
+ bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+
return 0;
}
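The new shadow_context_descriptor_update() in the first hunk refreshes only bits 0-11 of the cached lrc_desc from ctx->desc_template. A small standalone sketch of that bit manipulation, using made-up values in place of the real GEN8_CTX_* flags and ~0ULL in place of U64_MAX:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Replace the low 12 bits of desc with the low 12 bits of tmpl, keeping
 * bits 63..12 untouched -- the same masking used in
 * shadow_context_descriptor_update(). */
static uint64_t update_low12(uint64_t desc, uint64_t tmpl)
{
	desc &= ~0ULL << 12;			/* clear bits 11..0 */
	desc |= tmpl & ((1ULL << 12) - 1);	/* copy bits 11..0 from tmpl */
	return desc;
}

int main(void)
{
	uint64_t desc = 0x0000000012345abcULL;	/* made-up cached descriptor */
	uint64_t tmpl = 0x0000000000000123ULL;	/* made-up desc_template */

	/* Prints 0x0000000012345123: upper bits kept, low 12 bits replaced. */
	printf("0x%016" PRIx64 "\n", update_low12(desc, tmpl));
	return 0;
}

The per-ring shadow_ctx_desc_updated bitmap (cleared with bitmap_zero() in the last hunk, tested with test_and_set_bit() in the first) ensures this update is applied at most once per engine for a given vGPU shadow context.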