author    Linus Torvalds <torvalds@linux-foundation.org>  2018-04-02 07:59:23 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-04-02 07:59:23 -0700
commit    320b164abb32db876866a4ff8c2cb710524ac6ea
tree      1f79119cde6e24c9f1d01fb1e51252bca7c4cdd5 /drivers/gpu/drm/i915/gvt/mmio_context.c
parent    0adb32858b0bddf4ada5f364a84ed60b196dbcda
parent    694f54f680f7fd8e9561928fbfc537d9afbc3d79
Merge tag 'drm-for-v4.17' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"Cannonlake and Vega12 support are probably the two major things. This
pull lacks nouveau, Ben had some unforseen leave and a few other
blockers so we'll see how things look or maybe leave it for this merge
window.
core:
- Device links to handle sound/gpu pm dependency
- Color encoding/range properties
- Plane clipping into plane check helper
- Backlight helpers
- DP TP4 + HBR3 helper support
amdgpu:
- Vega12 support
- Enable DC by default on all supported GPUs
- Powerplay restructuring and cleanup
- DC bandwidth calc updates
- DC backlight on pre-DCE11
- TTM backing store dropping support
- SR-IOV fixes
- Adding "wattman" like functionality
- DC crc support
- Improved DC dual-link handling
amdkfd:
- GPUVM support for dGPU
- KFD events for dGPU
- Enable PCIe atomics for dGPUs
- HSA process eviction support
- Live-lock fixes for process eviction
- VM page table allocation fix for large-bar systems
panel:
- Raydium RM68200
- AUO G104SN02 V2
- KOE TX31D200VM0BAA
- ARM Versatile panels
i915:
- Cannonlake support enabled
- AUX-F port support added
- Icelake base enabling until internal milestone of forcewake support
- Query uAPI interface (used for GPU topology information currently)
- Compressed framebuffer support for sprites
- kmem cache shrinking when GPU is idle
- Avoid boosting GPU when waited item is being processed already
- Avoid retraining LSPCON link unnecessarily
- Decrease request signaling latency
- Deprecation of I915_SET_COLORKEY_NONE
- Kerneldoc and compiler warning cleanup for upcoming CI enforcements
- Full range ycbcr toggling
- HDCP support
i915/gvt:
- Big refactor for shadow ppgtt
- KBL context save/restore via LRI cmd (Weinan)
- Properly unmap dma for guest page (Changbin)
vmwgfx:
- Lots of various improvements
etnaviv:
- Use the drm gpu scheduler
- prep work for GC7000L support
vc4:
- fix alpha blending
- Expose perf counters to userspace
pl111:
- Bandwidth checking/limiting
- Versatile panel support
sun4i:
- A83T HDMI support
- A80 support
- YUV plane support
- H3/H5 HDMI support
omapdrm:
- HPD support for DVI connector
- remove lots of static variables
msm:
- DSI updates from 10nm / SDM845
- fix for race condition with a3xx/a4xx fence completion irq
- some refactoring/prep work for eventual a6xx support (ie. when we
have a userspace)
- a5xx debugfs enhancements
- some mdp5 fixes/cleanups to prepare for eventually merging writeback
  support (ie. when we have a userspace)
tegra:
- mmap() fixes for fbdev devices
- Overlay plane for hw cursor fix
- dma-buf cache maintenance support
mali-dp:
- YUV->RGB conversion support
rockchip:
- rk3399/chromebook fixes and improvements
rcar-du:
- LVDS support moved to drm bridge
- DT bindings for R8A77995
- Driver/DT support for R8A77970
tilcdc:
- DRM panel support"
* tag 'drm-for-v4.17' of git://people.freedesktop.org/~airlied/linux: (1646 commits)
drm/i915: Fix hibernation with ACPI S0 target state
drm/i915/execlists: Use a locked clear_bit() for synchronisation with interrupt
drm/i915: Specify which engines to reset following semaphore/event lockups
drm/i915/dp: Write to SET_POWER dpcd to enable MST hub.
drm/amdkfd: Use ordered workqueue to restore processes
drm/amdgpu: Fix acquiring VM on large-BAR systems
drm/amd/pp: clean header file hwmgr.h
drm/amd/pp: use mlck_table.count for array loop index limit
drm: Fix uabi regression by allowing garbage mode->type from userspace
drm/amdgpu: Add an ATPX quirk for hybrid laptop
drm/amdgpu: fix spelling mistake: "asssert" -> "assert"
drm/amd/pp: Add new asic support in pp_psm.c
drm/amd/pp: Clean up powerplay code on Vega12
drm/amd/pp: Add smu irq handlers for legacy asics
drm/amd/pp: Fix set wrong temperature range on smu7
drm/amdgpu: Don't change preferred domian when fallback GTT v5
drm/vmwgfx: Bump version patchlevel and date
drm/vmwgfx: use monotonic event timestamps
drm/vmwgfx: Unpin the screen object backup buffer when not used
drm/vmwgfx: Stricter count of legacy surface device resources
...
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/mmio_context.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 210
1 file changed, 191 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 152df3d0291e..a5bac83d53a9 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -50,6 +50,8 @@
 #define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
 #define VF_GUARDBAND		_MMIO(0x83a4)
 
+#define GEN9_MOCS_SIZE		64
+
 /* Raw offset is appened to each line for convenience. */
 static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
 	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
@@ -152,8 +154,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 
 static struct {
 	bool initialized;
-	u32 control_table[I915_NUM_ENGINES][64];
-	u32 l3cc_table[32];
+	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
+	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
 } gen9_render_mocs;
 
 static void load_render_mocs(struct drm_i915_private *dev_priv)
@@ -170,7 +172,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 
 	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
 		offset.reg = regs[ring_id];
-		for (i = 0; i < 64; i++) {
+		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
 			gen9_render_mocs.control_table[ring_id][i] =
 				I915_READ_FW(offset);
 			offset.reg += 4;
@@ -178,7 +180,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 	}
 
 	offset.reg = 0xb020;
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
 		gen9_render_mocs.l3cc_table[i] =
 			I915_READ_FW(offset);
 		offset.reg += 4;
@@ -186,6 +188,153 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 	gen9_render_mocs.initialized = true;
 }
 
+static int
+restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
+				 struct i915_request *req)
+{
+	u32 *cs;
+	int ret;
+	struct engine_mmio *mmio;
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ring_id = req->engine->id;
+	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
+
+	if (count == 0)
+		return 0;
+
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
+	if (ret)
+		return ret;
+
+	cs = intel_ring_begin(req, count * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(count);
+	for (mmio = gvt->engine_mmio_list.mmio;
+	     i915_mmio_reg_valid(mmio->reg); mmio++) {
+		if (mmio->ring_id != ring_id ||
+		    !mmio->in_context)
+			continue;
+
+		*cs++ = i915_mmio_reg_offset(mmio->reg);
+		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
+				(mmio->mask << 16);
+		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
+			      *(cs-2), *(cs-1), vgpu->id, ring_id);
+	}
+
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
+					struct i915_request *req)
+{
+	unsigned int index;
+	u32 *cs;
+
+	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
+
+	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
+		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
+		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
+		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
+			      *(cs-2), *(cs-1), vgpu->id, req->engine->id);
+
+	}
+
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	return 0;
+}
+
+static int
+restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
+				     struct i915_request *req)
+{
+	unsigned int index;
+	u32 *cs;
+
+	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
+
+	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
+		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
+		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
+			      *(cs-2), *(cs-1), vgpu->id, req->engine->id);
+
+	}
+
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	return 0;
+}
+
+/*
+ * Use lri command to initialize the mmio which is in context state image for
+ * inhibit context, it contains tracked engine mmio, render_mocs and
+ * render_mocs_l3cc.
+ */
+int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
+				       struct i915_request *req)
+{
+	int ret;
+	u32 *cs;
+
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	ret = restore_context_mmio_for_inhibit(vgpu, req);
+	if (ret)
+		goto out;
+
+	/* no MOCS register in context except render engine */
+	if (req->engine->id != RCS)
+		goto out;
+
+	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
+	if (ret)
+		goto out;
+
+	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
+	if (ret)
+		goto out;
+
+out:
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	return ret;
+}
+
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -252,11 +401,14 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
+	if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+		return;
+
 	if (!pre && !gen9_render_mocs.initialized)
 		load_render_mocs(dev_priv);
 
 	offset.reg = regs[ring_id];
-	for (i = 0; i < 64; i++) {
+	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
 		if (pre)
 			old_v = vgpu_vreg_t(pre, offset);
 		else
@@ -274,7 +426,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 
 	if (ring_id == RCS) {
 		l3_offset.reg = 0xb020;
-		for (i = 0; i < 32; i++) {
+		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
 			if (pre)
 				old_v = vgpu_vreg_t(pre, l3_offset);
 			else
@@ -294,6 +446,16 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 
 #define CTX_CONTEXT_CONTROL_VAL	0x03
 
+bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+{
+	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
+	u32 inhibit_mask =
+		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+
+	return inhibit_mask ==
+		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
+}
+
 /* Switch ring mmio values (context). */
 static void switch_mmio(struct intel_vgpu *pre,
 			struct intel_vgpu *next,
@@ -301,9 +463,6 @@ static void switch_mmio(struct intel_vgpu *pre,
 {
 	struct drm_i915_private *dev_priv;
 	struct intel_vgpu_submission *s;
-	u32 *reg_state, ctx_ctrl;
-	u32 inhibit_mask =
-		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 	struct engine_mmio *mmio;
 	u32 old_v, new_v;
 
@@ -311,10 +470,18 @@ static void switch_mmio(struct intel_vgpu *pre,
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		switch_mocs(pre, next, ring_id);
 
-	for (mmio = dev_priv->gvt->engine_mmio_list;
+	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
 		if (mmio->ring_id != ring_id)
 			continue;
+		/*
+		 * No need to do save or restore of the mmio which is in context
+		 * state image on kabylake, it's initialized by lri command and
+		 * save or restore with context together.
+		 */
+		if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+			continue;
+
 		// save
 		if (pre) {
 			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
@@ -328,16 +495,13 @@ static void switch_mmio(struct intel_vgpu *pre,
 		// restore
 		if (next) {
 			s = &next->submission;
-			reg_state =
-				s->shadow_ctx->engine[ring_id].lrc_reg_state;
-			ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
 			/*
-			 * if it is an inhibit context, load in_context mmio
-			 * into HW by mmio write. If it is not, skip this mmio
-			 * write.
+			 * No need to restore the mmio which is in context state
+			 * image if it's not inhibit context, it will restore
+			 * itself.
 			 */
 			if (mmio->in_context &&
-			    (ctx_ctrl & inhibit_mask) != inhibit_mask)
+			    !is_inhibit_context(s->shadow_ctx, ring_id))
 				continue;
 
 			if (mmio->mask)
@@ -408,8 +572,16 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
  */
 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
+	struct engine_mmio *mmio;
+
 	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
-		gvt->engine_mmio_list = gen9_engine_mmio_list;
+		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
 	else
-		gvt->engine_mmio_list = gen8_engine_mmio_list;
+		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
+
+	for (mmio = gvt->engine_mmio_list.mmio;
+	     i915_mmio_reg_valid(mmio->reg); mmio++) {
+		if (mmio->in_context)
+			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+	}
 }
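
Aside: the gvt changes above restore in-context MMIO for an inhibit context by
emitting MI_LOAD_REGISTER_IMM (LRI) offset/value pairs into the request's ring
buffer instead of issuing direct MMIO writes. The following is a minimal
sketch of that command layout only; emit_lri_pairs() and struct reg_pair are
hypothetical helpers, not part of the i915/gvt API, and the opcode encoding is
a simplified rendering of i915's MI_INSTR(0x22, 2*(n)-1) definition.

/*
 * Illustrative sketch: layout of one LRI batch as built by
 * restore_context_mmio_for_inhibit() above -- a header dword whose
 * length field covers 2*n payload dwords, then n offset/value pairs,
 * then an MI_NOOP pad (hence the "count * 2 + 2" ring allocation).
 */
#include <stdint.h>
#include <stddef.h>

#define MI_NOOP		0x00000000u
/* MI command opcode lives in bits 28:23; low bits hold the dword length. */
#define MI_LRI(n)	((0x22u << 23) | (2u * (uint32_t)(n) - 1u))

struct reg_pair {
	uint32_t offset;	/* MMIO register offset */
	uint32_t value;		/* immediate value to load into it */
};

/* Fill cs with one LRI header, n pairs, and a pad; returns dwords written. */
static size_t emit_lri_pairs(uint32_t *cs,
			     const struct reg_pair *pairs, size_t n)
{
	size_t i, len = 0;

	cs[len++] = MI_LRI(n);
	for (i = 0; i < n; i++) {
		cs[len++] = pairs[i].offset;
		cs[len++] = pairs[i].value;
	}
	cs[len++] = MI_NOOP;	/* keep the total dword count even */
	return len;		/* == 2*n + 2, matching the patch */
}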
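Related: the "(mmio->mask << 16)" in the LRI payload and the
_MASKED_BIT_ENABLE() test in is_inhibit_context() both rely on the hardware's
masked-register convention, where the top 16 bits of a written value select
which of the bottom 16 bits actually change. A small model of that behaviour,
with apply_masked_write() as a hypothetical name for what the hardware does:

/*
 * Sketch of the masked-register write convention: bits 31:16 are a
 * write-enable mask for bits 15:0. The macros mirror the intent of
 * i915's _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE().
 */
#include <stdint.h>

#define MASKED_BIT_ENABLE(b)	((((uint32_t)(b)) << 16) | (b))	/* set b */
#define MASKED_BIT_DISABLE(b)	(((uint32_t)(b)) << 16)		/* clear b */

/* How the register combines its old value with a masked write. */
static uint32_t apply_masked_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;	/* which low bits may change */

	return (old & ~mask) | (val & mask);
}

Under this model, is_inhibit_context() checks that the restore-inhibit bit is
present in the saved CTX_CONTEXT_CONTROL image in masked form, i.e. both the
value bit and its write-enable bit are set.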