author     Zhenyu Wang <zhenyuw@linux.intel.com>    2019-04-16 16:50:34 +0800
committer  Zhenyu Wang <zhenyuw@linux.intel.com>    2019-04-16 16:50:34 +0800
commit     95d002e0a34cb0f238abb39987f9980f325d8332 (patch)
tree       b8ed70572a55b4e67410f906159f86e8aabb0f6a /drivers/gpu/drm/i915
parent     d57b39e3ee3cdb4b00452090e386d197980cefc9 (diff)
parent     28d618e9ab86f26a31af0b235ced55beb3e343c8 (diff)
Merge tag 'drm-intel-next-2019-04-04' into gvt-next
Merge back drm-intel-next for the engine name definition refinement
and for 54939ea0bd85 ("drm/i915: Switch to use HWS indices rather than addresses"),
which upcoming gvt fixes need to depend on.
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
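
For reference, the engine name definition refinement mentioned above is visible throughout the GVT hunks below: the legacy enumerators RCS/BCS/VCS/VCS2/VECS become instance-numbered RCS0/BCS0/VCS0/VCS1/VECS0, open-coded (1 << RCS) and ENGINE_MASK() masks become BIT(RCS0), and engine masks move from unsigned long/unsigned int to the dedicated intel_engine_mask_t type. Below is a minimal, self-contained sketch of that pattern; the enum values, the BIT() macro and the 32-bit mask width here are illustrative stand-ins, not the actual i915 header definitions.

/* Illustrative stand-ins for the post-merge naming: instance-numbered engines. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum example_engine_id { RCS0, BCS0, VCS0, VCS1, VECS0, NUM_EXAMPLE_ENGINES };

/* was: unsigned long / unsigned int engine_mask; exact kernel width may differ */
typedef uint32_t intel_engine_mask_t;

int main(void)
{
	/* old style: engine_mask |= (1 << RCS); new style: engine_mask |= BIT(RCS0) */
	intel_engine_mask_t engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(VCS1);

	for (int id = 0; id < NUM_EXAMPLE_ENGINES; id++)
		if (engine_mask & BIT(id))
			printf("engine %d selected\n", id);

	return 0;
}

In the driver itself the same idea shows up as, for example, engine_mask |= BIT(VCS0) in gdrst_mmio_write() and the intel_engine_mask_t parameters on the vGPU submission ops in the hunks that follow.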
Diffstat (limited to 'drivers/gpu/drm/i915')
224 files changed, 29109 insertions, 17956 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 33a458b7f1fc..148be8e1a090 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -131,5 +131,5 @@ config DRM_I915_GVT_KVMGT menu "drm/i915 Debugging" depends on DRM_I915 depends on EXPERT -source drivers/gpu/drm/i915/Kconfig.debug +source "drivers/gpu/drm/i915/Kconfig.debug" endmenu diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 9e36ffb5eb7c..ad4d71161dda 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -21,11 +21,11 @@ config DRM_I915_DEBUG select DEBUG_FS select PREEMPT_COUNT select I2C_CHARDEV + select STACKDEPOT select DRM_DP_AUX_CHARDEV select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y - select STACKDEPOT if DRM=y # for DRM_DEBUG_MM select DRM_DEBUG_SELFTEST select SW_SYNC # signaling validation framework (igt/syncobj*) select DRM_I915_SW_FENCE_DEBUG_OBJECTS @@ -173,6 +173,7 @@ config DRM_I915_DEBUG_RUNTIME_PM bool "Enable extra state checking for runtime PM" depends on DRM_I915 default n + select STACKDEPOT help Choose this option to turn on extra state checking for the runtime PM functionality. This may introduce overhead during diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 63893fe00711..30bf3301ea24 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -22,6 +22,7 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) subdir-ccflags-y += $(call cc-disable-warning, sign-compare) subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized) subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides) +subdir-ccflags-y += $(call cc-disable-warning, uninitialized) subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror # Fine grained warnings disable @@ -40,10 +41,12 @@ i915-y := i915_drv.o \ i915_mm.o \ i915_params.o \ i915_pci.o \ - i915_suspend.o \ - i915_syncmap.o \ + i915_reset.o \ + i915_suspend.o \ i915_sw_fence.o \ + i915_syncmap.o \ i915_sysfs.o \ + i915_user_extensions.o \ intel_csr.o \ intel_device_info.o \ intel_pm.o \ @@ -54,8 +57,21 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o +# Test the headers are compilable as standalone units +i915-$(CONFIG_DRM_I915_WERROR) += \ + test_i915_active_types_standalone.o \ + test_i915_gem_context_types_standalone.o \ + test_i915_priolist_types_standalone.o \ + test_i915_scheduler_types_standalone.o \ + test_i915_timeline_types_standalone.o \ + test_intel_context_types_standalone.o \ + test_intel_engine_types_standalone.o \ + test_intel_workarounds_types_standalone.o + # GEM code -i915-y += i915_cmd_parser.o \ +i915-y += \ + i915_active.o \ + i915_cmd_parser.o \ i915_gem_batch_pool.o \ i915_gem_clflush.o \ i915_gem_context.o \ @@ -73,6 +89,7 @@ i915-y += i915_cmd_parser.o \ i915_gem_tiling.o \ i915_gem_userptr.o \ i915_gemfs.o \ + i915_globals.o \ i915_query.o \ i915_request.o \ i915_scheduler.o \ @@ -80,6 +97,7 @@ i915-y += i915_cmd_parser.o \ i915_trace_points.o \ i915_vma.o \ intel_breadcrumbs.o \ + intel_context.o \ intel_engine_cs.o \ intel_hangcheck.o \ intel_lrc.o \ @@ -166,6 +184,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ selftests/i915_random.o \ selftests/i915_selftest.o \ selftests/igt_flush_test.o \ + selftests/igt_live_test.o \ selftests/igt_reset.o \ 
selftests/igt_spinner.o diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 5e6a3013da49..16e0345b711f 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -24,7 +24,6 @@ #define _INTEL_DVO_H #include <linux/i2c.h> -#include <drm/drmP.h> #include <drm/drm_crtc.h> #include "intel_drv.h" diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index fe754022e356..1fa2f65c3cd1 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -61,10 +61,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) } mutex_lock(&dev_priv->drm.struct_mutex); + mmio_hw_access_pre(dev_priv); ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node, size, I915_GTT_PAGE_SIZE, I915_COLOR_UNEVICTABLE, start, end, flags); + mmio_hw_access_post(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); if (ret) gvt_err("fail to alloc %s gm space from host\n", @@ -178,7 +180,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) } mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } static int alloc_vgpu_fence(struct intel_vgpu *vgpu) @@ -204,7 +206,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) _clear_vgpu_fence(vgpu); mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; out_free_fence: gvt_vgpu_err("Failed to alloc fences\n"); @@ -217,7 +219,7 @@ out_free_fence: vgpu->fence.regs[i] = NULL; } mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return -ENOSPC; } @@ -315,7 +317,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) intel_runtime_pm_get(dev_priv); _clear_vgpu_fence(vgpu); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } /** diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index c53dbdbfeaa7..57a92a86c63e 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -391,12 +391,12 @@ struct cmd_info { #define F_POST_HANDLE (1<<2) u32 flag; -#define R_RCS (1 << RCS) -#define R_VCS1 (1 << VCS) -#define R_VCS2 (1 << VCS2) +#define R_RCS BIT(RCS0) +#define R_VCS1 BIT(VCS0) +#define R_VCS2 BIT(VCS1) #define R_VCS (R_VCS1 | R_VCS2) -#define R_BCS (1 << BCS) -#define R_VECS (1 << VECS) +#define R_BCS BIT(BCS0) +#define R_VECS BIT(VECS0) #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS) /* rings that support this cmd: BLT/RCS/VCS/VECS */ u16 rings; @@ -558,7 +558,7 @@ static const struct decode_info decode_info_vebox = { }; static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { - [RCS] = { + [RCS0] = { &decode_info_mi, NULL, NULL, @@ -569,7 +569,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [VCS] = { + [VCS0] = { &decode_info_mi, NULL, NULL, @@ -580,7 +580,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [BCS] = { + [BCS0] = { &decode_info_mi, NULL, &decode_info_2d, @@ -591,7 +591,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [VECS] = { + [VECS0] = { &decode_info_mi, NULL, NULL, @@ -602,7 +602,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [VCS2] = { + [VCS1] = { &decode_info_mi, NULL, NULL, @@ -631,8 +631,7 @@ static inline const struct cmd_info 
*find_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e; hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) { - if ((opcode == e->info->opcode) && - (e->info->rings & (1 << ring_id))) + if (opcode == e->info->opcode && e->info->rings & BIT(ring_id)) return e->info; } return NULL; @@ -943,15 +942,12 @@ static int cmd_handler_lri(struct parser_exec_state *s) struct intel_gvt *gvt = s->vgpu->gvt; for (i = 1; i < cmd_len; i += 2) { - if (IS_BROADWELL(gvt->dev_priv) && - (s->ring_id != RCS)) { - if (s->ring_id == BCS && - cmd_reg(s, i) == - i915_mmio_reg_offset(DERRMR)) + if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) { + if (s->ring_id == BCS0 && + cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR)) ret |= 0; else - ret |= (cmd_reg_inhibit(s, i)) ? - -EBADRQC : 0; + ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0; } if (ret) break; @@ -1047,27 +1043,27 @@ struct cmd_interrupt_event { }; static struct cmd_interrupt_event cmd_interrupt_events[] = { - [RCS] = { + [RCS0] = { .pipe_control_notify = RCS_PIPE_CONTROL, .mi_flush_dw = INTEL_GVT_EVENT_RESERVED, .mi_user_interrupt = RCS_MI_USER_INTERRUPT, }, - [BCS] = { + [BCS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = BCS_MI_FLUSH_DW, .mi_user_interrupt = BCS_MI_USER_INTERRUPT, }, - [VCS] = { + [VCS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VCS_MI_FLUSH_DW, .mi_user_interrupt = VCS_MI_USER_INTERRUPT, }, - [VCS2] = { + [VCS1] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VCS2_MI_FLUSH_DW, .mi_user_interrupt = VCS2_MI_USER_INTERRUPT, }, - [VECS] = { + [VECS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VECS_MI_FLUSH_DW, .mi_user_interrupt = VECS_MI_USER_INTERRUPT, diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 3e7e2b80c857..f27edf17b4ab 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -153,7 +153,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; - obj = i915_gem_object_alloc(dev_priv); + obj = i915_gem_object_alloc(); if (obj == NULL) return NULL; diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 70494e394d2c..f21b8fb5b37e 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -47,17 +47,16 @@ ((a)->lrca == (b)->lrca)) static int context_switch_events[] = { - [RCS] = RCS_AS_CONTEXT_SWITCH, - [BCS] = BCS_AS_CONTEXT_SWITCH, - [VCS] = VCS_AS_CONTEXT_SWITCH, - [VCS2] = VCS2_AS_CONTEXT_SWITCH, - [VECS] = VECS_AS_CONTEXT_SWITCH, + [RCS0] = RCS_AS_CONTEXT_SWITCH, + [BCS0] = BCS_AS_CONTEXT_SWITCH, + [VCS0] = VCS_AS_CONTEXT_SWITCH, + [VCS1] = VCS2_AS_CONTEXT_SWITCH, + [VECS0] = VECS_AS_CONTEXT_SWITCH, }; -static int ring_id_to_context_switch_event(int ring_id) +static int ring_id_to_context_switch_event(unsigned int ring_id) { - if (WARN_ON(ring_id < RCS || - ring_id >= ARRAY_SIZE(context_switch_events))) + if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events))) return -EINVAL; return context_switch_events[ring_id]; @@ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) gvt_dbg_el("complete workload %p status %d\n", workload, workload->status); - if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) + if (workload->status || (vgpu->resetting_eng & BIT(ring_id))) goto out; if (!list_empty(workload_q_head(vgpu, ring_id))) { @@ 
-527,12 +526,13 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; } -static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask) +static void clean_execlist(struct intel_vgpu *vgpu, + intel_engine_mask_t engine_mask) { - unsigned int tmp; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_engine_cs *engine; struct intel_vgpu_submission *s = &vgpu->submission; + intel_engine_mask_t tmp; for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { kfree(s->ring_scan_buffer[engine->id]); @@ -542,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask) } static void reset_execlist(struct intel_vgpu *vgpu, - unsigned long engine_mask) + intel_engine_mask_t engine_mask) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_engine_cs *engine; - unsigned int tmp; + intel_engine_mask_t tmp; for_each_engine_masked(engine, dev_priv, engine_mask, tmp) init_vgpu_execlist(vgpu, engine->id); } static int init_execlist(struct intel_vgpu *vgpu, - unsigned long engine_mask) + intel_engine_mask_t engine_mask) { reset_execlist(vgpu, engine_mask); return 0; diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h index 714d709829a2..5ccc2c695848 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.h +++ b/drivers/gpu/drm/i915/gvt/execlist.h @@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu); int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id); void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, - unsigned long engine_mask); + intel_engine_mask_t engine_mask); #endif /*_GVT_EXECLIST_H_*/ diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index dbd91ef28886..65e847392aea 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -231,7 +231,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->bpp = skl_pixel_formats[fmt].bpp; plane->drm_format = skl_pixel_formats[fmt].drm_format; } else { - plane->tiled = !!(val & DISPPLANE_TILED); + plane->tiled = val & DISPPLANE_TILED; fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK); plane->bpp = bdw_pixel_formats[fmt].bpp; plane->drm_format = bdw_pixel_formats[fmt].drm_format; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 115dc6829c26..9ba1ed8176e2 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2447,10 +2447,11 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) { - struct intel_gvt_partial_pte *pos; + struct intel_gvt_partial_pte *pos, *next; - list_for_each_entry(pos, - &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) { + list_for_each_entry_safe(pos, next, + &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, + list) { gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n", pos->offset, pos->data); kfree(pos); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 05e9c2dd6183..f5a328b5290a 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -141,9 +141,9 @@ enum { struct intel_vgpu_submission_ops { const char *name; - int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask); - void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask); - void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask); + 
int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask); + void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask); + void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask); }; struct intel_vgpu_submission { @@ -481,7 +481,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); void intel_gvt_release_vgpu(struct intel_vgpu *vgpu); void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, - unsigned int engine_mask); + intel_engine_mask_t engine_mask); void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu); void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu); @@ -589,7 +589,7 @@ static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv) static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv) { - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } /** diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 1a343f99b3b4..18f01eeb2510 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - unsigned int engine_mask = 0; + intel_engine_mask_t engine_mask = 0; u32 data; write_vreg(vgpu, offset, p_data, bytes); @@ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, } else { if (data & GEN6_GRDOM_RENDER) { gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); - engine_mask |= (1 << RCS); + engine_mask |= BIT(RCS0); } if (data & GEN6_GRDOM_MEDIA) { gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); - engine_mask |= (1 << VCS); + engine_mask |= BIT(VCS0); } if (data & GEN6_GRDOM_BLT) { gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); - engine_mask |= (1 << BCS); + engine_mask |= BIT(BCS0); } if (data & GEN6_GRDOM_VECS) { gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); - engine_mask |= (1 << VECS); + engine_mask |= BIT(VECS0); } if (data & GEN8_GRDOM_MEDIA2) { gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); - if (HAS_BSD2(vgpu->gvt->dev_priv)) - engine_mask |= (1 << VCS2); + engine_mask |= BIT(VCS1); } + engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask; } /* vgpu_lock already hold by emulate mmio r/w */ @@ -1729,7 +1729,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; ret = intel_vgpu_select_submission_ops(vgpu, - ENGINE_MASK(ring_id), + BIT(ring_id), INTEL_VGPU_EXECLIST_SUBMISSION); if (ret) return ret; @@ -1749,19 +1749,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, switch (offset) { case 0x4260: - id = RCS; + id = RCS0; break; case 0x4264: - id = VCS; + id = VCS0; break; case 0x4268: - id = VCS2; + id = VCS1; break; case 0x426c: - id = BCS; + id = BCS0; break; case 0x4270: - id = VECS; + id = VECS0; break; default: return -EINVAL; @@ -1818,7 +1818,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ - if (HAS_BSD2(dev_priv)) \ + if (HAS_ENGINE(dev_priv, VCS1)) \ MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ } while (0) @@ -1873,7 +1873,7 @@ static int 
init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL); MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL); - MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); + MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL); MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL); MMIO_D(GEN7_CXT_SIZE, D_ALL); @@ -2836,6 +2836,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 5e01cc8d9b16..4862fb12778e 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -47,7 +47,7 @@ struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt, const void *ops); void (*host_exit)(struct device *dev); int (*attach_vgpu)(void *vgpu, unsigned long *handle); - void (*detach_vgpu)(unsigned long handle); + void (*detach_vgpu)(void *vgpu); int (*inject_msi)(unsigned long handle, u32 addr, u16 data); unsigned long (*from_virt_to_mfn)(void *p); int (*enable_page_track)(unsigned long handle, u64 gfn); diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 67125c5eec6e..951681813230 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -536,7 +536,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); - if (HAS_BSD2(gvt->dev_priv)) { + if (HAS_ENGINE(gvt->dev_priv, VCS1)) { SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 63eef86a2a85..d5fcc447d22f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1147,7 +1147,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) { unsigned int index; u64 virtaddr; - unsigned long req_size, pgoff = 0; + unsigned long req_size, pgoff, req_start; pgprot_t pg_prot; struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); @@ -1165,7 +1165,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) pg_prot = vma->vm_page_prot; virtaddr = vma->vm_start; req_size = vma->vm_end - vma->vm_start; - pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + req_start = pgoff << PAGE_SHIFT; + + if (!intel_vgpu_in_aperture(vgpu, req_start)) + return -EINVAL; + if (req_start + req_size > + vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) + return -EINVAL; + + pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); } @@ -1813,9 +1823,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) return 0; } -static void kvmgt_detach_vgpu(unsigned long handle) +static void kvmgt_detach_vgpu(void *p_vgpu) { - /* nothing to do here */ + int i; + struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; + + if (!vgpu->vdev.region) + return; + + for (i = 0; i < vgpu->vdev.num_regions; i++) + if (vgpu->vdev.region[i].ops->release) + vgpu->vdev.region[i].ops->release(vgpu, + 
&vgpu->vdev.region[i]); + vgpu->vdev.num_regions = 0; + kfree(vgpu->vdev.region); + vgpu->vdev.region = NULL; } static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 893de7267b1f..76630fbe51b6 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -41,102 +41,102 @@ /* Raw offset is appened to each line for convenience. */ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { - {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ - {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ - {RCS, HWSTAM, 0x0, false}, /* 0x2098 */ - {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ - {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ - {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ - {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ - {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ - {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ - {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ - - {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ - {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ - {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ - {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ - {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ - {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */ + {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ + {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ + {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ + {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ + {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ + {RCS0, 
GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ + {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ + {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ + {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ + {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ + + {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ + {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ + {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ + {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ + {BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ + {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ }; static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { - {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ - {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ - {RCS, HWSTAM, 0x0, false}, /* 0x2098 */ - {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ - {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ - {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ - {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ - {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ - {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ - {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ - - {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */ - {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */ - {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */ - {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ - {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ - {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */ - {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ - {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */ - {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ - {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ - {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ - {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ - {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ - {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ - {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */ - {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */ - {RCS, TRVADR, 0, false}, /* 0x4df0 */ - {RCS, TRTTE, 0, false}, /* 0x4df4 */ - - {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ - {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ - {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ - {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ - {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ - - {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */ - - {VECS, RING_EXCC(VEBOX_RING_BASE), 
0xffff, false}, /* 0x1a028 */ - - {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */ - {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ - {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */ - {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ - - {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ - {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ - - {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ - {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ - {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ - {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */ + {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ + {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ + {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ + {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ + {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ + {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ + {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ + {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ + {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ + {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ + + {RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */ + {RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */ + {RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */ + {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ + {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ + {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */ + {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ + {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */ + {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ + {RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ + {RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ + {RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ + {RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ + {RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ + {RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */ + {RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */ + {RCS0, TRVADR, 0, false}, /* 0x4df0 */ + {RCS0, TRTTE, 0, false}, /* 0x4df4 */ + + {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ + {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ + {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ + {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ + {BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ + + {VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */ + + {VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */ + + {RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */ + {RCS0, 
GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ + {RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */ + {RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ + + {RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ + {RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ + + {RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ + {RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ + {RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ + {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ }; static struct { @@ -149,15 +149,17 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) { i915_reg_t offset; u32 regs[] = { - [RCS] = 0xc800, - [VCS] = 0xc900, - [VCS2] = 0xca00, - [BCS] = 0xcc00, - [VECS] = 0xcb00, + [RCS0] = 0xc800, + [VCS0] = 0xc900, + [VCS1] = 0xca00, + [BCS0] = 0xcc00, + [VECS0] = 0xcb00, }; int ring_id, i; for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) { + if (!HAS_ENGINE(dev_priv, ring_id)) + continue; offset.reg = regs[ring_id]; for (i = 0; i < GEN9_MOCS_SIZE; i++) { gen9_render_mocs.control_table[ring_id][i] = @@ -299,7 +301,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, goto out; /* no MOCS register in context except render engine */ - if (req->engine->id != RCS) + if (req->engine->id != RCS0) goto out; ret = restore_render_mocs_control_for_inhibit(vgpu, req); @@ -325,15 +327,16 @@ out: static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_uncore *uncore = &dev_priv->uncore; struct intel_vgpu_submission *s = &vgpu->submission; enum forcewake_domains fw; i915_reg_t reg; u32 regs[] = { - [RCS] = 0x4260, - [VCS] = 0x4264, - [VCS2] = 0x4268, - [BCS] = 0x426c, - [VECS] = 0x4270, + [RCS0] = 0x4260, + [VCS0] = 0x4264, + [VCS1] = 0x4268, + [BCS0] = 0x426c, + [VECS0] = 0x4270, }; if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) @@ -349,21 +352,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) * otherwise device can go to RC6 state and interrupt invalidation * process */ - fw = intel_uncore_forcewake_for_reg(dev_priv, reg, + fw = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9)) + if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9) fw |= FORCEWAKE_RENDER; - intel_uncore_forcewake_get(dev_priv, fw); + intel_uncore_forcewake_get(uncore, fw); - I915_WRITE_FW(reg, 0x1); + intel_uncore_write_fw(uncore, reg, 0x1); - if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) + if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50)) gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id); else vgpu_vreg_t(vgpu, reg) = 0; - intel_uncore_forcewake_put(dev_priv, fw); + intel_uncore_forcewake_put(uncore, fw); gvt_dbg_core("invalidate TLB for ring %d\n", ring_id); } @@ -376,11 +379,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, u32 old_v, new_v; u32 regs[] = { - [RCS] = 0xc800, - [VCS] = 0xc900, - [VCS2] = 0xca00, - [BCS] = 0xcc00, - [VECS] = 0xcb00, + [RCS0] = 0xc800, + [VCS0] = 0xc900, + [VCS1] = 0xca00, + [BCS0] = 0xcc00, + [VECS0] = 0xcb00, }; int i; @@ -388,8 +391,10 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv) - || IS_COFFEELAKE(dev_priv)) && ring_id == RCS) + if (ring_id == RCS0 && + (IS_KABYLAKE(dev_priv) || + IS_BROXTON(dev_priv) || + IS_COFFEELAKE(dev_priv))) return; if (!pre && 
!gen9_render_mocs.initialized) @@ -412,7 +417,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, offset.reg += 4; } - if (ring_id == RCS) { + if (ring_id == RCS0) { l3_offset.reg = 0xb020; for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { if (pre) @@ -490,7 +495,8 @@ static void switch_mmio(struct intel_vgpu *pre, * itself. */ if (mmio->in_context && - !is_inhibit_context(&s->shadow_ctx->__engine[ring_id])) + !is_inhibit_context(intel_context_lookup(s->shadow_ctx, + dev_priv->engine[ring_id]))) continue; if (mmio->mask) @@ -547,9 +553,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, * performace for batch mmio read/write, so we need * handle forcewake mannually. */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); switch_mmio(pre, next, ring_id); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } /** diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 5d8b8f228d8f..0f9440128123 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -99,7 +99,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) if (!intel_gvt_host.mpt->detach_vgpu) return; - intel_gvt_host.mpt->detach_vgpu(vgpu->handle); + intel_gvt_host.mpt->detach_vgpu(vgpu); } #define MSI_CAP_CONTROL(offset) (offset + 2) diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 951cfee85902..1c763a27a412 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index fb7445baa139..b385edbeaa30 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -93,7 +93,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload, i915_mmio_reg_offset(EU_PERF_CNTL6), }; - if (workload->ring_id != RCS) + if (workload->ring_id != RCS0) return; if (save) { @@ -149,7 +149,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) COPY_REG_MASKED(ctx_ctrl); COPY_REG(ctx_timestamp); - if (ring_id == RCS) { + if (ring_id == RCS0) { COPY_REG(bb_per_ctx_ptr); COPY_REG(rcs_indirect_ctx); COPY_REG(rcs_indirect_ctx_offset); @@ -177,7 +177,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) + if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0) context_page_num = 19; i = 2; @@ -333,6 +333,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); i915_gem_object_put(wa_ctx->indirect_ctx.obj); + + wa_ctx->indirect_ctx.obj = NULL; + wa_ctx->indirect_ctx.shadow_va = NULL; } static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, @@ -357,6 +360,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, return 0; } +static int +intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) +{ + struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; + struct 
i915_gem_context *shadow_ctx = s->shadow_ctx; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; + struct i915_request *rq; + int ret = 0; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + if (workload->req) + goto out; + + rq = i915_request_alloc(engine, shadow_ctx); + if (IS_ERR(rq)) { + gvt_vgpu_err("fail to allocate gem request\n"); + ret = PTR_ERR(rq); + goto out; + } + workload->req = i915_request_get(rq); +out: + return ret; +} + /** * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and * shadow it as well, include ringbuffer,wa_ctx and ctx. @@ -373,12 +403,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; struct intel_context *ce; - struct i915_request *rq; int ret; lockdep_assert_held(&dev_priv->drm.struct_mutex); - if (workload->req) + if (workload->shadow) return 0; ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); @@ -411,29 +440,14 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) if (ret) goto err_unpin; - if ((workload->ring_id == RCS) && - (workload->wa_ctx.indirect_ctx.size != 0)) { + if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) { ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if (ret) goto err_shadow; } - rq = i915_request_alloc(engine, shadow_ctx); - if (IS_ERR(rq)) { - gvt_vgpu_err("fail to allocate gem request\n"); - ret = PTR_ERR(rq); - goto err_shadow; - } - workload->req = i915_request_get(rq); - - ret = populate_shadow_context(workload); - if (ret) - goto err_req; - + workload->shadow = true; return 0; -err_req: - rq = fetch_and_zero(&workload->req); - i915_request_put(rq); err_shadow: release_shadow_wa_ctx(&workload->wa_ctx); err_unpin: @@ -672,23 +686,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&vgpu->vgpu_lock); mutex_lock(&dev_priv->drm.struct_mutex); + ret = intel_gvt_workload_req_alloc(workload); + if (ret) + goto err_req; + ret = intel_gvt_scan_and_shadow_workload(workload); if (ret) goto out; - ret = prepare_workload(workload); + ret = populate_shadow_context(workload); + if (ret) { + release_shadow_wa_ctx(&workload->wa_ctx); + goto out; + } + ret = prepare_workload(workload); out: - if (ret) - workload->status = ret; - if (!IS_ERR_OR_NULL(workload->req)) { gvt_dbg_sched("ring id %d submit workload to i915 %p\n", ring_id, workload->req); i915_request_add(workload->req); workload->dispatched = true; } - +err_req: + if (ret) + workload->status = ret; mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&vgpu->vgpu_lock); return ret; @@ -768,7 +790,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) context_page_num = rq->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS) + if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0) context_page_num = 19; i = 2; @@ -816,13 +838,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload) } void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, - unsigned long engine_mask) + intel_engine_mask_t engine_mask) { struct intel_vgpu_submission *s = &vgpu->submission; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_engine_cs *engine; struct intel_vgpu_workload *pos, *n; - unsigned int tmp; + 
intel_engine_mask_t tmp; /* free the unsubmited workloads in the queues. */ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { @@ -868,8 +890,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) workload->status = 0; } - if (!workload->status && !(vgpu->resetting_eng & - ENGINE_MASK(ring_id))) { + if (!workload->status && + !(vgpu->resetting_eng & BIT(ring_id))) { update_guest_context(workload); for_each_set_bit(event, workload->pending_events, @@ -892,12 +914,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) list_del_init(&workload->list); - if (!workload->status) { - release_shadow_batch_buffer(workload); - release_shadow_wa_ctx(&workload->wa_ctx); - } - - if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { + if (workload->status || vgpu->resetting_eng & BIT(ring_id)) { /* if workload->status is not successful means HW GPU * has occurred GPU hang or something wrong with i915/GVT, * and GVT won't inject context switch interrupt to guest. @@ -911,7 +928,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) * cleaned up during the resetting process later, so doing * the workload clean up here doesn't have any impact. **/ - intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id)); + intel_vgpu_clean_workloads(vgpu, BIT(ring_id)); } workload->complete(workload); @@ -971,7 +988,7 @@ static int workload_thread(void *priv) workload->ring_id, workload); if (need_force_wake) - intel_uncore_forcewake_get(gvt->dev_priv, + intel_uncore_forcewake_get(&gvt->dev_priv->uncore, FORCEWAKE_ALL); ret = dispatch_workload(workload); @@ -993,10 +1010,10 @@ complete: complete_current_workload(gvt, ring_id); if (need_force_wake) - intel_uncore_forcewake_put(gvt->dev_priv, + intel_uncore_forcewake_put(&gvt->dev_priv->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(gvt->dev_priv); + intel_runtime_pm_put_unchecked(gvt->dev_priv); if (ret && (vgpu_is_vm_unhealthy(ret))) enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); } @@ -1084,9 +1101,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s) struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt; int i; - if (i915_vm_is_48bit(&i915_ppgtt->vm)) + if (i915_vm_is_4lvl(&i915_ppgtt->vm)) { px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4; - else { + } else { for (i = 0; i < GEN8_3LVL_PDPES; i++) px_dma(i915_ppgtt->pdp.page_directory[i]) = s->i915_context_pdps[i]; @@ -1120,7 +1137,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) * */ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu, - unsigned long engine_mask) + intel_engine_mask_t engine_mask) { struct intel_vgpu_submission *s = &vgpu->submission; @@ -1137,7 +1154,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s) struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt; int i; - if (i915_vm_is_48bit(&i915_ppgtt->vm)) + if (i915_vm_is_4lvl(&i915_ppgtt->vm)) s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4); else { for (i = 0; i < GEN8_3LVL_PDPES; i++) @@ -1210,7 +1227,7 @@ out_shadow_ctx: * */ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, - unsigned long engine_mask, + intel_engine_mask_t engine_mask, unsigned int interface) { struct intel_vgpu_submission *s = &vgpu->submission; @@ -1262,6 +1279,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu_submission *s = &workload->vgpu->submission; + release_shadow_batch_buffer(workload); + release_shadow_wa_ctx(&workload->wa_ctx); + if (workload->shadow_mm) 
intel_vgpu_mm_put(workload->shadow_mm); @@ -1417,7 +1437,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, workload->rb_start = start; workload->rb_ctl = ctl; - if (ring_id == RCS) { + if (ring_id == RCS0) { intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + @@ -1450,7 +1470,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } if (ret && (vgpu_is_vm_unhealthy(ret))) { diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 1e9eec6a32fe..90c6756f5453 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -83,6 +83,7 @@ struct intel_vgpu_workload { struct i915_request *req; /* if this workload has been dispatched to i915? */ bool dispatched; + bool shadow; /* if workload has done shadow of guest request */ int status; struct intel_vgpu_mm *shadow_mm; @@ -141,12 +142,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu); int intel_vgpu_setup_submission(struct intel_vgpu *vgpu); void intel_vgpu_reset_submission(struct intel_vgpu *vgpu, - unsigned long engine_mask); + intel_engine_mask_t engine_mask); void intel_vgpu_clean_submission(struct intel_vgpu *vgpu); int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, - unsigned long engine_mask, + intel_engine_mask_t engine_mask, unsigned int interface); extern const struct intel_vgpu_submission_ops @@ -159,6 +160,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload); void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, - unsigned long engine_mask); + intel_engine_mask_t engine_mask); #endif diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 39682b065a71..44ce3c2b9ac1 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -44,7 +44,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0; vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id; - vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT; + vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT; vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION; vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT; @@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm, high_avail / vgpu_types[i].high_mm); - if (IS_GEN8(gvt->dev_priv)) + if (IS_GEN(gvt->dev_priv, 8)) sprintf(gvt->types[i].name, "GVTg_V4_%s", vgpu_types[i].name); - else if (IS_GEN9(gvt->dev_priv)) + else if (IS_GEN(gvt->dev_priv, 9)) sprintf(gvt->types[i].name, "GVTg_V5_%s", vgpu_types[i].name); @@ -526,11 +526,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, * GPU engines. For FLR, engine_mask is ignored. */ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, - unsigned int engine_mask) + intel_engine_mask_t engine_mask) { struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask; + intel_engine_mask_t resetting_eng = dmlr ? 
ALL_ENGINES : engine_mask; gvt_dbg_core("------------------------------------------\n"); gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n", diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c new file mode 100644 index 000000000000..863ae12707ba --- /dev/null +++ b/drivers/gpu/drm/i915/i915_active.c @@ -0,0 +1,313 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_active.h" +#include "i915_globals.h" + +#define BKL(ref) (&(ref)->i915->drm.struct_mutex) + +/* + * Active refs memory management + * + * To be more economical with memory, we reap all the i915_active trees as + * they idle (when we know the active requests are inactive) and allocate the + * nodes from a local slab cache to hopefully reduce the fragmentation. + */ +static struct i915_global_active { + struct i915_global base; + struct kmem_cache *slab_cache; +} global; + +struct active_node { + struct i915_active_request base; + struct i915_active *ref; + struct rb_node node; + u64 timeline; +}; + +static void +__active_park(struct i915_active *ref) +{ + struct active_node *it, *n; + + rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { + GEM_BUG_ON(i915_active_request_isset(&it->base)); + kmem_cache_free(global.slab_cache, it); + } + ref->tree = RB_ROOT; +} + +static void +__active_retire(struct i915_active *ref) +{ + GEM_BUG_ON(!ref->count); + if (--ref->count) + return; + + /* return the unused nodes to our slabcache */ + __active_park(ref); + + ref->retire(ref); +} + +static void +node_retire(struct i915_active_request *base, struct i915_request *rq) +{ + __active_retire(container_of(base, struct active_node, base)->ref); +} + +static void +last_retire(struct i915_active_request *base, struct i915_request *rq) +{ + __active_retire(container_of(base, struct i915_active, last)); +} + +static struct i915_active_request * +active_instance(struct i915_active *ref, u64 idx) +{ + struct active_node *node; + struct rb_node **p, *parent; + struct i915_request *old; + + /* + * We track the most recently used timeline to skip a rbtree search + * for the common case, under typical loads we never need the rbtree + * at all. We can reuse the last slot if it is empty, that is + * after the previous activity has been retired, or if it matches the + * current timeline. + * + * Note that we allow the timeline to be active simultaneously in + * the rbtree and the last cache. We do this to avoid having + * to search and replace the rbtree element for a new timeline, with + * the cost being that we must be aware that the ref may be retired + * twice for the same timeline (as the older rbtree element will be + * retired before the new request added to last). + */ + old = i915_active_request_raw(&ref->last, BKL(ref)); + if (!old || old->fence.context == idx) + goto out; + + /* Move the currently active fence into the rbtree */ + idx = old->fence.context; + + parent = NULL; + p = &ref->tree.rb_node; + while (*p) { + parent = *p; + + node = rb_entry(parent, struct active_node, node); + if (node->timeline == idx) + goto replace; + + if (node->timeline < idx) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + + node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); + + /* kmalloc may retire the ref->last (thanks shrinker)! 
*/ + if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) { + kmem_cache_free(global.slab_cache, node); + goto out; + } + + if (unlikely(!node)) + return ERR_PTR(-ENOMEM); + + i915_active_request_init(&node->base, NULL, node_retire); + node->ref = ref; + node->timeline = idx; + + rb_link_node(&node->node, parent, p); + rb_insert_color(&node->node, &ref->tree); + +replace: + /* + * Overwrite the previous active slot in the rbtree with last, + * leaving last zeroed. If the previous slot is still active, + * we must be careful as we now only expect to receive one retire + * callback not two, and so much undo the active counting for the + * overwritten slot. + */ + if (i915_active_request_isset(&node->base)) { + /* Retire ourselves from the old rq->active_list */ + __list_del_entry(&node->base.link); + ref->count--; + GEM_BUG_ON(!ref->count); + } + GEM_BUG_ON(list_empty(&ref->last.link)); + list_replace_init(&ref->last.link, &node->base.link); + node->base.request = fetch_and_zero(&ref->last.request); + +out: + return &ref->last; +} + +void i915_active_init(struct drm_i915_private *i915, + struct i915_active *ref, + void (*retire)(struct i915_active *ref)) +{ + ref->i915 = i915; + ref->retire = retire; + ref->tree = RB_ROOT; + i915_active_request_init(&ref->last, NULL, last_retire); + ref->count = 0; +} + +int i915_active_ref(struct i915_active *ref, + u64 timeline, + struct i915_request *rq) +{ + struct i915_active_request *active; + int err = 0; + + /* Prevent reaping in case we malloc/wait while building the tree */ + i915_active_acquire(ref); + + active = active_instance(ref, timeline); + if (IS_ERR(active)) { + err = PTR_ERR(active); + goto out; + } + + if (!i915_active_request_isset(active)) + ref->count++; + __i915_active_request_set(active, rq); + + GEM_BUG_ON(!ref->count); +out: + i915_active_release(ref); + return err; +} + +bool i915_active_acquire(struct i915_active *ref) +{ + lockdep_assert_held(BKL(ref)); + return !ref->count++; +} + +void i915_active_release(struct i915_active *ref) +{ + lockdep_assert_held(BKL(ref)); + __active_retire(ref); +} + +int i915_active_wait(struct i915_active *ref) +{ + struct active_node *it, *n; + int ret = 0; + + if (i915_active_acquire(ref)) + goto out_release; + + ret = i915_active_request_retire(&ref->last, BKL(ref)); + if (ret) + goto out_release; + + rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { + ret = i915_active_request_retire(&it->base, BKL(ref)); + if (ret) + break; + } + +out_release: + i915_active_release(ref); + return ret; +} + +int i915_request_await_active_request(struct i915_request *rq, + struct i915_active_request *active) +{ + struct i915_request *barrier = + i915_active_request_raw(active, &rq->i915->drm.struct_mutex); + + return barrier ? 
i915_request_await_dma_fence(rq, &barrier->fence) : 0; +} + +int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) +{ + struct active_node *it, *n; + int err = 0; + + /* await allocates and so we need to avoid hitting the shrinker */ + if (i915_active_acquire(ref)) + goto out; /* was idle */ + + err = i915_request_await_active_request(rq, &ref->last); + if (err) + goto out; + + rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { + err = i915_request_await_active_request(rq, &it->base); + if (err) + goto out; + } + +out: + i915_active_release(ref); + return err; +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +void i915_active_fini(struct i915_active *ref) +{ + GEM_BUG_ON(i915_active_request_isset(&ref->last)); + GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree)); + GEM_BUG_ON(ref->count); +} +#endif + +int i915_active_request_set(struct i915_active_request *active, + struct i915_request *rq) +{ + int err; + + /* Must maintain ordering wrt previous active requests */ + err = i915_request_await_active_request(rq, active); + if (err) + return err; + + __i915_active_request_set(active, rq); + return 0; +} + +void i915_active_retire_noop(struct i915_active_request *active, + struct i915_request *request) +{ + /* Space left intentionally blank */ +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/i915_active.c" +#endif + +static void i915_global_active_shrink(void) +{ + kmem_cache_shrink(global.slab_cache); +} + +static void i915_global_active_exit(void) +{ + kmem_cache_destroy(global.slab_cache); +} + +static struct i915_global_active global = { { + .shrink = i915_global_active_shrink, + .exit = i915_global_active_exit, +} }; + +int __init i915_global_active_init(void) +{ + global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN); + if (!global.slab_cache) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h new file mode 100644 index 000000000000..7d758719ce39 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_active.h @@ -0,0 +1,409 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef _I915_ACTIVE_H_ +#define _I915_ACTIVE_H_ + +#include <linux/lockdep.h> + +#include "i915_active_types.h" +#include "i915_request.h" + +/* + * We treat requests as fences. This is not be to confused with our + * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync. + * We use the fences to synchronize access from the CPU with activity on the + * GPU, for example, we should not rewrite an object's PTE whilst the GPU + * is reading them. We also track fences at a higher level to provide + * implicit synchronisation around GEM objects, e.g. set-domain will wait + * for outstanding GPU rendering before marking the object ready for CPU + * access, or a pageflip will wait until the GPU is complete before showing + * the frame on the scanout. + * + * In order to use a fence, the object must track the fence it needs to + * serialise with. For example, GEM objects want to track both read and + * write access so that we can perform concurrent read operations between + * the CPU and GPU engines, as well as waiting for all rendering to + * complete, or waiting for the last GPU user of a "fence register". The + * object then embeds a #i915_active_request to track the most recent (in + * retirement order) request relevant for the desired mode of access. 
+ * The #i915_active_request is updated with i915_active_request_set() to + * track the most recent fence request, typically this is done as part of + * i915_vma_move_to_active(). + * + * When the #i915_active_request completes (is retired), it will + * signal its completion to the owner through a callback as well as mark + * itself as idle (i915_active_request.request == NULL). The owner + * can then perform any action, such as delayed freeing of an active + * resource including itself. + */ + +void i915_active_retire_noop(struct i915_active_request *active, + struct i915_request *request); + +/** + * i915_active_request_init - prepares the activity tracker for use + * @active - the active tracker + * @rq - initial request to track, can be NULL + * @func - a callback when then the tracker is retired (becomes idle), + * can be NULL + * + * i915_active_request_init() prepares the embedded @active struct for use as + * an activity tracker, that is for tracking the last known active request + * associated with it. When the last request becomes idle, when it is retired + * after completion, the optional callback @func is invoked. + */ +static inline void +i915_active_request_init(struct i915_active_request *active, + struct i915_request *rq, + i915_active_retire_fn retire) +{ + RCU_INIT_POINTER(active->request, rq); + INIT_LIST_HEAD(&active->link); + active->retire = retire ?: i915_active_retire_noop; +} + +#define INIT_ACTIVE_REQUEST(name) i915_active_request_init((name), NULL, NULL) + +/** + * i915_active_request_set - updates the tracker to watch the current request + * @active - the active tracker + * @request - the request to watch + * + * __i915_active_request_set() watches the given @request for completion. Whilst + * that @request is busy, the @active reports busy. When that @request is + * retired, the @active tracker is updated to report idle. + */ +static inline void +__i915_active_request_set(struct i915_active_request *active, + struct i915_request *request) +{ + list_move(&active->link, &request->active_list); + rcu_assign_pointer(active->request, request); +} + +int __must_check +i915_active_request_set(struct i915_active_request *active, + struct i915_request *rq); + +/** + * i915_active_request_set_retire_fn - updates the retirement callback + * @active - the active tracker + * @fn - the routine called when the request is retired + * @mutex - struct_mutex used to guard retirements + * + * i915_active_request_set_retire_fn() updates the function pointer that + * is called when the final request associated with the @active tracker + * is retired. + */ +static inline void +i915_active_request_set_retire_fn(struct i915_active_request *active, + i915_active_retire_fn fn, + struct mutex *mutex) +{ + lockdep_assert_held(mutex); + active->retire = fn ?: i915_active_retire_noop; +} + +/** + * i915_active_request_raw - return the active request + * @active - the active tracker + * + * i915_active_request_raw() returns the current request being tracked, or NULL. + * It does not obtain a reference on the request for the caller, so the caller + * must hold struct_mutex. 
+ */ +static inline struct i915_request * +i915_active_request_raw(const struct i915_active_request *active, + struct mutex *mutex) +{ + return rcu_dereference_protected(active->request, + lockdep_is_held(mutex)); +} + +/** + * i915_active_request_peek - report the active request being monitored + * @active - the active tracker + * + * i915_active_request_peek() returns the current request being tracked if + * still active, or NULL. It does not obtain a reference on the request + * for the caller, so the caller must hold struct_mutex. + */ +static inline struct i915_request * +i915_active_request_peek(const struct i915_active_request *active, + struct mutex *mutex) +{ + struct i915_request *request; + + request = i915_active_request_raw(active, mutex); + if (!request || i915_request_completed(request)) + return NULL; + + return request; +} + +/** + * i915_active_request_get - return a reference to the active request + * @active - the active tracker + * + * i915_active_request_get() returns a reference to the active request, or NULL + * if the active tracker is idle. The caller must hold struct_mutex. + */ +static inline struct i915_request * +i915_active_request_get(const struct i915_active_request *active, + struct mutex *mutex) +{ + return i915_request_get(i915_active_request_peek(active, mutex)); +} + +/** + * __i915_active_request_get_rcu - return a reference to the active request + * @active - the active tracker + * + * __i915_active_request_get() returns a reference to the active request, + * or NULL if the active tracker is idle. The caller must hold the RCU read + * lock, but the returned pointer is safe to use outside of RCU. + */ +static inline struct i915_request * +__i915_active_request_get_rcu(const struct i915_active_request *active) +{ + /* + * Performing a lockless retrieval of the active request is super + * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing + * slab of request objects will not be freed whilst we hold the + * RCU read lock. It does not guarantee that the request itself + * will not be freed and then *reused*. Viz, + * + * Thread A Thread B + * + * rq = active.request + * retire(rq) -> free(rq); + * (rq is now first on the slab freelist) + * active.request = NULL + * + * rq = new submission on a new object + * ref(rq) + * + * To prevent the request from being reused whilst the caller + * uses it, we take a reference like normal. Whilst acquiring + * the reference we check that it is not in a destroyed state + * (refcnt == 0). That prevents the request being reallocated + * whilst the caller holds on to it. To check that the request + * was not reallocated as we acquired the reference we have to + * check that our request remains the active request across + * the lookup, in the same manner as a seqlock. The visibility + * of the pointer versus the reference counting is controlled + * by using RCU barriers (rcu_dereference and rcu_assign_pointer). + * + * In the middle of all that, we inspect whether the request is + * complete. Retiring is lazy so the request may be completed long + * before the active tracker is updated. Querying whether the + * request is complete is far cheaper (as it involves no locked + * instructions setting cachelines to exclusive) than acquiring + * the reference, so we do it first. The RCU read lock ensures the + * pointer dereference is valid, but does not ensure that the + * seqno nor HWS is the right one! However, if the request was + * reallocated, that means the active tracker's request was complete. 
+ * If the new request is also complete, then both are and we can + * just report the active tracker is idle. If the new request is + * incomplete, then we acquire a reference on it and check that + * it remained the active request. + * + * It is then imperative that we do not zero the request on + * reallocation, so that we can chase the dangling pointers! + * See i915_request_alloc(). + */ + do { + struct i915_request *request; + + request = rcu_dereference(active->request); + if (!request || i915_request_completed(request)) + return NULL; + + /* + * An especially silly compiler could decide to recompute the + * result of i915_request_completed, more specifically + * re-emit the load for request->fence.seqno. A race would catch + * a later seqno value, which could flip the result from true to + * false. Which means part of the instructions below might not + * be executed, while later on instructions are executed. Due to + * barriers within the refcounting the inconsistency can't reach + * past the call to i915_request_get_rcu, but not executing + * that while still executing i915_request_put() creates + * havoc enough. Prevent this with a compiler barrier. + */ + barrier(); + + request = i915_request_get_rcu(request); + + /* + * What stops the following rcu_access_pointer() from occurring + * before the above i915_request_get_rcu()? If we were + * to read the value before pausing to get the reference to + * the request, we may not notice a change in the active + * tracker. + * + * The rcu_access_pointer() is a mere compiler barrier, which + * means both the CPU and compiler are free to perform the + * memory read without constraint. The compiler only has to + * ensure that any operations after the rcu_access_pointer() + * occur afterwards in program order. This means the read may + * be performed earlier by an out-of-order CPU, or adventurous + * compiler. + * + * The atomic operation at the heart of + * i915_request_get_rcu(), see dma_fence_get_rcu(), is + * atomic_inc_not_zero() which is only a full memory barrier + * when successful. That is, if i915_request_get_rcu() + * returns the request (and so with the reference counted + * incremented) then the following read for rcu_access_pointer() + * must occur after the atomic operation and so confirm + * that this request is the one currently being tracked. + * + * The corresponding write barrier is part of + * rcu_assign_pointer(). + */ + if (!request || request == rcu_access_pointer(active->request)) + return rcu_pointer_handoff(request); + + i915_request_put(request); + } while (1); +} + +/** + * i915_active_request_get_unlocked - return a reference to the active request + * @active - the active tracker + * + * i915_active_request_get_unlocked() returns a reference to the active request, + * or NULL if the active tracker is idle. The reference is obtained under RCU, + * so no locking is required by the caller. + * + * The reference should be freed with i915_request_put(). + */ +static inline struct i915_request * +i915_active_request_get_unlocked(const struct i915_active_request *active) +{ + struct i915_request *request; + + rcu_read_lock(); + request = __i915_active_request_get_rcu(active); + rcu_read_unlock(); + + return request; +} + +/** + * i915_active_request_isset - report whether the active tracker is assigned + * @active - the active tracker + * + * i915_active_request_isset() returns true if the active tracker is currently + * assigned to a request. 
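For completeness, a small invented example of the unlocked accessor above (my_resource_busy() is not a real driver function); the reference it obtains must be dropped with i915_request_put(), as the kernel-doc notes.

/* Query, without holding struct_mutex, whether the tracked request is busy. */
static bool my_resource_busy(const struct i915_active_request *active)
{
	struct i915_request *rq;
	bool busy = false;

	rq = i915_active_request_get_unlocked(active);
	if (rq) {
		/* rq may have completed since the lookup; treat that as idle. */
		busy = !i915_request_completed(rq);
		i915_request_put(rq);
	}

	return busy;
}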
Due to the lazy retiring, that request may be idle + * and this may report stale information. + */ +static inline bool +i915_active_request_isset(const struct i915_active_request *active) +{ + return rcu_access_pointer(active->request); +} + +/** + * i915_active_request_retire - waits until the request is retired + * @active - the active request on which to wait + * + * i915_active_request_retire() waits until the request is completed, + * and then ensures that at least the retirement handler for this + * @active tracker is called before returning. If the @active + * tracker is idle, the function returns immediately. + */ +static inline int __must_check +i915_active_request_retire(struct i915_active_request *active, + struct mutex *mutex) +{ + struct i915_request *request; + long ret; + + request = i915_active_request_raw(active, mutex); + if (!request) + return 0; + + ret = i915_request_wait(request, + I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (ret < 0) + return ret; + + list_del_init(&active->link); + RCU_INIT_POINTER(active->request, NULL); + + active->retire(active, request); + + return 0; +} + +/* + * GPU activity tracking + * + * Each set of commands submitted to the GPU comprises a single request that + * signals a fence upon completion. struct i915_request combines the + * command submission, scheduling and fence signaling roles. If we want to see + * if a particular task is complete, we need to grab the fence (struct + * i915_request) for that task and check or wait for it to be signaled. More + * often though we want to track the status of a bunch of tasks, for example + * to wait for the GPU to finish accessing some memory across a variety of + * different command pipelines from different clients. We could choose to + * track every single request associated with the task, but knowing that + * each request belongs to an ordered timeline (later requests within a + * timeline must wait for earlier requests), we need only track the + * latest request in each timeline to determine the overall status of the + * task. + * + * struct i915_active provides this tracking across timelines. It builds a + * composite shared-fence, and is updated as new work is submitted to the task, + * forming a snapshot of the current status. It should be embedded into the + * different resources that need to track their associated GPU activity to + * provide a callback when that GPU activity has ceased, or otherwise to + * provide a serialisation point either for request submission or for CPU + * synchronisation.
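To ground that description, a condensed, hypothetical sketch of a resource using the interface declared just below (struct my_span and the my_span_*() helpers are invented; includes and error handling are omitted). Using the request's fence context as the timeline key is one plausible choice, not a requirement of the API.

struct my_span {
	struct i915_active active;	/* tracks the last request per timeline */
	void *backing;			/* hypothetical payload guarded by 'active' */
};

/* Invoked once every tracked timeline has retired its last request. */
static void my_span_retire(struct i915_active *ref)
{
	struct my_span *span = container_of(ref, struct my_span, active);

	/* No GPU user remains; the payload can now be released or reused. */
	kfree(span->backing);
	span->backing = NULL;
}

static void my_span_init(struct my_span *span, struct drm_i915_private *i915)
{
	i915_active_init(i915, &span->active, my_span_retire);
}

/* Record that rq reads or writes the span (called under struct_mutex). */
static int my_span_mark_active(struct my_span *span, struct i915_request *rq)
{
	return i915_active_ref(&span->active, rq->fence.context, rq);
}

/* Wait (interruptibly) for every tracked timeline to retire. */
static int my_span_sync(struct my_span *span)
{
	return i915_active_wait(&span->active);
}

A new request that must not execute before the span is idle could instead be ordered against it with i915_request_await_active(rq, &span->active).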
+ */ + +void i915_active_init(struct drm_i915_private *i915, + struct i915_active *ref, + void (*retire)(struct i915_active *ref)); + +int i915_active_ref(struct i915_active *ref, + u64 timeline, + struct i915_request *rq); + +int i915_active_wait(struct i915_active *ref); + +int i915_request_await_active(struct i915_request *rq, + struct i915_active *ref); +int i915_request_await_active_request(struct i915_request *rq, + struct i915_active_request *active); + +bool i915_active_acquire(struct i915_active *ref); + +static inline void i915_active_cancel(struct i915_active *ref) +{ + GEM_BUG_ON(ref->count != 1); + ref->count = 0; +} + +void i915_active_release(struct i915_active *ref); + +static inline bool +i915_active_is_idle(const struct i915_active *ref) +{ + return !ref->count; +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +void i915_active_fini(struct i915_active *ref); +#else +static inline void i915_active_fini(struct i915_active *ref) { } +#endif + +#endif /* _I915_ACTIVE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h new file mode 100644 index 000000000000..b679253b53a5 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef _I915_ACTIVE_TYPES_H_ +#define _I915_ACTIVE_TYPES_H_ + +#include <linux/rbtree.h> +#include <linux/rcupdate.h> + +struct drm_i915_private; +struct i915_active_request; +struct i915_request; + +typedef void (*i915_active_retire_fn)(struct i915_active_request *, + struct i915_request *); + +struct i915_active_request { + struct i915_request __rcu *request; + struct list_head link; + i915_active_retire_fn retire; +}; + +struct i915_active { + struct drm_i915_private *i915; + + struct rb_root tree; + struct i915_active_request last; + unsigned int count; + + void (*retire)(struct i915_active *ref); +}; + +#endif /* _I915_ACTIVE_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 95478db9998b..503d548a55f7 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -865,11 +865,11 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) int cmd_table_count; int ret; - if (!IS_GEN7(engine->i915)) + if (!IS_GEN(engine->i915, 7)) return; - switch (engine->id) { - case RCS: + switch (engine->class) { + case RENDER_CLASS: if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_render_ring_cmds; cmd_table_count = @@ -889,12 +889,12 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask; break; - case VCS: + case VIDEO_DECODE_CLASS: cmd_tables = gen7_video_cmds; cmd_table_count = ARRAY_SIZE(gen7_video_cmds); engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; - case BCS: + case COPY_ENGINE_CLASS: if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_blt_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); @@ -913,14 +913,14 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; break; - case VECS: + case VIDEO_ENHANCEMENT_CLASS: cmd_tables = hsw_vebox_cmds; cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds); /* VECS can use the same length_mask function as VCS */ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; default: - MISSING_CASE(engine->id); + MISSING_CASE(engine->class); return; } diff --git 
a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 38dcee1ca062..4dd2d9ae3202 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -26,12 +26,15 @@ * */ -#include <linux/debugfs.h> #include <linux/sort.h> #include <linux/sched/mm.h> +#include <drm/drm_debugfs.h> +#include <drm/drm_fourcc.h> #include "intel_drv.h" #include "intel_guc_submission.h" +#include "i915_reset.h" + static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { return to_i915(node->minor->dev); @@ -48,7 +51,7 @@ static int i915_capabilities(struct seq_file *m, void *data) seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv)); intel_device_info_dump_flags(info, &p); - intel_device_info_dump_runtime(info, &p); + intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p); intel_driver_caps_print(&dev_priv->caps, &p); kernel_param_lock(THIS_MODULE); @@ -157,14 +160,14 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : ""); if (obj->base.name) seq_printf(m, " (name: %d)", obj->base.name); - list_for_each_entry(vma, &obj->vma_list, obj_link) { + list_for_each_entry(vma, &obj->vma.list, obj_link) { if (i915_vma_is_pinned(vma)) pin_count++; } seq_printf(m, " (pinned x %d)", pin_count); if (obj->pin_global) seq_printf(m, " (global)"); - list_for_each_entry(vma, &obj->vma_list, obj_link) { + list_for_each_entry(vma, &obj->vma.list, obj_link) { if (!drm_mm_node_allocated(&vma->node)) continue; @@ -204,7 +207,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) if (vma->fence) seq_printf(m, " , fence: %d%s", vma->fence->id, - i915_gem_active_isset(&vma->last_fence) ? "*" : ""); + i915_active_request_isset(&vma->last_fence) ? 
"*" : ""); seq_puts(m, ")"); } if (obj->stolen) @@ -297,11 +300,12 @@ out: } struct file_stats { - struct drm_i915_file_private *file_priv; + struct i915_address_space *vm; unsigned long count; u64 total, unbound; u64 global, shared; u64 active, inactive; + u64 closed; }; static int per_file_stats(int id, void *ptr, void *data) @@ -319,16 +323,14 @@ static int per_file_stats(int id, void *ptr, void *data) if (obj->base.name || obj->base.dma_buf) stats->shared += obj->base.size; - list_for_each_entry(vma, &obj->vma_list, obj_link) { + list_for_each_entry(vma, &obj->vma.list, obj_link) { if (!drm_mm_node_allocated(&vma->node)) continue; if (i915_vma_is_ggtt(vma)) { stats->global += vma->node.size; } else { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); - - if (ppgtt->vm.file != stats->file_priv) + if (vma->vm != stats->vm) continue; } @@ -336,6 +338,9 @@ static int per_file_stats(int id, void *ptr, void *data) stats->active += vma->node.size; else stats->inactive += vma->node.size; + + if (i915_vma_is_closed(vma)) + stats->closed += vma->node.size; } return 0; @@ -343,7 +348,7 @@ static int per_file_stats(int id, void *ptr, void *data) #define print_file_stats(m, name, stats) do { \ if (stats.count) \ - seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \ + seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \ name, \ stats.count, \ stats.total, \ @@ -351,20 +356,19 @@ static int per_file_stats(int id, void *ptr, void *data) stats.inactive, \ stats.global, \ stats.shared, \ - stats.unbound); \ + stats.unbound, \ + stats.closed); \ } while (0) static void print_batch_pool_stats(struct seq_file *m, struct drm_i915_private *dev_priv) { struct drm_i915_gem_object *obj; - struct file_stats stats; struct intel_engine_cs *engine; + struct file_stats stats = {}; enum intel_engine_id id; int j; - memset(&stats, 0, sizeof(stats)); - for_each_engine(engine, dev_priv, id) { for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { list_for_each_entry(obj, @@ -377,44 +381,43 @@ static void print_batch_pool_stats(struct seq_file *m, print_file_stats(m, "[k]batch pool", stats); } -static int per_file_ctx_stats(int idx, void *ptr, void *data) +static void print_context_stats(struct seq_file *m, + struct drm_i915_private *i915) { - struct i915_gem_context *ctx = ptr; - struct intel_engine_cs *engine; - enum intel_engine_id id; + struct file_stats kstats = {}; + struct i915_gem_context *ctx; - for_each_engine(engine, ctx->i915, id) { - struct intel_context *ce = to_intel_context(ctx, engine); + list_for_each_entry(ctx, &i915->contexts.list, link) { + struct intel_context *ce; - if (ce->state) - per_file_stats(0, ce->state->obj, data); - if (ce->ring) - per_file_stats(0, ce->ring->vma->obj, data); - } - - return 0; -} + list_for_each_entry(ce, &ctx->active_engines, active_link) { + if (ce->state) + per_file_stats(0, ce->state->obj, &kstats); + if (ce->ring) + per_file_stats(0, ce->ring->vma->obj, &kstats); + } -static void print_context_stats(struct seq_file *m, - struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = &dev_priv->drm; - struct file_stats stats; - struct drm_file *file; + if (!IS_ERR_OR_NULL(ctx->file_priv)) { + struct file_stats stats = { .vm = &ctx->ppgtt->vm, }; + struct drm_file *file = ctx->file_priv->file; + struct task_struct *task; + char name[80]; - memset(&stats, 0, sizeof(stats)); + spin_lock(&file->table_lock); + 
idr_for_each(&file->object_idr, per_file_stats, &stats); + spin_unlock(&file->table_lock); - mutex_lock(&dev->struct_mutex); - if (dev_priv->kernel_context) - per_file_ctx_stats(0, dev_priv->kernel_context, &stats); + rcu_read_lock(); + task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID); + snprintf(name, sizeof(name), "%s", + task ? task->comm : "<unknown>"); + rcu_read_unlock(); - list_for_each_entry(file, &dev->filelist, lhead) { - struct drm_i915_file_private *fpriv = file->driver_priv; - idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats); + print_file_stats(m, name, stats); + } } - mutex_unlock(&dev->struct_mutex); - print_file_stats(m, "[k]contexts", stats); + print_file_stats(m, "[k]contexts", kstats); } static int i915_gem_object_info(struct seq_file *m, void *data) @@ -426,14 +429,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data) u64 size, mapped_size, purgeable_size, dpy_size, huge_size; struct drm_i915_gem_object *obj; unsigned int page_sizes = 0; - struct drm_file *file; char buf[80]; int ret; - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - seq_printf(m, "%u objects, %llu bytes\n", dev_priv->mm.object_count, dev_priv->mm.object_memory); @@ -514,43 +512,14 @@ static int i915_gem_object_info(struct seq_file *m, void *data) buf, sizeof(buf))); seq_putc(m, '\n'); - print_batch_pool_stats(m, dev_priv); - mutex_unlock(&dev->struct_mutex); - mutex_lock(&dev->filelist_mutex); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + print_batch_pool_stats(m, dev_priv); print_context_stats(m, dev_priv); - list_for_each_entry_reverse(file, &dev->filelist, lhead) { - struct file_stats stats; - struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_request *request; - struct task_struct *task; - - mutex_lock(&dev->struct_mutex); - - memset(&stats, 0, sizeof(stats)); - stats.file_priv = file->driver_priv; - spin_lock(&file->table_lock); - idr_for_each(&file->object_idr, per_file_stats, &stats); - spin_unlock(&file->table_lock); - /* - * Although we have a valid reference on file->pid, that does - * not guarantee that the task_struct who called get_pid() is - * still alive (e.g. get_pid(current) => fork() => exit()). - * Therefore, we need to protect this ->comm access using RCU. - */ - request = list_first_entry_or_null(&file_priv->mm.request_list, - struct i915_request, - client_link); - rcu_read_lock(); - task = pid_task(request && request->gem_context->pid ? - request->gem_context->pid : file->pid, - PIDTYPE_PID); - print_file_stats(m, task ? 
task->comm : "<unknown>", stats); - rcu_read_unlock(); - - mutex_unlock(&dev->struct_mutex); - } - mutex_unlock(&dev->filelist_mutex); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -656,10 +625,12 @@ static void gen8_display_interrupt_info(struct seq_file *m) for_each_pipe(dev_priv, pipe) { enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, - power_domain)) { + wakeref = intel_display_power_get_if_enabled(dev_priv, + power_domain); + if (!wakeref) { seq_printf(m, "Pipe %c power disabled\n", pipe_name(pipe)); continue; @@ -674,7 +645,7 @@ static void gen8_display_interrupt_info(struct seq_file *m) pipe_name(pipe), I915_READ(GEN8_DE_PIPE_IER(pipe))); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); } seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", @@ -704,11 +675,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int i, pipe; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); if (IS_CHERRYVIEW(dev_priv)) { + intel_wakeref_t pref; + seq_printf(m, "Master Interrupt Control:\t%08x\n", I915_READ(GEN8_MASTER_IRQ)); @@ -724,8 +698,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data) enum intel_display_power_domain power_domain; power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, - power_domain)) { + pref = intel_display_power_get_if_enabled(dev_priv, + power_domain); + if (!pref) { seq_printf(m, "Pipe %c power disabled\n", pipe_name(pipe)); continue; @@ -735,17 +710,17 @@ static int i915_interrupt_info(struct seq_file *m, void *data) pipe_name(pipe), I915_READ(PIPESTAT(pipe))); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, pref); } - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); seq_printf(m, "Port hotplug:\t%08x\n", I915_READ(PORT_HOTPLUG_EN)); seq_printf(m, "DPFLIPSTAT:\t%08x\n", I915_READ(VLV_DPFLIPSTAT)); seq_printf(m, "DPINVGTT:\t%08x\n", I915_READ(DPINVGTT)); - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref); for (i = 0; i < 4; i++) { seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", @@ -808,10 +783,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data) I915_READ(VLV_IMR)); for_each_pipe(dev_priv, pipe) { enum intel_display_power_domain power_domain; + intel_wakeref_t pref; power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, - power_domain)) { + pref = intel_display_power_get_if_enabled(dev_priv, + power_domain); + if (!pref) { seq_printf(m, "Pipe %c power disabled\n", pipe_name(pipe)); continue; @@ -820,7 +797,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "Pipe %c stat:\t%08x\n", pipe_name(pipe), I915_READ(PIPESTAT(pipe))); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, pref); } seq_printf(m, "Master IER:\t%08x\n", @@ -903,11 +880,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data) for_each_engine(engine, dev_priv, id) { seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", - engine->name, I915_READ_IMR(engine)); + 
engine->name, ENGINE_READ(engine, RING_IMR)); } } - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -980,12 +957,13 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; struct i915_gpu_state *gpu; + intel_wakeref_t wakeref; - intel_runtime_pm_get(i915); - gpu = i915_capture_gpu_state(i915); - intel_runtime_pm_put(i915); - if (!gpu) - return -ENOMEM; + gpu = NULL; + with_intel_runtime_pm(i915, wakeref) + gpu = i915_capture_gpu_state(i915); + if (IS_ERR(gpu)) + return PTR_ERR(gpu); file->private_data = gpu; return 0; @@ -1018,7 +996,13 @@ i915_error_state_write(struct file *filp, static int i915_error_state_open(struct inode *inode, struct file *file) { - file->private_data = i915_first_error_state(inode->i_private); + struct i915_gpu_state *error; + + error = i915_first_error_state(inode->i_private); + if (IS_ERR(error)) + return PTR_ERR(error); + + file->private_data = error; return 0; } @@ -1032,39 +1016,16 @@ static const struct file_operations i915_error_state_fops = { }; #endif -static int -i915_next_seqno_set(void *data, u64 val) -{ - struct drm_i915_private *dev_priv = data; - struct drm_device *dev = &dev_priv->drm; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - intel_runtime_pm_get(dev_priv); - ret = i915_gem_set_global_seqno(dev, val); - intel_runtime_pm_put(dev_priv); - - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, - NULL, i915_next_seqno_set, - "0x%llx\n"); - static int i915_frequency_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_rps *rps = &dev_priv->gt_pm.rps; + intel_wakeref_t wakeref; int ret = 0; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); - if (IS_GEN5(dev_priv)) { + if (IS_GEN(dev_priv, 5)) { u16 rgvswctl = I915_READ16(MEMSWCTL); u16 rgvstat = I915_READ16(MEMSTAT_ILK); @@ -1132,7 +1093,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) } /* RPSTAT1 is in the GT power well */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); reqf = I915_READ(GEN6_RPNSWREQ); if (INTEL_GEN(dev_priv) >= 9) @@ -1160,7 +1121,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) cagf = intel_gpu_freq(dev_priv, intel_get_cagf(dev_priv, rpstat)); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); if (INTEL_GEN(dev_priv) >= 11) { pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE); @@ -1274,7 +1235,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret; } @@ -1313,35 +1274,29 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) u64 acthd[I915_NUM_ENGINES]; u32 seqno[I915_NUM_ENGINES]; struct intel_instdone instdone; + intel_wakeref_t wakeref; enum intel_engine_id id; + seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags); if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) - seq_puts(m, "Wedged\n"); + seq_puts(m, "\tWedged\n"); if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) - seq_puts(m, "Reset in progress: 
struct_mutex backoff\n"); - if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags)) - seq_puts(m, "Reset in progress: reset handoff to waiter\n"); - if (waitqueue_active(&dev_priv->gpu_error.wait_queue)) - seq_puts(m, "Waiter holding struct mutex\n"); - if (waitqueue_active(&dev_priv->gpu_error.reset_queue)) - seq_puts(m, "struct_mutex blocked for reset\n"); + seq_puts(m, "\tDevice (global) reset in progress\n"); if (!i915_modparams.enable_hangcheck) { seq_puts(m, "Hangcheck disabled\n"); return 0; } - intel_runtime_pm_get(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) { + for_each_engine(engine, dev_priv, id) { + acthd[id] = intel_engine_get_active_head(engine); + seqno[id] = intel_engine_get_hangcheck_seqno(engine); + } - for_each_engine(engine, dev_priv, id) { - acthd[id] = intel_engine_get_active_head(engine); - seqno[id] = intel_engine_get_seqno(engine); + intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone); } - intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); - - intel_runtime_pm_put(dev_priv); - if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) seq_printf(m, "Hangcheck active, timer fires in %dms\n", jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - @@ -1354,39 +1309,19 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake)); for_each_engine(engine, dev_priv, id) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct rb_node *rb; - seq_printf(m, "%s:\n", engine->name); - seq_printf(m, "\tseqno = %x [current %x, last %x]\n", - engine->hangcheck.seqno, seqno[id], - intel_engine_last_submit(engine)); - seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n", - yesno(intel_engine_has_waiter(engine)), - yesno(test_bit(engine->id, - &dev_priv->gpu_error.missed_irq_rings)), - yesno(engine->hangcheck.stalled), - yesno(engine->hangcheck.wedged)); - - spin_lock_irq(&b->rb_lock); - for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { - struct intel_wait *w = rb_entry(rb, typeof(*w), node); - - seq_printf(m, "\t%s [%d] waiting for %x\n", - w->tsk->comm, w->tsk->pid, w->seqno); - } - spin_unlock_irq(&b->rb_lock); + seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n", + engine->hangcheck.last_seqno, + seqno[id], + engine->hangcheck.next_seqno, + jiffies_to_msecs(jiffies - + engine->hangcheck.action_timestamp)); seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", (long long)engine->hangcheck.acthd, (long long)acthd[id]); - seq_printf(m, "\taction = %s(%d) %d ms ago\n", - hangcheck_action_to_str(engine->hangcheck.action), - engine->hangcheck.action, - jiffies_to_msecs(jiffies - - engine->hangcheck.action_timestamp)); - if (engine->id == RCS) { + if (engine->id == RCS0) { seq_puts(m, "\tinstdone read =\n"); i915_instdone_info(dev_priv, m, &instdone); @@ -1478,13 +1413,14 @@ static int ironlake_drpc_info(struct seq_file *m) static int i915_forcewake_domains(struct seq_file *m, void *data) { struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_uncore *uncore = &i915->uncore; struct intel_uncore_forcewake_domain *fw_domain; unsigned int tmp; seq_printf(m, "user.bypass_count = %u\n", - i915->uncore.user_forcewake.count); + uncore->user_forcewake.count); - for_each_fw_domain(fw_domain, i915, tmp) + for_each_fw_domain(fw_domain, uncore, tmp) seq_printf(m, "%s.wake_count = %u\n", intel_uncore_forcewake_domain_to_str(fw_domain->id), READ_ONCE(fw_domain->wake_count)); @@ -1616,18 +1552,17 @@ static int 
gen6_drpc_info(struct seq_file *m) static int i915_drpc_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - int err; - - intel_runtime_pm_get(dev_priv); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - err = vlv_drpc_info(m); - else if (INTEL_GEN(dev_priv) >= 6) - err = gen6_drpc_info(m); - else - err = ironlake_drpc_info(m); - - intel_runtime_pm_put(dev_priv); + intel_wakeref_t wakeref; + int err = -ENODEV; + + with_intel_runtime_pm(dev_priv, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + err = vlv_drpc_info(m); + else if (INTEL_GEN(dev_priv) >= 6) + err = gen6_drpc_info(m); + else + err = ironlake_drpc_info(m); + } return err; } @@ -1649,11 +1584,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_fbc *fbc = &dev_priv->fbc; + intel_wakeref_t wakeref; if (!HAS_FBC(dev_priv)) return -ENODEV; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&fbc->lock); if (intel_fbc_is_active(dev_priv)) @@ -1680,7 +1616,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) } mutex_unlock(&fbc->lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -1725,11 +1661,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, static int i915_ips_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; if (!HAS_IPS(dev_priv)) return -ENODEV; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); seq_printf(m, "Enabled by kernel parameter: %s\n", yesno(i915_modparams.enable_ips)); @@ -1743,7 +1680,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) seq_puts(m, "Currently: disabled\n"); } - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -1751,10 +1688,10 @@ static int i915_ips_status(struct seq_file *m, void *unused) static int i915_sr_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; bool sr_enabled = false; - intel_runtime_pm_get(dev_priv); - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); if (INTEL_GEN(dev_priv) >= 9) /* no global SR status; inspect per-plane WM */; @@ -1770,8 +1707,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - intel_runtime_pm_put(dev_priv); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); @@ -1780,31 +1716,24 @@ static int i915_sr_status(struct seq_file *m, void *unused) static int i915_emon_status(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - unsigned long temp, chipset, gfx; - int ret; + struct drm_i915_private *i915 = node_to_i915(m->private); + intel_wakeref_t wakeref; - if (!IS_GEN5(dev_priv)) + if (!IS_GEN(i915, 5)) return -ENODEV; - intel_runtime_pm_get(dev_priv); - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - temp = i915_mch_val(dev_priv); - chipset = i915_chipset_val(dev_priv); - gfx = 
i915_gfx_val(dev_priv); - mutex_unlock(&dev->struct_mutex); + with_intel_runtime_pm(i915, wakeref) { + unsigned long temp, chipset, gfx; - seq_printf(m, "GMCH temp: %ld\n", temp); - seq_printf(m, "Chipset power: %ld\n", chipset); - seq_printf(m, "GFX power: %ld\n", gfx); - seq_printf(m, "Total power: %ld\n", chipset + gfx); + temp = i915_mch_val(i915); + chipset = i915_chipset_val(i915); + gfx = i915_gfx_val(i915); - intel_runtime_pm_put(dev_priv); + seq_printf(m, "GMCH temp: %ld\n", temp); + seq_printf(m, "Chipset power: %ld\n", chipset); + seq_printf(m, "GFX power: %ld\n", gfx); + seq_printf(m, "Total power: %ld\n", chipset + gfx); + } return 0; } @@ -1814,13 +1743,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_rps *rps = &dev_priv->gt_pm.rps; unsigned int max_gpu_freq, min_gpu_freq; + intel_wakeref_t wakeref; int gpu_freq, ia_freq; int ret; if (!HAS_LLC(dev_priv)) return -ENODEV; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); ret = mutex_lock_interruptible(&dev_priv->pcu_lock); if (ret) @@ -1853,7 +1783,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) mutex_unlock(&dev_priv->pcu_lock); out: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret; } @@ -1947,9 +1877,7 @@ static int i915_context_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_device *dev = &dev_priv->drm; - struct intel_engine_cs *engine; struct i915_gem_context *ctx; - enum intel_engine_id id; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1957,6 +1885,8 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { + struct intel_context *ce; + seq_puts(m, "HW context "); if (!list_empty(&ctx->hw_id_link)) seq_printf(m, "%x [pin %u]", ctx->hw_id, @@ -1979,11 +1909,8 @@ static int i915_context_status(struct seq_file *m, void *unused) seq_putc(m, ctx->remap_slice ? 
'R' : 'r'); seq_putc(m, '\n'); - for_each_engine(engine, dev_priv, id) { - struct intel_context *ce = - to_intel_context(ctx, engine); - - seq_printf(m, "%s: ", engine->name); + list_for_each_entry(ce, &ctx->active_engines, active_link) { + seq_printf(m, "%s: ", ce->engine->name); if (ce->state) describe_obj(m, ce->state->obj); if (ce->ring) @@ -2026,15 +1953,16 @@ static const char *swizzle_string(unsigned swizzle) static int i915_swizzle_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); seq_printf(m, "bit6 swizzle for X-tiling = %s\n", swizzle_string(dev_priv->mm.bit_6_swizzle_x)); seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", swizzle_string(dev_priv->mm.bit_6_swizzle_y)); - if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) { + if (IS_GEN_RANGE(dev_priv, 3, 4)) { seq_printf(m, "DDC = 0x%08x\n", I915_READ(DCC)); seq_printf(m, "DDC2 = 0x%08x\n", @@ -2065,141 +1993,11 @@ static int i915_swizzle_info(struct seq_file *m, void *data) if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) seq_puts(m, "L-shaped memory detected\n"); - intel_runtime_pm_put(dev_priv); - - return 0; -} - -static int per_file_ctx(int id, void *ptr, void *data) -{ - struct i915_gem_context *ctx = ptr; - struct seq_file *m = data; - struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; - - if (!ppgtt) { - seq_printf(m, " no ppgtt for context %d\n", - ctx->user_handle); - return 0; - } - - if (i915_gem_context_is_default(ctx)) - seq_puts(m, " default context:\n"); - else - seq_printf(m, " context %d:\n", ctx->user_handle); - ppgtt->debug_dump(ppgtt, m); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } -static void gen8_ppgtt_info(struct seq_file *m, - struct drm_i915_private *dev_priv) -{ - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int i; - - if (!ppgtt) - return; - - for_each_engine(engine, dev_priv, id) { - seq_printf(m, "%s\n", engine->name); - for (i = 0; i < 4; i++) { - u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); - pdp <<= 32; - pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i)); - seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); - } - } -} - -static void gen6_ppgtt_info(struct seq_file *m, - struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - if (IS_GEN6(dev_priv)) - seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); - - for_each_engine(engine, dev_priv, id) { - seq_printf(m, "%s\n", engine->name); - if (IS_GEN7(dev_priv)) - seq_printf(m, "GFX_MODE: 0x%08x\n", - I915_READ(RING_MODE_GEN7(engine))); - seq_printf(m, "PP_DIR_BASE: 0x%08x\n", - I915_READ(RING_PP_DIR_BASE(engine))); - seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", - I915_READ(RING_PP_DIR_BASE_READ(engine))); - seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", - I915_READ(RING_PP_DIR_DCLV(engine))); - } - if (dev_priv->mm.aliasing_ppgtt) { - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - - seq_puts(m, "aliasing PPGTT:\n"); - seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); - - ppgtt->debug_dump(ppgtt, m); - } - - seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); -} - -static int i915_ppgtt_info(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct drm_file *file; - int ret; - - mutex_lock(&dev->filelist_mutex); - ret = 
mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - goto out_unlock; - - intel_runtime_pm_get(dev_priv); - - if (INTEL_GEN(dev_priv) >= 8) - gen8_ppgtt_info(m, dev_priv); - else if (INTEL_GEN(dev_priv) >= 6) - gen6_ppgtt_info(m, dev_priv); - - list_for_each_entry_reverse(file, &dev->filelist, lhead) { - struct drm_i915_file_private *file_priv = file->driver_priv; - struct task_struct *task; - - task = get_pid_task(file->pid, PIDTYPE_PID); - if (!task) { - ret = -ESRCH; - goto out_rpm; - } - seq_printf(m, "\nproc: %s\n", task->comm); - put_task_struct(task); - idr_for_each(&file_priv->context_idr, per_file_ctx, - (void *)(unsigned long)m); - } - -out_rpm: - intel_runtime_pm_put(dev_priv); - mutex_unlock(&dev->struct_mutex); -out_unlock: - mutex_unlock(&dev->filelist_mutex); - return ret; -} - -static int count_irq_waiters(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int count = 0; - - for_each_engine(engine, i915, id) - count += intel_engine_has_waiter(engine); - - return count; -} - static const char *rps_power_to_str(unsigned int power) { static const char * const strings[] = { @@ -2217,12 +2015,11 @@ static const char *rps_power_to_str(unsigned int power) static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; struct intel_rps *rps = &dev_priv->gt_pm.rps; u32 act_freq = rps->cur_freq; - struct drm_file *file; + intel_wakeref_t wakeref; - if (intel_runtime_pm_get_if_in_use(dev_priv)) { + with_intel_runtime_pm_if_in_use(dev_priv, wakeref) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { mutex_lock(&dev_priv->pcu_lock); act_freq = vlv_punit_read(dev_priv, @@ -2233,13 +2030,11 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) act_freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)); } - intel_runtime_pm_put(dev_priv); } seq_printf(m, "RPS enabled? %d\n", rps->enabled); seq_printf(m, "GPU busy? %s [%d requests]\n", yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); - seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); seq_printf(m, "Boosts outstanding? %d\n", atomic_read(&rps->num_waiters)); seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); @@ -2256,22 +2051,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) intel_gpu_freq(dev_priv, rps->efficient_freq), intel_gpu_freq(dev_priv, rps->boost_freq)); - mutex_lock(&dev->filelist_mutex); - list_for_each_entry_reverse(file, &dev->filelist, lhead) { - struct drm_i915_file_private *file_priv = file->driver_priv; - struct task_struct *task; - - rcu_read_lock(); - task = pid_task(file->pid, PIDTYPE_PID); - seq_printf(m, "%s [%d]: %d boosts\n", - task ? task->comm : "<unknown>", - task ? 
task->pid : -1, - atomic_read(&file_priv->rps_client.boosts)); - rcu_read_unlock(); - } - seq_printf(m, "Kernel (anonymous) boosts: %d\n", - atomic_read(&rps->boosts)); - mutex_unlock(&dev->filelist_mutex); + seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && @@ -2279,12 +2059,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) u32 rpup, rpupei; u32 rpdown, rpdownei; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", rps_power_to_str(rps->power.mode)); @@ -2307,8 +2087,8 @@ static int i915_llc(struct seq_file *m, void *data) const bool edram = INTEL_GEN(dev_priv) > 8; seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv))); - seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC", - intel_uncore_edram_size(dev_priv)/1024/1024); + seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC", + dev_priv->edram_size_mb); return 0; } @@ -2316,6 +2096,7 @@ static int i915_llc(struct seq_file *m, void *data) static int i915_huc_load_status_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; struct drm_printer p; if (!HAS_HUC(dev_priv)) @@ -2324,9 +2105,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) p = drm_seq_file_printer(m); intel_uc_fw_dump(&dev_priv->huc.fw, &p); - intel_runtime_pm_get(dev_priv); - seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); - intel_runtime_pm_put(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) + seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); return 0; } @@ -2334,8 +2114,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) static int i915_guc_load_status_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; struct drm_printer p; - u32 tmp, i; if (!HAS_GUC(dev_priv)) return -ENODEV; @@ -2343,22 +2123,23 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) p = drm_seq_file_printer(m); intel_uc_fw_dump(&dev_priv->guc.fw, &p); - intel_runtime_pm_get(dev_priv); - - tmp = I915_READ(GUC_STATUS); - - seq_printf(m, "\nGuC status 0x%08x:\n", tmp); - seq_printf(m, "\tBootrom status = 0x%x\n", - (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); - seq_printf(m, "\tuKernel status = 0x%x\n", - (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); - seq_printf(m, "\tMIA Core status = 0x%x\n", - (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); - seq_puts(m, "\nScratch registers:\n"); - for (i = 0; i < 16; i++) - seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); - - intel_runtime_pm_put(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) { + u32 tmp = I915_READ(GUC_STATUS); + u32 i; + + seq_printf(m, "\nGuC status 0x%08x:\n", tmp); + seq_printf(m, "\tBootrom status = 0x%x\n", + (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); + seq_printf(m, "\tuKernel status = 0x%x\n", + (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); + seq_printf(m, "\tMIA Core status = 0x%x\n", + (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); + 
seq_puts(m, "\nScratch registers:\n"); + for (i = 0; i < 16; i++) { + seq_printf(m, "\t%2d: \t0x%x\n", + i, I915_READ(SOFT_SCRATCH(i))); + } + } return 0; } @@ -2410,7 +2191,7 @@ static void i915_guc_client_info(struct seq_file *m, { struct intel_engine_cs *engine; enum intel_engine_id id; - uint64_t tot = 0; + u64 tot = 0; seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", client->priority, client->stage_id, client->proc_desc_offset); @@ -2464,7 +2245,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data) const struct intel_guc *guc = &dev_priv->guc; struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr; struct intel_guc_client *client = guc->execbuf_client; - unsigned int tmp; + intel_engine_mask_t tmp; int index; if (!USES_GUC_SUBMISSION(dev_priv)) @@ -2665,7 +2446,8 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); static void psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) { - u32 val, psr_status; + u32 val, status_val; + const char *status = "unknown"; if (dev_priv->psr.psr2_enabled) { static const char * const live_status[] = { @@ -2681,14 +2463,11 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) "BUF_ON", "TG_ON" }; - psr_status = I915_READ(EDP_PSR2_STATUS); - val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >> - EDP_PSR2_STATUS_STATE_SHIFT; - if (val < ARRAY_SIZE(live_status)) { - seq_printf(m, "Source PSR status: 0x%x [%s]\n", - psr_status, live_status[val]); - return; - } + val = I915_READ(EDP_PSR2_STATUS); + status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> + EDP_PSR2_STATUS_STATE_SHIFT; + if (status_val < ARRAY_SIZE(live_status)) + status = live_status[status_val]; } else { static const char * const live_status[] = { "IDLE", @@ -2700,74 +2479,102 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) "SRDOFFACK", "SRDENT_ON", }; - psr_status = I915_READ(EDP_PSR_STATUS); - val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >> - EDP_PSR_STATUS_STATE_SHIFT; - if (val < ARRAY_SIZE(live_status)) { - seq_printf(m, "Source PSR status: 0x%x [%s]\n", - psr_status, live_status[val]); - return; - } + val = I915_READ(EDP_PSR_STATUS); + status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> + EDP_PSR_STATUS_STATE_SHIFT; + if (status_val < ARRAY_SIZE(live_status)) + status = live_status[status_val]; } - seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown"); + seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); } static int i915_edp_psr_status(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - u32 psrperf = 0; - bool enabled = false; - bool sink_support; + struct i915_psr *psr = &dev_priv->psr; + intel_wakeref_t wakeref; + const char *status; + bool enabled; + u32 val; if (!HAS_PSR(dev_priv)) return -ENODEV; - sink_support = dev_priv->psr.sink_support; - seq_printf(m, "Sink_Support: %s\n", yesno(sink_support)); - if (!sink_support) - return 0; + seq_printf(m, "Sink support: %s", yesno(psr->sink_support)); + if (psr->dp) + seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]); + seq_puts(m, "\n"); - intel_runtime_pm_get(dev_priv); + if (!psr->sink_support) + return 0; - mutex_lock(&dev_priv->psr.lock); - seq_printf(m, "PSR mode: %s\n", - dev_priv->psr.psr2_enabled ? 
"PSR2" : "PSR1"); - seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled)); - seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", - dev_priv->psr.busy_frontbuffer_bits); + wakeref = intel_runtime_pm_get(dev_priv); + mutex_lock(&psr->lock); - if (dev_priv->psr.psr2_enabled) - enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; + if (psr->enabled) + status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled"; else - enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; + status = "disabled"; + seq_printf(m, "PSR mode: %s\n", status); - seq_printf(m, "Main link in standby mode: %s\n", - yesno(dev_priv->psr.link_standby)); + if (!psr->enabled) + goto unlock; - seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled)); + if (psr->psr2_enabled) { + val = I915_READ(EDP_PSR2_CTL); + enabled = val & EDP_PSR2_ENABLE; + } else { + val = I915_READ(EDP_PSR_CTL); + enabled = val & EDP_PSR_ENABLE; + } + seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", + enableddisabled(enabled), val); + psr_source_status(dev_priv, m); + seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", + psr->busy_frontbuffer_bits); /* * SKL+ Perf counter is reset to 0 everytime DC state is entered */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - psrperf = I915_READ(EDP_PSR_PERF_CNT) & - EDP_PSR_PERF_CNT_MASK; + val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK; + seq_printf(m, "Performance counter: %u\n", val); + } - seq_printf(m, "Performance_Counter: %u\n", psrperf); + if (psr->debug & I915_PSR_DEBUG_IRQ) { + seq_printf(m, "Last attempted entry at: %lld\n", + psr->last_entry_attempt); + seq_printf(m, "Last exit at: %lld\n", psr->last_exit); } - psr_source_status(dev_priv, m); - mutex_unlock(&dev_priv->psr.lock); + if (psr->psr2_enabled) { + u32 su_frames_val[3]; + int frame; - if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) { - seq_printf(m, "Last attempted entry at: %lld\n", - dev_priv->psr.last_entry_attempt); - seq_printf(m, "Last exit at: %lld\n", - dev_priv->psr.last_exit); + /* + * Reading all 3 registers before hand to minimize crossing a + * frame boundary between register reads + */ + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) + su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame)); + + seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); + + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { + u32 su_blocks; + + su_blocks = su_frames_val[frame / 3] & + PSR2_SU_STATUS_MASK(frame); + su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); + seq_printf(m, "%d\t%d\n", frame, su_blocks); + } } - intel_runtime_pm_put(dev_priv); +unlock: + mutex_unlock(&psr->lock); + intel_runtime_pm_put(dev_priv, wakeref); + return 0; } @@ -2775,7 +2582,7 @@ static int i915_edp_psr_debug_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; - struct drm_modeset_acquire_ctx ctx; + intel_wakeref_t wakeref; int ret; if (!CAN_PSR(dev_priv)) @@ -2783,22 +2590,11 @@ i915_edp_psr_debug_set(void *data, u64 val) DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val); - intel_runtime_pm_get(dev_priv); - - drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); - -retry: - ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val); - if (ret == -EDEADLK) { - ret = drm_modeset_backoff(&ctx); - if (!ret) - goto retry; - } + wakeref = intel_runtime_pm_get(dev_priv); - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); + ret = intel_psr_debug_set(dev_priv, val); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret; } @@ -2823,24 +2619,20 @@ static int 
i915_energy_uJ(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); unsigned long long power; + intel_wakeref_t wakeref; u32 units; if (INTEL_GEN(dev_priv) < 6) return -ENODEV; - intel_runtime_pm_get(dev_priv); - - if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) { - intel_runtime_pm_put(dev_priv); + if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) return -ENODEV; - } units = (power & 0x1f00) >> 8; - power = I915_READ(MCH_SECP_NRG_STTS); - power = (1000000 * power) >> units; /* convert to uJ */ - - intel_runtime_pm_put(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) + power = I915_READ(MCH_SECP_NRG_STTS); + power = (1000000 * power) >> units; /* convert to uJ */ seq_printf(m, "%llu", power); return 0; @@ -2854,8 +2646,10 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) if (!HAS_RUNTIME_PM(dev_priv)) seq_puts(m, "Runtime power management not supported\n"); - seq_printf(m, "GPU idle: %s (epoch %u)\n", - yesno(!dev_priv->gt.awake), dev_priv->gt.epoch); + seq_printf(m, "Runtime power status: %s\n", + enableddisabled(!dev_priv->power_domains.wakeref)); + + seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); seq_printf(m, "IRQs disabled: %s\n", yesno(!intel_irqs_enabled(dev_priv))); #ifdef CONFIG_PM @@ -2868,6 +2662,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) pci_power_name(pdev->current_state), pdev->current_state); + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) { + struct drm_printer p = drm_seq_file_printer(m); + + print_intel_runtime_pm_wakeref(dev_priv, &p); + } + return 0; } @@ -2902,6 +2702,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) static int i915_dmc_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; struct intel_csr *csr; if (!HAS_CSR(dev_priv)) @@ -2909,7 +2710,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused) csr = &dev_priv->csr; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); seq_printf(m, "path: %s\n", csr->fw_path); @@ -2935,7 +2736,7 @@ out: seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -2948,14 +2749,7 @@ static void intel_seq_print_mode(struct seq_file *m, int tabs, for (i = 0; i < tabs; i++) seq_putc(m, '\t'); - seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", - mode->base.id, mode->name, - mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, - mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, - mode->vsync_end, mode->vtotal, - mode->type, mode->flags); + seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); } static void intel_encoder_info(struct seq_file *m, @@ -3127,14 +2921,13 @@ static const char *plane_type(enum drm_plane_type type) return "unknown"; } -static const char *plane_rotation(unsigned int rotation) +static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation) { - static char buf[48]; /* * According to doc only one DRM_MODE_ROTATE_ is allowed but this * will print them all to visualize if the values are misused */ - snprintf(buf, sizeof(buf), + snprintf(buf, bufsize, "%s%s%s%s%s%s(0x%08x)", (rotation & DRM_MODE_ROTATE_0) ? 
"0 " : "", (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", @@ -3143,8 +2936,6 @@ static const char *plane_rotation(unsigned int rotation) (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "", rotation); - - return buf; } static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) @@ -3157,6 +2948,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) struct drm_plane_state *state; struct drm_plane *plane = &intel_plane->base; struct drm_format_name_buf format_name; + char rot_str[48]; if (!plane->state) { seq_puts(m, "plane->state is NULL!\n"); @@ -3172,6 +2964,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) sprintf(format_name.str, "N/A"); } + plane_rotation(rot_str, sizeof(rot_str), state->rotation); + seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", plane->base.id, plane_type(intel_plane->base.type), @@ -3186,7 +2980,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) (state->src_h >> 16), ((state->src_h & 0xffff) * 15625) >> 10, format_name.str, - plane_rotation(state->rotation)); + rot_str); } } @@ -3225,8 +3019,10 @@ static int i915_display_info(struct seq_file *m, void *unused) struct intel_crtc *crtc; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(dev_priv); - intel_runtime_pm_get(dev_priv); seq_printf(m, "CRTC info\n"); seq_printf(m, "---------\n"); for_each_intel_crtc(dev, crtc) { @@ -3274,7 +3070,7 @@ static int i915_display_info(struct seq_file *m, void *unused) drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev->mode_config.mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -3283,23 +3079,23 @@ static int i915_engine_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_engine_cs *engine; + intel_wakeref_t wakeref; enum intel_engine_id id; struct drm_printer p; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); - seq_printf(m, "GT awake? %s (epoch %u)\n", - yesno(dev_priv->gt.awake), dev_priv->gt.epoch); + seq_printf(m, "GT awake? 
%s\n", yesno(dev_priv->gt.awake)); seq_printf(m, "Global active requests: %d\n", dev_priv->gt.active_requests); seq_printf(m, "CS timestamp frequency: %u kHz\n", - dev_priv->info.cs_timestamp_frequency_khz); + RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz); p = drm_seq_file_printer(m); for_each_engine(engine, dev_priv, id) intel_engine_dump(engine, &p, "%s\n", engine->name); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -3309,7 +3105,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_printer p = drm_seq_file_printer(m); - intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p); + intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p); return 0; } @@ -3376,7 +3172,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) static int i915_wa_registers(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); - const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list; + const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list; struct i915_wa *wa; unsigned int i; @@ -3412,20 +3208,21 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - int ret; + intel_wakeref_t wakeref; bool enable; + int ret; ret = kstrtobool_from_user(ubuf, len, &enable); if (ret < 0) return ret; - intel_runtime_pm_get(dev_priv); - if (!dev_priv->ipc_enabled && enable) - DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); - dev_priv->wm.distrust_bios_wm = true; - dev_priv->ipc_enabled = enable; - intel_enable_ipc(dev_priv); - intel_runtime_pm_put(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) { + if (!dev_priv->ipc_enabled && enable) + DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); + dev_priv->wm.distrust_bios_wm = true; + dev_priv->ipc_enabled = enable; + intel_enable_ipc(dev_priv); + } return len; } @@ -3793,7 +3590,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data) } DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); -static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) +static void wm_latency_show(struct seq_file *m, const u16 wm[8]) { struct drm_i915_private *dev_priv = m->private; struct drm_device *dev = &dev_priv->drm; @@ -3836,7 +3633,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) static int pri_wm_latency_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; - const uint16_t *latencies; + const u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -3851,7 +3648,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data) static int spr_wm_latency_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; - const uint16_t *latencies; + const u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -3866,7 +3663,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data) static int cur_wm_latency_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; - const uint16_t *latencies; + const u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -3892,7 +3689,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file) { struct 
drm_i915_private *dev_priv = inode->i_private; - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) return -ENODEV; return single_open(file, spr_wm_latency_show, dev_priv); @@ -3902,19 +3699,19 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file) { struct drm_i915_private *dev_priv = inode->i_private; - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) return -ENODEV; return single_open(file, cur_wm_latency_show, dev_priv); } static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp, uint16_t wm[8]) + size_t len, loff_t *offp, u16 wm[8]) { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; struct drm_device *dev = &dev_priv->drm; - uint16_t new[8] = { 0 }; + u16 new[8] = { 0 }; int num_levels; int level; int ret; @@ -3959,7 +3756,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - uint16_t *latencies; + u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -3974,7 +3771,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - uint16_t *latencies; + u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -3989,7 +3786,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - uint16_t *latencies; + u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -4029,43 +3826,31 @@ static const struct file_operations i915_cur_wm_latency_fops = { static int i915_wedged_get(void *data, u64 *val) { - struct drm_i915_private *dev_priv = data; - - *val = i915_terminally_wedged(&dev_priv->gpu_error); + int ret = i915_terminally_wedged(data); - return 0; + switch (ret) { + case -EIO: + *val = 1; + return 0; + case 0: + *val = 0; + return 0; + default: + return ret; + } } static int i915_wedged_set(void *data, u64 val) { struct drm_i915_private *i915 = data; - struct intel_engine_cs *engine; - unsigned int tmp; - /* - * There is no safeguard against this debugfs entry colliding - * with the hangcheck calling same i915_handle_error() in - * parallel, causing an explosion. 
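/*
 * Editor's sketch, not part of the diff: the reworked i915_wedged_get()
 * above relies on the new convention that i915_terminally_wedged() takes
 * the device and returns -EIO when the GPU is terminally wedged, 0 when it
 * is healthy, and any other negative errno on failure.  The same mapping in
 * isolation, with query_wedged() as a hypothetical stand-in for the helper:
 */
#include <errno.h>

static int query_wedged(void) { return -EIO; }	/* pretend the GPU is wedged */

static int wedged_get(unsigned long long *val)
{
	int ret = query_wedged();

	switch (ret) {
	case -EIO:	/* terminally wedged: debugfs reports 1 */
		*val = 1;
		return 0;
	case 0:		/* healthy: debugfs reports 0 */
		*val = 0;
		return 0;
	default:	/* any other errno is propagated as-is */
		return ret;
	}
}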
For now we assume that the - * test harness is responsible enough not to inject gpu hangs - * while it is writing to 'i915_wedged' - */ - - if (i915_reset_backoff(&i915->gpu_error)) - return -EAGAIN; - - for_each_engine_masked(engine, i915, val, tmp) { - engine->hangcheck.seqno = intel_engine_get_seqno(engine); - engine->hangcheck.stalled = true; - } + /* Flush any previous reset before applying for a new one */ + wait_event(i915->gpu_error.reset_queue, + !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)); i915_handle_error(i915, val, I915_ERROR_CAPTURE, "Manually set wedged engine mask = %llx", val); - - wait_on_bit(&i915->gpu_error.flags, - I915_RESET_HANDOFF, - TASK_UNINTERRUPTIBLE); - return 0; } @@ -4073,94 +3858,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, i915_wedged_get, i915_wedged_set, "%llu\n"); -static int -fault_irq_set(struct drm_i915_private *i915, - unsigned long *irq, - unsigned long val) -{ - int err; - - err = mutex_lock_interruptible(&i915->drm.struct_mutex); - if (err) - return err; - - err = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED | - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT); - if (err) - goto err_unlock; - - *irq = val; - mutex_unlock(&i915->drm.struct_mutex); - - /* Flush idle worker to disarm irq */ - drain_delayed_work(&i915->gt.idle_work); - - return 0; - -err_unlock: - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int -i915_ring_missed_irq_get(void *data, u64 *val) -{ - struct drm_i915_private *dev_priv = data; - - *val = dev_priv->gpu_error.missed_irq_rings; - return 0; -} - -static int -i915_ring_missed_irq_set(void *data, u64 val) -{ - struct drm_i915_private *i915 = data; - - return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val); -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, - i915_ring_missed_irq_get, i915_ring_missed_irq_set, - "0x%08llx\n"); - -static int -i915_ring_test_irq_get(void *data, u64 *val) -{ - struct drm_i915_private *dev_priv = data; - - *val = dev_priv->gpu_error.test_irq_rings; - - return 0; -} - -static int -i915_ring_test_irq_set(void *data, u64 val) -{ - struct drm_i915_private *i915 = data; - - /* GuC keeps the user interrupt permanently enabled for submission */ - if (USES_GUC_SUBMISSION(i915)) - return -ENODEV; - - /* - * From icl, we can no longer individually mask interrupt generation - * from each engine. - */ - if (INTEL_GEN(i915) >= 11) - return -ENODEV; - - val &= INTEL_INFO(i915)->ring_mask; - DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); - - return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val); -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, - i915_ring_test_irq_get, i915_ring_test_irq_set, - "0x%08llx\n"); - #define DROP_UNBOUND BIT(0) #define DROP_BOUND BIT(1) #define DROP_RETIRE BIT(2) @@ -4191,21 +3888,22 @@ static int i915_drop_caches_set(void *data, u64 val) { struct drm_i915_private *i915 = data; - int ret = 0; DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", val, val & DROP_ALL); - intel_runtime_pm_get(i915); - if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915)) + if (val & DROP_RESET_ACTIVE && + wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) i915_gem_set_wedged(i915); /* No need to check and wait for gpu resets, only libdrm auto-restarts * on ioctls on -EAGAIN. 
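/*
 * Editor's sketch (an assumption, not code from this diff): throughout this
 * file explicit intel_runtime_pm_get()/intel_runtime_pm_put() pairs become a
 * tracked wakeref cookie, often via the scoped with_intel_runtime_pm() form
 * seen in i915_energy_uJ() and i915_cache_sharing_set().  One way such a
 * "run the body once between get and put" helper can be written is a
 * degenerate for-loop; rpm_get(), rpm_put() and the cookie type below are
 * hypothetical stand-ins, not the i915 definitions:
 */
typedef unsigned long rpm_cookie_t;

static rpm_cookie_t rpm_get(void *dev) { (void)dev; return 1; /* opaque, non-zero cookie */ }
static void rpm_put(void *dev, rpm_cookie_t wf) { (void)dev; (void)wf; }

#define with_rpm(dev, wf) \
	for ((wf) = rpm_get(dev); (wf); rpm_put((dev), (wf)), (wf) = 0)

/*
 * Usage: the body runs exactly once and the put side cannot be forgotten.
 * (As with any for-based scope macro, a bare break inside the body would
 * skip the put, so the body must not break out of the scope.)
 */
static unsigned int read_reg_under_rpm(void *dev)
{
	rpm_cookie_t wf;
	unsigned int val = 0;

	with_rpm(dev, wf)
		val = 0x1234;	/* stand-in for an I915_READ() */

	return val;
}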
*/ if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) { + int ret; + ret = mutex_lock_interruptible(&i915->drm.struct_mutex); if (ret) - goto out; + return ret; if (val & DROP_ACTIVE) ret = i915_gem_wait_for_idle(i915, @@ -4213,22 +3911,14 @@ i915_drop_caches_set(void *data, u64 val) I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); - if (ret == 0 && val & DROP_RESET_SEQNO) - ret = i915_gem_set_global_seqno(&i915->drm, 1); - if (val & DROP_RETIRE) i915_retire_requests(i915); mutex_unlock(&i915->drm.struct_mutex); } - if (val & DROP_RESET_ACTIVE && - i915_terminally_wedged(&i915->gpu_error)) { + if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915)) i915_handle_error(i915, ALL_ENGINES, 0, NULL); - wait_on_bit(&i915->gpu_error.flags, - I915_RESET_HANDOFF, - TASK_UNINTERRUPTIBLE); - } fs_reclaim_acquire(GFP_KERNEL); if (val & DROP_BOUND) @@ -4252,10 +3942,7 @@ i915_drop_caches_set(void *data, u64 val) if (val & DROP_FREED) i915_gem_drain_freed_objects(i915); -out: - intel_runtime_pm_put(i915); - - return ret; + return 0; } DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, @@ -4266,16 +3953,14 @@ static int i915_cache_sharing_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; - u32 snpcr; + intel_wakeref_t wakeref; + u32 snpcr = 0; - if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv))) + if (!(IS_GEN_RANGE(dev_priv, 6, 7))) return -ENODEV; - intel_runtime_pm_get(dev_priv); - - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); - - intel_runtime_pm_put(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; @@ -4286,24 +3971,25 @@ static int i915_cache_sharing_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; - u32 snpcr; + intel_wakeref_t wakeref; - if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv))) + if (!(IS_GEN_RANGE(dev_priv, 6, 7))) return -ENODEV; if (val > 3) return -EINVAL; - intel_runtime_pm_get(dev_priv); DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); + with_intel_runtime_pm(dev_priv, wakeref) { + u32 snpcr; + + /* Update the cache sharing policy here as well */ + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); + snpcr &= ~GEN6_MBC_SNPCR_MASK; + snpcr |= val << GEN6_MBC_SNPCR_SHIFT; + I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); + } - /* Update the cache sharing policy here as well */ - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); - snpcr &= ~GEN6_MBC_SNPCR_MASK; - snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); - I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); - - intel_runtime_pm_put(dev_priv); return 0; } @@ -4348,7 +4034,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { #define SS_MAX 6 - const struct intel_device_info *info = INTEL_INFO(dev_priv); + const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; int s, ss; @@ -4404,7 +4090,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { #define SS_MAX 3 - const struct intel_device_info *info = INTEL_INFO(dev_priv); + const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; int s, ss; @@ -4432,7 +4118,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, if (IS_GEN9_BC(dev_priv)) sseu->subslice_mask[s] = - INTEL_INFO(dev_priv)->sseu.subslice_mask[s]; + RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; @@ -4466,10 
+4152,10 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, if (sseu->slice_mask) { sseu->eu_per_subslice = - INTEL_INFO(dev_priv)->sseu.eu_per_subslice; + RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice; for (s = 0; s < fls(sseu->slice_mask); s++) { sseu->subslice_mask[s] = - INTEL_INFO(dev_priv)->sseu.subslice_mask[s]; + RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; } sseu->eu_total = sseu->eu_per_subslice * sseu_subslice_total(sseu); @@ -4477,7 +4163,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, /* subtract fused off EU(s) from enabled slice(s) */ for (s = 0; s < fls(sseu->slice_mask); s++) { u8 subslice_7eu = - INTEL_INFO(dev_priv)->sseu.subslice_7eu[s]; + RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s]; sseu->eu_total -= hweight8(subslice_7eu); } @@ -4525,34 +4211,32 @@ static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct sseu_dev_info sseu; + intel_wakeref_t wakeref; if (INTEL_GEN(dev_priv) < 8) return -ENODEV; seq_puts(m, "SSEU Device Info\n"); - i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu); + i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu); seq_puts(m, "SSEU Device Status\n"); memset(&sseu, 0, sizeof(sseu)); - sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices; - sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices; + sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices; + sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices; sseu.max_eus_per_subslice = - INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice; - - intel_runtime_pm_get(dev_priv); - - if (IS_CHERRYVIEW(dev_priv)) { - cherryview_sseu_device_status(dev_priv, &sseu); - } else if (IS_BROADWELL(dev_priv)) { - broadwell_sseu_device_status(dev_priv, &sseu); - } else if (IS_GEN9(dev_priv)) { - gen9_sseu_device_status(dev_priv, &sseu); - } else if (INTEL_GEN(dev_priv) >= 10) { - gen10_sseu_device_status(dev_priv, &sseu); + RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice; + + with_intel_runtime_pm(dev_priv, wakeref) { + if (IS_CHERRYVIEW(dev_priv)) + cherryview_sseu_device_status(dev_priv, &sseu); + else if (IS_BROADWELL(dev_priv)) + broadwell_sseu_device_status(dev_priv, &sseu); + else if (IS_GEN(dev_priv, 9)) + gen9_sseu_device_status(dev_priv, &sseu); + else if (INTEL_GEN(dev_priv) >= 10) + gen10_sseu_device_status(dev_priv, &sseu); } - intel_runtime_pm_put(dev_priv); - i915_print_sseu_info(m, false, &sseu); return 0; @@ -4565,8 +4249,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) if (INTEL_GEN(i915) < 6) return 0; - intel_runtime_pm_get(i915); - intel_uncore_forcewake_user_get(i915); + file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915); + intel_uncore_forcewake_user_get(&i915->uncore); return 0; } @@ -4578,8 +4262,9 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) if (INTEL_GEN(i915) < 6) return 0; - intel_uncore_forcewake_user_put(i915); - intel_runtime_pm_put(i915); + intel_uncore_forcewake_user_put(&i915->uncore); + intel_runtime_pm_put(i915, + (intel_wakeref_t)(uintptr_t)file->private_data); return 0; } @@ -4906,7 +4591,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_context_status", i915_context_status, 0}, {"i915_forcewake_domains", i915_forcewake_domains, 0}, {"i915_swizzle_info", i915_swizzle_info, 0}, - {"i915_ppgtt_info", i915_ppgtt_info, 0}, {"i915_llc", i915_llc, 0}, {"i915_edp_psr_status", i915_edp_psr_status, 0}, 
{"i915_energy_uJ", i915_energy_uJ, 0}, @@ -4933,15 +4617,12 @@ static const struct i915_debugfs_files { } i915_debugfs_files[] = { {"i915_wedged", &i915_wedged_fops}, {"i915_cache_sharing", &i915_cache_sharing_fops}, - {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, - {"i915_ring_test_irq", &i915_ring_test_irq_fops}, {"i915_gem_drop_caches", &i915_drop_caches_fops}, #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) {"i915_error_state", &i915_error_state_fops}, {"i915_gpu_info", &i915_gpu_info_fops}, #endif {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, - {"i915_next_seqno", &i915_next_seqno_fops}, {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, @@ -5014,7 +4695,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data) struct drm_connector *connector = m->private; struct intel_dp *intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base); - uint8_t buf[16]; + u8 buf[16]; ssize_t err; int i; @@ -5088,6 +4769,108 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) } DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); +static int i915_dsc_fec_support_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct drm_device *dev = connector->dev; + struct drm_crtc *crtc; + struct intel_dp *intel_dp; + struct drm_modeset_acquire_ctx ctx; + struct intel_crtc_state *crtc_state = NULL; + int ret = 0; + bool try_again = false; + + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); + + do { + try_again = false; + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, + &ctx); + if (ret) { + if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) { + try_again = true; + continue; + } + break; + } + crtc = connector->state->crtc; + if (connector->status != connector_status_connected || !crtc) { + ret = -ENODEV; + break; + } + ret = drm_modeset_lock(&crtc->mutex, &ctx); + if (ret == -EDEADLK) { + ret = drm_modeset_backoff(&ctx); + if (!ret) { + try_again = true; + continue; + } + break; + } else if (ret) { + break; + } + intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base); + crtc_state = to_intel_crtc_state(crtc->state); + seq_printf(m, "DSC_Enabled: %s\n", + yesno(crtc_state->dsc_params.compression_enable)); + seq_printf(m, "DSC_Sink_Support: %s\n", + yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))); + if (!intel_dp_is_edp(intel_dp)) + seq_printf(m, "FEC_Sink_Support: %s\n", + yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable))); + } while (try_again); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; +} + +static ssize_t i915_dsc_fec_support_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + bool dsc_enable = false; + int ret; + struct drm_connector *connector = + ((struct seq_file *)file->private_data)->private; + struct intel_encoder *encoder = intel_attached_encoder(connector); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + if (len == 0) + return 0; + + DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n", + len); + + ret = kstrtobool_from_user(ubuf, len, &dsc_enable); + if (ret < 0) + return ret; + + DRM_DEBUG_DRIVER("Got %s for DSC Enable\n", + (dsc_enable) ? 
"true" : "false"); + intel_dp->force_dsc_en = dsc_enable; + + *offp += len; + return len; +} + +static int i915_dsc_fec_support_open(struct inode *inode, + struct file *file) +{ + return single_open(file, i915_dsc_fec_support_show, + inode->i_private); +} + +static const struct file_operations i915_dsc_fec_support_fops = { + .owner = THIS_MODULE, + .open = i915_dsc_fec_support_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_dsc_fec_support_write +}; + /** * i915_debugfs_connector_add - add i915 specific connector debugfs files * @connector: pointer to a registered drm_connector @@ -5100,6 +4883,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); int i915_debugfs_connector_add(struct drm_connector *connector) { struct dentry *root = connector->debugfs_entry; + struct drm_i915_private *dev_priv = to_i915(connector->dev); /* The connector must have been registered beforehands. */ if (!root) @@ -5124,5 +4908,11 @@ int i915_debugfs_connector_add(struct drm_connector *connector) connector, &i915_hdcp_sink_capability_fops); } + if (INTEL_GEN(dev_priv) >= 10 && + (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_eDP)) + debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root, + connector, &i915_dsc_fec_support_fops); + return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b310a897a4ad..0bbf3f5db5fc 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -41,14 +41,16 @@ #include <linux/vt.h> #include <acpi/video.h> -#include <drm/drmP.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_irq.h> +#include <drm/drm_probe_helper.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_trace.h" #include "i915_pmu.h" +#include "i915_reset.h" #include "i915_query.h" #include "i915_vgpu.h" #include "intel_drv.h" @@ -132,15 +134,15 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) switch (id) { case INTEL_PCH_IBX_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); - WARN_ON(!IS_GEN5(dev_priv)); + WARN_ON(!IS_GEN(dev_priv, 5)); return PCH_IBX; case INTEL_PCH_CPT_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found CougarPoint PCH\n"); - WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv)); + WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv)); return PCH_CPT; case INTEL_PCH_PPT_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found PantherPoint PCH\n"); - WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv)); + WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv)); /* PantherPoint is CPT compatible */ return PCH_CPT; case INTEL_PCH_LPT_DEVICE_ID_TYPE: @@ -186,6 +188,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n"); WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); return PCH_CNP; + case INTEL_PCH_CMP_DEVICE_ID_TYPE: + DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n"); + WARN_ON(!IS_COFFEELAKE(dev_priv)); + /* CometPoint is CNP Compatible */ + return PCH_CNP; case INTEL_PCH_ICP_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Ice Lake PCH\n"); WARN_ON(!IS_ICELAKE(dev_priv)); @@ -217,20 +224,20 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv) * make an educated guess as to which PCH is really there. 
*/ - if (IS_GEN5(dev_priv)) - id = INTEL_PCH_IBX_DEVICE_ID_TYPE; - else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) - id = INTEL_PCH_CPT_DEVICE_ID_TYPE; + if (IS_ICELAKE(dev_priv)) + id = INTEL_PCH_ICP_DEVICE_ID_TYPE; + else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) + id = INTEL_PCH_CNP_DEVICE_ID_TYPE; + else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv)) + id = INTEL_PCH_SPT_DEVICE_ID_TYPE; else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) id = INTEL_PCH_LPT_DEVICE_ID_TYPE; - else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - id = INTEL_PCH_SPT_DEVICE_ID_TYPE; - else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) - id = INTEL_PCH_CNP_DEVICE_ID_TYPE; - else if (IS_ICELAKE(dev_priv)) - id = INTEL_PCH_ICP_DEVICE_ID_TYPE; + else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) + id = INTEL_PCH_CPT_DEVICE_ID_TYPE; + else if (IS_GEN(dev_priv, 5)) + id = INTEL_PCH_IBX_DEVICE_ID_TYPE; if (id) DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id); @@ -328,16 +335,16 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = dev_priv->overlay ? 1 : 0; break; case I915_PARAM_HAS_BSD: - value = !!dev_priv->engine[VCS]; + value = !!dev_priv->engine[VCS0]; break; case I915_PARAM_HAS_BLT: - value = !!dev_priv->engine[BCS]; + value = !!dev_priv->engine[BCS0]; break; case I915_PARAM_HAS_VEBOX: - value = !!dev_priv->engine[VECS]; + value = !!dev_priv->engine[VECS0]; break; case I915_PARAM_HAS_BSD2: - value = !!dev_priv->engine[VCS2]; + value = !!dev_priv->engine[VCS1]; break; case I915_PARAM_HAS_LLC: value = HAS_LLC(dev_priv); @@ -346,10 +353,10 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = HAS_WT(dev_priv); break; case I915_PARAM_HAS_ALIASING_PPGTT: - value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL); + value = INTEL_PPGTT(dev_priv); break; case I915_PARAM_HAS_SEMAPHORES: - value = HAS_LEGACY_SEMAPHORES(dev_priv); + value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES); break; case I915_PARAM_HAS_SECURE_BATCHES: value = capable(CAP_SYS_ADMIN); @@ -358,12 +365,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = i915_cmd_parser_get_version(dev_priv); break; case I915_PARAM_SUBSLICE_TOTAL: - value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu); + value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu); if (!value) return -ENODEV; break; case I915_PARAM_EU_TOTAL: - value = INTEL_INFO(dev_priv)->sseu.eu_total; + value = RUNTIME_INFO(dev_priv)->sseu.eu_total; if (!value) return -ENODEV; break; @@ -380,7 +387,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = HAS_POOLED_EU(dev_priv); break; case I915_PARAM_MIN_EU_IN_POOL: - value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool; + value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool; break; case I915_PARAM_HUC_STATUS: value = intel_huc_check_status(&dev_priv->huc); @@ -430,17 +437,17 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = intel_engines_has_context_isolation(dev_priv); break; case I915_PARAM_SLICE_MASK: - value = INTEL_INFO(dev_priv)->sseu.slice_mask; + value = RUNTIME_INFO(dev_priv)->sseu.slice_mask; if (!value) return -ENODEV; break; case I915_PARAM_SUBSLICE_MASK: - value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0]; + value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0]; if (!value) return -ENODEV; break; case I915_PARAM_CS_TIMESTAMP_FREQUENCY: - value = 
1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz; + value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz; break; case I915_PARAM_MMAP_GTT_COHERENT: value = INTEL_INFO(dev_priv)->has_coherent_ggtt; @@ -712,8 +719,7 @@ static int i915_load_modeset_init(struct drm_device *dev) return 0; cleanup_gem: - if (i915_gem_suspend(dev_priv)) - DRM_ERROR("failed to idle hardware; continuing to unload!\n"); + i915_gem_suspend(dev_priv); i915_gem_fini(dev_priv); cleanup_modeset: intel_modeset_cleanup(dev); @@ -755,39 +761,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) return ret; } -#if !defined(CONFIG_VGA_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return 0; -} -#elif !defined(CONFIG_DUMMY_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return -ENODEV; -} -#else -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - int ret = 0; - - DRM_INFO("Replacing VGA console driver\n"); - - console_lock(); - if (con_is_bound(&vga_con)) - ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); - if (ret == 0) { - ret = do_unregister_con_driver(&vga_con); - - /* Ignore "already unregistered". */ - if (ret == -ENODEV) - ret = 0; - } - console_unlock(); - - return ret; -} -#endif - static void intel_init_dpio(struct drm_i915_private *dev_priv) { /* @@ -895,17 +868,22 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) if (i915_inject_load_failure()) return -ENODEV; + intel_device_info_subplatform_init(dev_priv); + + intel_uncore_init_early(&dev_priv->uncore); + spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); mutex_init(&dev_priv->backlight_lock); - spin_lock_init(&dev_priv->uncore.lock); mutex_init(&dev_priv->sb_lock); mutex_init(&dev_priv->av_mutex); mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); + mutex_init(&dev_priv->hdcp_comp_mutex); i915_memcpy_init_early(dev_priv); + intel_runtime_pm_init_early(dev_priv); ret = i915_workqueues_init(dev_priv); if (ret < 0) @@ -960,46 +938,6 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) i915_engines_cleanup(dev_priv); } -static int i915_mmio_setup(struct drm_i915_private *dev_priv) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - int mmio_bar; - int mmio_size; - - mmio_bar = IS_GEN2(dev_priv) ? 1 : 0; - /* - * Before gen4, the registers and the GTT are behind different BARs. - * However, from gen4 onwards, the registers and the GTT are shared - * in the same BAR, so we want to restrict this ioremap from - * clobbering the GTT which we want ioremap_wc instead. Fortunately, - * the register BAR remains the same size for all the earlier - * generations up to Ironlake. 
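/*
 * Editor's sketch, not part of the diff: the i915_mmio_setup() removed above
 * (its job moves into intel_uncore_init_mmio()) chose the register BAR and
 * the ioremap size per generation: gen2 keeps its registers on BAR 1, later
 * parts use BAR 0, and only 512KiB is mapped before gen5 so the GTT half of
 * the shared gen4+ BAR is left for ioremap_wc.  The same selection reduced
 * to plain arithmetic:
 */
struct mmio_choice {
	int bar;
	unsigned long size;
};

static struct mmio_choice pick_mmio(int gen)
{
	struct mmio_choice c;

	c.bar = (gen == 2) ? 1 : 0;		/* registers move to BAR 0 after gen2 */
	c.size = (gen < 5) ? 512 * 1024		/* register block alone */
			   : 2 * 1024 * 1024;	/* gen5+ register BAR size */
	return c;
}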
- */ - if (INTEL_GEN(dev_priv) < 5) - mmio_size = 512 * 1024; - else - mmio_size = 2 * 1024 * 1024; - dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size); - if (dev_priv->regs == NULL) { - DRM_ERROR("failed to map registers\n"); - - return -EIO; - } - - /* Try to make sure MCHBAR is enabled before poking at it */ - intel_setup_mchbar(dev_priv); - - return 0; -} - -static void i915_mmio_cleanup(struct drm_i915_private *dev_priv) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - - intel_teardown_mchbar(dev_priv); - pci_iounmap(pdev, dev_priv->regs); -} - /** * i915_driver_init_mmio - setup device MMIO * @dev_priv: device private @@ -1019,15 +957,16 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) if (i915_get_bridge_dev(dev_priv)) return -EIO; - ret = i915_mmio_setup(dev_priv); + ret = intel_uncore_init_mmio(&dev_priv->uncore); if (ret < 0) goto err_bridge; - intel_uncore_init(dev_priv); + /* Try to make sure MCHBAR is enabled before poking at it */ + intel_setup_mchbar(dev_priv); intel_device_info_init_mmio(dev_priv); - intel_uncore_prune(dev_priv); + intel_uncore_prune_mmio_domains(&dev_priv->uncore); intel_uc_init_mmio(dev_priv); @@ -1040,8 +979,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) return 0; err_uncore: - intel_uncore_fini(dev_priv); - i915_mmio_cleanup(dev_priv); + intel_teardown_mchbar(dev_priv); + intel_uncore_fini_mmio(&dev_priv->uncore); err_bridge: pci_dev_put(dev_priv->bridge_dev); @@ -1054,8 +993,8 @@ err_bridge: */ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) { - intel_uncore_fini(dev_priv); - i915_mmio_cleanup(dev_priv); + intel_teardown_mchbar(dev_priv); + intel_uncore_fini_mmio(&dev_priv->uncore); pci_dev_put(dev_priv->bridge_dev); } @@ -1064,110 +1003,180 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv) intel_gvt_sanitize_options(dev_priv); } -static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank) +#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type + +static const char *intel_dram_type_str(enum intel_dram_type type) { - if (size == 0) - return I915_DRAM_RANK_INVALID; - if (rank == SKL_DRAM_RANK_SINGLE) - return I915_DRAM_RANK_SINGLE; - else if (rank == SKL_DRAM_RANK_DUAL) - return I915_DRAM_RANK_DUAL; + static const char * const str[] = { + DRAM_TYPE_STR(UNKNOWN), + DRAM_TYPE_STR(DDR3), + DRAM_TYPE_STR(DDR4), + DRAM_TYPE_STR(LPDDR3), + DRAM_TYPE_STR(LPDDR4), + }; + + if (type >= ARRAY_SIZE(str)) + type = INTEL_DRAM_UNKNOWN; - return I915_DRAM_RANK_INVALID; + return str[type]; } -static bool -skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width) +#undef DRAM_TYPE_STR + +static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) { - if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16) - return true; - else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32) - return true; - else if (rank == SKL_DRAM_RANK_SINGLE && width == 16 && size == 8) - return true; - else if (rank == SKL_DRAM_RANK_DUAL && width == 16 && size == 16) - return true; + return dimm->ranks * 64 / (dimm->width ?: 1); +} - return false; +/* Returns total GB for the whole DIMM */ +static int skl_get_dimm_size(u16 val) +{ + return val & SKL_DRAM_SIZE_MASK; } -static int -skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val) +static int skl_get_dimm_width(u16 val) { - u32 tmp_l, tmp_s; - u32 s_val = val >> SKL_DRAM_S_SHIFT; + if (skl_get_dimm_size(val) == 0) + return 0; - if (!val) - return -EINVAL; + switch (val & SKL_DRAM_WIDTH_MASK) { + case 
SKL_DRAM_WIDTH_X8: + case SKL_DRAM_WIDTH_X16: + case SKL_DRAM_WIDTH_X32: + val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT; + return 8 << val; + default: + MISSING_CASE(val); + return 0; + } +} + +static int skl_get_dimm_ranks(u16 val) +{ + if (skl_get_dimm_size(val) == 0) + return 0; + + val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT; + + return val + 1; +} + +/* Returns total GB for the whole DIMM */ +static int cnl_get_dimm_size(u16 val) +{ + return (val & CNL_DRAM_SIZE_MASK) / 2; +} + +static int cnl_get_dimm_width(u16 val) +{ + if (cnl_get_dimm_size(val) == 0) + return 0; + + switch (val & CNL_DRAM_WIDTH_MASK) { + case CNL_DRAM_WIDTH_X8: + case CNL_DRAM_WIDTH_X16: + case CNL_DRAM_WIDTH_X32: + val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT; + return 8 << val; + default: + MISSING_CASE(val); + return 0; + } +} + +static int cnl_get_dimm_ranks(u16 val) +{ + if (cnl_get_dimm_size(val) == 0) + return 0; - tmp_l = val & SKL_DRAM_SIZE_MASK; - tmp_s = s_val & SKL_DRAM_SIZE_MASK; + val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT; - if (tmp_l == 0 && tmp_s == 0) + return val + 1; +} + +static bool +skl_is_16gb_dimm(const struct dram_dimm_info *dimm) +{ + /* Convert total GB to Gb per DRAM device */ + return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16; +} + +static void +skl_dram_get_dimm_info(struct drm_i915_private *dev_priv, + struct dram_dimm_info *dimm, + int channel, char dimm_name, u16 val) +{ + if (INTEL_GEN(dev_priv) >= 10) { + dimm->size = cnl_get_dimm_size(val); + dimm->width = cnl_get_dimm_width(val); + dimm->ranks = cnl_get_dimm_ranks(val); + } else { + dimm->size = skl_get_dimm_size(val); + dimm->width = skl_get_dimm_width(val); + dimm->ranks = skl_get_dimm_ranks(val); + } + + DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n", + channel, dimm_name, dimm->size, dimm->width, dimm->ranks, + yesno(skl_is_16gb_dimm(dimm))); +} + +static int +skl_dram_get_channel_info(struct drm_i915_private *dev_priv, + struct dram_channel_info *ch, + int channel, u32 val) +{ + skl_dram_get_dimm_info(dev_priv, &ch->dimm_l, + channel, 'L', val & 0xffff); + skl_dram_get_dimm_info(dev_priv, &ch->dimm_s, + channel, 'S', val >> 16); + + if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) { + DRM_DEBUG_KMS("CH%u not populated\n", channel); return -EINVAL; + } - ch->l_info.size = tmp_l; - ch->s_info.size = tmp_s; - - tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT; - tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT; - ch->l_info.width = (1 << tmp_l) * 8; - ch->s_info.width = (1 << tmp_s) * 8; - - tmp_l = val & SKL_DRAM_RANK_MASK; - tmp_s = s_val & SKL_DRAM_RANK_MASK; - ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l); - ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s); - - if (ch->l_info.rank == I915_DRAM_RANK_DUAL || - ch->s_info.rank == I915_DRAM_RANK_DUAL) - ch->rank = I915_DRAM_RANK_DUAL; - else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE && - ch->s_info.rank == I915_DRAM_RANK_SINGLE) - ch->rank = I915_DRAM_RANK_DUAL; + if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2) + ch->ranks = 2; + else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1) + ch->ranks = 2; else - ch->rank = I915_DRAM_RANK_SINGLE; + ch->ranks = 1; - ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size, - ch->l_info.width) || - skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size, - ch->s_info.width); + ch->is_16gb_dimm = + skl_is_16gb_dimm(&ch->dimm_l) || + skl_is_16gb_dimm(&ch->dimm_s); - 
DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n", - ch->l_info.size, ch->l_info.width, - ch->l_info.rank ? "dual" : "single", - ch->s_info.size, ch->s_info.width, - ch->s_info.rank ? "dual" : "single"); + DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n", + channel, ch->ranks, yesno(ch->is_16gb_dimm)); return 0; } static bool -intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1, - struct dram_channel_info *ch0) +intel_is_dram_symmetric(const struct dram_channel_info *ch0, + const struct dram_channel_info *ch1) { - return (val_ch0 == val_ch1 && - (ch0->s_info.size == 0 || - (ch0->l_info.size == ch0->s_info.size && - ch0->l_info.width == ch0->s_info.width && - ch0->l_info.rank == ch0->s_info.rank))); + return !memcmp(ch0, ch1, sizeof(*ch0)) && + (ch0->dimm_s.size == 0 || + !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l))); } static int skl_dram_get_channels_info(struct drm_i915_private *dev_priv) { struct dram_info *dram_info = &dev_priv->dram_info; - struct dram_channel_info ch0, ch1; - u32 val_ch0, val_ch1; + struct dram_channel_info ch0 = {}, ch1 = {}; + u32 val; int ret; - val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN); - ret = skl_dram_get_channel_info(&ch0, val_ch0); + val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN); + ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val); if (ret == 0) dram_info->num_channels++; - val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN); - ret = skl_dram_get_channel_info(&ch1, val_ch1); + val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN); + ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val); if (ret == 0) dram_info->num_channels++; @@ -1181,28 +1190,47 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv) * will be same as if single rank memory, so consider single rank * memory. */ - if (ch0.rank == I915_DRAM_RANK_SINGLE || - ch1.rank == I915_DRAM_RANK_SINGLE) - dram_info->rank = I915_DRAM_RANK_SINGLE; + if (ch0.ranks == 1 || ch1.ranks == 1) + dram_info->ranks = 1; else - dram_info->rank = max(ch0.rank, ch1.rank); + dram_info->ranks = max(ch0.ranks, ch1.ranks); - if (dram_info->rank == I915_DRAM_RANK_INVALID) { + if (dram_info->ranks == 0) { DRM_INFO("couldn't get memory rank information\n"); return -EINVAL; } dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm; - dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0, - val_ch1, - &ch0); + dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1); - DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n", - dev_priv->dram_info.symmetric_memory ? "" : "not "); + DRM_DEBUG_KMS("Memory configuration is symmetric? 
%s\n", + yesno(dram_info->symmetric_memory)); return 0; } +static enum intel_dram_type +skl_get_dram_type(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN); + + switch (val & SKL_DRAM_DDR_TYPE_MASK) { + case SKL_DRAM_DDR_TYPE_DDR3: + return INTEL_DRAM_DDR3; + case SKL_DRAM_DDR_TYPE_DDR4: + return INTEL_DRAM_DDR4; + case SKL_DRAM_DDR_TYPE_LPDDR3: + return INTEL_DRAM_LPDDR3; + case SKL_DRAM_DDR_TYPE_LPDDR4: + return INTEL_DRAM_LPDDR4; + default: + MISSING_CASE(val); + return INTEL_DRAM_UNKNOWN; + } +} + static int skl_get_dram_info(struct drm_i915_private *dev_priv) { @@ -1210,6 +1238,9 @@ skl_get_dram_info(struct drm_i915_private *dev_priv) u32 mem_freq_khz, val; int ret; + dram_info->type = skl_get_dram_type(dev_priv); + DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type)); + ret = skl_dram_get_channels_info(dev_priv); if (ret) return ret; @@ -1230,6 +1261,85 @@ skl_get_dram_info(struct drm_i915_private *dev_priv) return 0; } +/* Returns Gb per DRAM device */ +static int bxt_get_dimm_size(u32 val) +{ + switch (val & BXT_DRAM_SIZE_MASK) { + case BXT_DRAM_SIZE_4GBIT: + return 4; + case BXT_DRAM_SIZE_6GBIT: + return 6; + case BXT_DRAM_SIZE_8GBIT: + return 8; + case BXT_DRAM_SIZE_12GBIT: + return 12; + case BXT_DRAM_SIZE_16GBIT: + return 16; + default: + MISSING_CASE(val); + return 0; + } +} + +static int bxt_get_dimm_width(u32 val) +{ + if (!bxt_get_dimm_size(val)) + return 0; + + val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT; + + return 8 << val; +} + +static int bxt_get_dimm_ranks(u32 val) +{ + if (!bxt_get_dimm_size(val)) + return 0; + + switch (val & BXT_DRAM_RANK_MASK) { + case BXT_DRAM_RANK_SINGLE: + return 1; + case BXT_DRAM_RANK_DUAL: + return 2; + default: + MISSING_CASE(val); + return 0; + } +} + +static enum intel_dram_type bxt_get_dimm_type(u32 val) +{ + if (!bxt_get_dimm_size(val)) + return INTEL_DRAM_UNKNOWN; + + switch (val & BXT_DRAM_TYPE_MASK) { + case BXT_DRAM_TYPE_DDR3: + return INTEL_DRAM_DDR3; + case BXT_DRAM_TYPE_LPDDR3: + return INTEL_DRAM_LPDDR3; + case BXT_DRAM_TYPE_DDR4: + return INTEL_DRAM_DDR4; + case BXT_DRAM_TYPE_LPDDR4: + return INTEL_DRAM_LPDDR4; + default: + MISSING_CASE(val); + return INTEL_DRAM_UNKNOWN; + } +} + +static void bxt_get_dimm_info(struct dram_dimm_info *dimm, + u32 val) +{ + dimm->width = bxt_get_dimm_width(val); + dimm->ranks = bxt_get_dimm_ranks(val); + + /* + * Size in register is Gb per DRAM device. Convert to total + * GB to match the way we report this for non-LP platforms. + */ + dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8; +} + static int bxt_get_dram_info(struct drm_i915_private *dev_priv) { @@ -1258,57 +1368,44 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv) * Now read each DUNIT8/9/10/11 to check the rank of each dimms. 
*/ for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) { - u8 size, width; - enum dram_rank rank; - u32 tmp; + struct dram_dimm_info dimm; + enum intel_dram_type type; val = I915_READ(BXT_D_CR_DRP0_DUNIT(i)); if (val == 0xFFFFFFFF) continue; dram_info->num_channels++; - tmp = val & BXT_DRAM_RANK_MASK; - - if (tmp == BXT_DRAM_RANK_SINGLE) - rank = I915_DRAM_RANK_SINGLE; - else if (tmp == BXT_DRAM_RANK_DUAL) - rank = I915_DRAM_RANK_DUAL; - else - rank = I915_DRAM_RANK_INVALID; - - tmp = val & BXT_DRAM_SIZE_MASK; - if (tmp == BXT_DRAM_SIZE_4GB) - size = 4; - else if (tmp == BXT_DRAM_SIZE_6GB) - size = 6; - else if (tmp == BXT_DRAM_SIZE_8GB) - size = 8; - else if (tmp == BXT_DRAM_SIZE_12GB) - size = 12; - else if (tmp == BXT_DRAM_SIZE_16GB) - size = 16; - else - size = 0; - - tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT; - width = (1 << tmp) * 8; - DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size, - width, rank == I915_DRAM_RANK_SINGLE ? "single" : - rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown"); + + bxt_get_dimm_info(&dimm, val); + type = bxt_get_dimm_type(val); + + WARN_ON(type != INTEL_DRAM_UNKNOWN && + dram_info->type != INTEL_DRAM_UNKNOWN && + dram_info->type != type); + + DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n", + i - BXT_D_CR_DRP0_DUNIT_START, + dimm.size, dimm.width, dimm.ranks, + intel_dram_type_str(type)); /* * If any of the channel is single rank channel, * worst case output will be same as if single rank * memory, so consider single rank memory. */ - if (dram_info->rank == I915_DRAM_RANK_INVALID) - dram_info->rank = rank; - else if (rank == I915_DRAM_RANK_SINGLE) - dram_info->rank = I915_DRAM_RANK_SINGLE; + if (dram_info->ranks == 0) + dram_info->ranks = dimm.ranks; + else if (dimm.ranks == 1) + dram_info->ranks = 1; + + if (type != INTEL_DRAM_UNKNOWN) + dram_info->type = type; } - if (dram_info->rank == I915_DRAM_RANK_INVALID) { - DRM_INFO("couldn't get memory rank information\n"); + if (dram_info->type == INTEL_DRAM_UNKNOWN || + dram_info->ranks == 0) { + DRM_INFO("couldn't get memory information\n"); return -EINVAL; } @@ -1320,14 +1417,8 @@ static void intel_get_dram_info(struct drm_i915_private *dev_priv) { struct dram_info *dram_info = &dev_priv->dram_info; - char bandwidth_str[32]; int ret; - dram_info->valid = false; - dram_info->rank = I915_DRAM_RANK_INVALID; - dram_info->bandwidth_kbps = 0; - dram_info->num_channels = 0; - /* * Assume 16Gb DIMMs are present until proven otherwise. 
* This is only used for the level 0 watermark latency @@ -1335,28 +1426,61 @@ intel_get_dram_info(struct drm_i915_private *dev_priv) */ dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv); - if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv)) + if (INTEL_GEN(dev_priv) < 9) return; - /* Need to calculate bandwidth only for Gen9 */ - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) ret = bxt_get_dram_info(dev_priv); - else if (IS_GEN9(dev_priv)) - ret = skl_get_dram_info(dev_priv); else - ret = skl_dram_get_channels_info(dev_priv); + ret = skl_get_dram_info(dev_priv); if (ret) return; - if (dram_info->bandwidth_kbps) - sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps); + DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n", + dram_info->bandwidth_kbps, + dram_info->num_channels); + + DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n", + dram_info->ranks, yesno(dram_info->is_16gb_dimm)); +} + +static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap) +{ + const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; + const unsigned int sets[4] = { 1, 1, 2, 2 }; + + return EDRAM_NUM_BANKS(cap) * + ways[EDRAM_WAYS_IDX(cap)] * + sets[EDRAM_SETS_IDX(cap)]; +} + +static void edram_detect(struct drm_i915_private *dev_priv) +{ + u32 edram_cap = 0; + + if (!(IS_HASWELL(dev_priv) || + IS_BROADWELL(dev_priv) || + INTEL_GEN(dev_priv) >= 9)) + return; + + edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP); + + /* NB: We can't write IDICR yet because we don't have gt funcs set up */ + + if (!(edram_cap & EDRAM_ENABLED)) + return; + + /* + * The needed capability bits for size calculation are not there with + * pre gen9 so return 128MB always. + */ + if (INTEL_GEN(dev_priv) < 9) + dev_priv->edram_size_mb = 128; else - sprintf(bandwidth_str, "unknown"); - DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n", - bandwidth_str, dram_info->num_channels); - DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n", - (dram_info->rank == I915_DRAM_RANK_DUAL) ? 
- "dual" : "single", yesno(dram_info->is_16gb_dimm)); + dev_priv->edram_size_mb = + gen9_edram_size_mb(dev_priv, edram_cap); + + DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb); } /** @@ -1374,11 +1498,11 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (i915_inject_load_failure()) return -ENODEV; - intel_device_info_runtime_init(mkwrite_device_info(dev_priv)); + intel_device_info_runtime_init(dev_priv); if (HAS_PPGTT(dev_priv)) { if (intel_vgpu_active(dev_priv) && - !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) { + !intel_vgpu_has_full_ppgtt(dev_priv)) { i915_report_error(dev_priv, "incompatible vGPU found, support for isolated ppGTT required\n"); return -ENXIO; @@ -1401,6 +1525,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) intel_sanitize_options(dev_priv); + /* needs to be done before ggtt probe */ + edram_detect(dev_priv); + i915_perf_init(dev_priv); ret = i915_ggtt_probe_hw(dev_priv); @@ -1417,7 +1544,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) goto err_ggtt; } - ret = i915_kick_out_vgacon(dev_priv); + ret = vga_remove_vgacon(pdev); if (ret) { DRM_ERROR("failed to remove conflicting VGA console\n"); goto err_ggtt; @@ -1436,7 +1563,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pci_set_master(pdev); /* overlay on gen2 is broken and can't address above 1G */ - if (IS_GEN2(dev_priv)) { + if (IS_GEN(dev_priv, 2)) { ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); if (ret) { DRM_ERROR("failed to set DMA mask\n"); @@ -1574,7 +1701,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) acpi_video_register(); } - if (IS_GEN5(dev_priv)) + if (IS_GEN(dev_priv, 5)) intel_gpu_ips_init(dev_priv); intel_audio_init(dev_priv); @@ -1636,8 +1763,16 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) if (drm_debug & DRM_UT_DRIVER) { struct drm_printer p = drm_debug_printer("i915 device info:"); - intel_device_info_dump(&dev_priv->info, &p); - intel_device_info_dump_runtime(&dev_priv->info, &p); + drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n", + INTEL_DEVID(dev_priv), + INTEL_REVID(dev_priv), + intel_platform_name(INTEL_INFO(dev_priv)->platform), + intel_subplatform(RUNTIME_INFO(dev_priv), + INTEL_INFO(dev_priv)->platform), + INTEL_GEN(dev_priv)); + + intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p); + intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p); } if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) @@ -1674,10 +1809,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup the write-once "constant" device info */ device_info = mkwrite_device_info(i915); memcpy(device_info, match_info, sizeof(*device_info)); - device_info->device_id = pdev->device; + RUNTIME_INFO(i915)->device_id = pdev->device; - BUILD_BUG_ON(INTEL_MAX_PLATFORMS > - BITS_PER_TYPE(device_info->platform_mask)); BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask)); return i915; @@ -1774,8 +1907,10 @@ void i915_driver_unload(struct drm_device *dev) i915_driver_unregister(dev_priv); - if (i915_gem_suspend(dev_priv)) - DRM_ERROR("failed to idle hardware; continuing to unload!\n"); + /* Flush any external code that still may be under the RCU lock */ + synchronize_rcu(); + + i915_gem_suspend(dev_priv); drm_atomic_helper_shutdown(dev); @@ -1802,8 +1937,7 @@ void i915_driver_unload(struct drm_device *dev) i915_driver_cleanup_mmio(dev_priv); enable_rpm_wakeref_asserts(dev_priv); - - 
WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count)); + intel_runtime_pm_cleanup(dev_priv); } static void i915_driver_release(struct drm_device *dev) @@ -1884,7 +2018,6 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv) static int i915_drm_prepare(struct drm_device *dev) { struct drm_i915_private *i915 = to_i915(dev); - int err; /* * NB intel_display_suspend() may issue new requests after we've @@ -1892,12 +2025,9 @@ static int i915_drm_prepare(struct drm_device *dev) * split out that work and pull it forward so that after point, * the GPU is not woken again. */ - err = i915_gem_suspend(i915); - if (err) - dev_err(&i915->drm.pdev->dev, - "GEM idle failed, suspend/resume might fail\n"); + i915_gem_suspend(i915); - return err; + return 0; } static int i915_drm_suspend(struct drm_device *dev) @@ -1967,7 +2097,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) i915_gem_suspend_late(dev_priv); - intel_uncore_suspend(dev_priv); + intel_uncore_suspend(&dev_priv->uncore); intel_power_domains_suspend(dev_priv, get_suspend_mode(dev_priv, hibernation)); @@ -2005,6 +2135,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) out: enable_rpm_wakeref_asserts(dev_priv); + if (!dev_priv->uncore.user_forcewake.count) + intel_runtime_pm_cleanup(dev_priv); return ret; } @@ -2161,7 +2293,9 @@ static int i915_drm_resume_early(struct drm_device *dev) DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", ret); - intel_uncore_resume_early(dev_priv); + intel_uncore_resume_early(&dev_priv->uncore); + + i915_check_and_clear_faults(dev_priv); if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) { gen9_sanitize_dc_state(dev_priv); @@ -2174,7 +2308,7 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_power_domains_resume(dev_priv); - intel_engines_sanitize(dev_priv); + intel_engines_sanitize(dev_priv, true); enable_rpm_wakeref_asserts(dev_priv); @@ -2195,210 +2329,6 @@ static int i915_resume_switcheroo(struct drm_device *dev) return i915_drm_resume(dev); } -/** - * i915_reset - reset chip after a hang - * @i915: #drm_i915_private to reset - * @stalled_mask: mask of the stalled engines with the guilty requests - * @reason: user error message for why we are resetting - * - * Reset the chip. Useful if a hang is detected. Marks the device as wedged - * on failure. - * - * Caller must hold the struct_mutex. - * - * Procedure is fairly simple: - * - reset the chip using the reset reg - * - re-init context state - * - re-init hardware status page - * - re-init ring buffer - * - re-init interrupt state - * - re-init display - */ -void i915_reset(struct drm_i915_private *i915, - unsigned int stalled_mask, - const char *reason) -{ - struct i915_gpu_error *error = &i915->gpu_error; - int ret; - int i; - - GEM_TRACE("flags=%lx\n", error->flags); - - might_sleep(); - lockdep_assert_held(&i915->drm.struct_mutex); - GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); - - if (!test_bit(I915_RESET_HANDOFF, &error->flags)) - return; - - /* Clear any previous failed attempts at recovery. Time to try again. 
*/ - if (!i915_gem_unset_wedged(i915)) - goto wakeup; - - if (reason) - dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); - error->reset_count++; - - ret = i915_gem_reset_prepare(i915); - if (ret) { - dev_err(i915->drm.dev, "GPU recovery failed\n"); - goto taint; - } - - if (!intel_has_gpu_reset(i915)) { - if (i915_modparams.reset) - dev_err(i915->drm.dev, "GPU reset not supported\n"); - else - DRM_DEBUG_DRIVER("GPU reset disabled\n"); - goto error; - } - - for (i = 0; i < 3; i++) { - ret = intel_gpu_reset(i915, ALL_ENGINES); - if (ret == 0) - break; - - msleep(100); - } - if (ret) { - dev_err(i915->drm.dev, "Failed to reset chip\n"); - goto taint; - } - - /* Ok, now get things going again... */ - - /* - * Everything depends on having the GTT running, so we need to start - * there. - */ - ret = i915_ggtt_enable_hw(i915); - if (ret) { - DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n", - ret); - goto error; - } - - i915_gem_reset(i915, stalled_mask); - intel_overlay_reset(i915); - - /* - * Next we need to restore the context, but we don't use those - * yet either... - * - * Ring buffer needs to be re-initialized in the KMS case, or if X - * was running at the time of the reset (i.e. we weren't VT - * switched away). - */ - ret = i915_gem_init_hw(i915); - if (ret) { - DRM_ERROR("Failed to initialise HW following reset (%d)\n", - ret); - goto error; - } - - i915_queue_hangcheck(i915); - -finish: - i915_gem_reset_finish(i915); -wakeup: - clear_bit(I915_RESET_HANDOFF, &error->flags); - wake_up_bit(&error->flags, I915_RESET_HANDOFF); - return; - -taint: - /* - * History tells us that if we cannot reset the GPU now, we - * never will. This then impacts everything that is run - * subsequently. On failing the reset, we mark the driver - * as wedged, preventing further execution on the GPU. - * We also want to go one step further and add a taint to the - * kernel so that any subsequent faults can be traced back to - * this failure. This is important for CI, where if the - * GPU/driver fails we would like to reboot and restart testing - * rather than continue on into oblivion. For everyone else, - * the system should still plod along, but they have been warned! - */ - add_taint(TAINT_WARN, LOCKDEP_STILL_OK); -error: - i915_gem_set_wedged(i915); - i915_retire_requests(i915); - goto finish; -} - -static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) -{ - return intel_gpu_reset(dev_priv, intel_engine_flag(engine)); -} - -/** - * i915_reset_engine - reset GPU engine to recover from a hang - * @engine: engine to reset - * @msg: reason for GPU reset; or NULL for no dev_notice() - * - * Reset a specific GPU engine. Useful if a hang is detected. - * Returns zero on successful reset or otherwise an error code. - * - * Procedure is: - * - identifies the request that caused the hang and it is dropped - * - reset engine (which will force the engine to idle) - * - re-init/configure engine - */ -int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) -{ - struct i915_gpu_error *error = &engine->i915->gpu_error; - struct i915_request *active_request; - int ret; - - GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); - GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); - - active_request = i915_gem_reset_prepare_engine(engine); - if (IS_ERR_OR_NULL(active_request)) { - /* Either the previous reset failed, or we pardon the reset. 
*/ - ret = PTR_ERR(active_request); - goto out; - } - - if (msg) - dev_notice(engine->i915->drm.dev, - "Resetting %s for %s\n", engine->name, msg); - error->reset_engine_count[engine->id]++; - - if (!engine->i915->guc.execbuf_client) - ret = intel_gt_reset_engine(engine->i915, engine); - else - ret = intel_guc_reset_engine(&engine->i915->guc, engine); - if (ret) { - /* If we fail here, we expect to fallback to a global reset */ - DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", - engine->i915->guc.execbuf_client ? "GuC " : "", - engine->name, ret); - goto out; - } - - /* - * The request that caused the hang is stuck on elsp, we know the - * active request and can drop it, adjust head to skip the offending - * request to resume executing remaining requests in the queue. - */ - i915_gem_reset_engine(engine, active_request, true); - - /* - * The engine and its registers (and workarounds in case of render) - * have been reset to their default values. Follow the init_ring - * process to program RING_MODE, HWSP and re-enable submission. - */ - ret = engine->init_hw(engine); - if (ret) - goto out; - -out: - intel_engine_cancel_stop_cs(engine); - i915_gem_reset_finish_engine(engine); - return ret; -} - static int i915_pm_prepare(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); @@ -2736,6 +2666,10 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, u32 mask, u32 val) { + i915_reg_t reg = VLV_GTLC_PW_STATUS; + u32 reg_value; + int ret; + /* The HW does not like us polling for PW_STATUS frequently, so * use the sleeping loop rather than risk the busy spin within * intel_wait_for_register(). @@ -2743,8 +2677,12 @@ static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, * Transitioning between RC6 states should be at most 2ms (see * valleyview_enable_rps) so use a 3ms timeout. 
*/ - return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val, - 3); + ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3); + + /* just trace the final value */ + trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true); + + return ret; } int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) @@ -2761,7 +2699,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) if (!force_on) return 0; - err = intel_wait_for_register(dev_priv, + err = intel_wait_for_register(&dev_priv->uncore, VLV_GTLC_SURVIVABILITY_REG, VLV_GFX_CLK_STATUS_BIT, VLV_GFX_CLK_STATUS_BIT, @@ -2927,7 +2865,7 @@ static int intel_runtime_suspend(struct device *kdev) intel_runtime_pm_disable_interrupts(dev_priv); - intel_uncore_suspend(dev_priv); + intel_uncore_suspend(&dev_priv->uncore); ret = 0; if (INTEL_GEN(dev_priv) >= 11) { @@ -2944,7 +2882,7 @@ static int intel_runtime_suspend(struct device *kdev) if (ret) { DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); - intel_uncore_runtime_resume(dev_priv); + intel_uncore_runtime_resume(&dev_priv->uncore); intel_runtime_pm_enable_interrupts(dev_priv); @@ -2959,9 +2897,9 @@ static int intel_runtime_suspend(struct device *kdev) } enable_rpm_wakeref_asserts(dev_priv); - WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count)); + intel_runtime_pm_cleanup(dev_priv); - if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) + if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) DRM_ERROR("Unclaimed access detected prior to suspending\n"); dev_priv->runtime_pm.suspended = true; @@ -2989,7 +2927,7 @@ static int intel_runtime_suspend(struct device *kdev) intel_opregion_notify_adapter(dev_priv, PCI_D1); } - assert_forcewakes_inactive(dev_priv); + assert_forcewakes_inactive(&dev_priv->uncore); if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_poll_init(dev_priv); @@ -3015,7 +2953,7 @@ static int intel_runtime_resume(struct device *kdev) intel_opregion_notify_adapter(dev_priv, PCI_D0); dev_priv->runtime_pm.suspended = false; - if (intel_uncore_unclaimed_mmio(dev_priv)) + if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); if (INTEL_GEN(dev_priv) >= 11) { @@ -3041,7 +2979,7 @@ static int intel_runtime_resume(struct device *kdev) ret = vlv_resume_prepare(dev_priv, true); } - intel_uncore_runtime_resume(dev_priv); + intel_uncore_runtime_resume(&dev_priv->uncore); intel_runtime_pm_enable_interrupts(dev_priv); @@ -3185,7 +3123,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), @@ -3203,7 +3141,7 @@ static struct drm_driver driver = { * deal with them for Intel hardware. 
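The vlv_wait_for_pw_status() change above keeps the last value observed by the sleeping poll and emits a single trace event for it instead of tracing every readback. A hedged sketch of that pattern as a standalone helper; the function name is illustrative, while wait_for(), I915_READ_NOTRACE() and trace_i915_reg_rw() are the helpers already used in the hunk.

    static int poll_reg_trace_final(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg, u32 mask, u32 val,
                                    unsigned int timeout_ms)
    {
            u32 reg_value;
            int ret;

            /* Sleeping poll; the assignment inside the condition keeps the
             * most recent readback around once wait_for() returns. */
            ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val,
                           timeout_ms);

            /* Trace only the final value, whether the wait succeeded or not. */
            trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

            return ret;
    }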
*/ .driver_features = - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | + DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ, .release = i915_driver_release, .open = i915_driver_open, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b1c31967194b..4af815c3c02d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -45,8 +45,8 @@ #include <linux/pm_qos.h> #include <linux/reservation.h> #include <linux/shmem_fs.h> +#include <linux/stackdepot.h> -#include <drm/drmP.h> #include <drm/intel-gtt.h> #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ #include <drm/drm_gem.h> @@ -54,6 +54,8 @@ #include <drm/drm_cache.h> #include <drm/drm_util.h> #include <drm/drm_dsc.h> +#include <drm/drm_connector.h> +#include <drm/i915_mei_hdcp_interface.h> #include "i915_fixed.h" #include "i915_params.h" @@ -90,8 +92,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20181204" -#define DRIVER_TIMESTAMP 1543944377 +#define DRIVER_DATE "20190404" +#define DRIVER_TIMESTAMP 1554389037 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions @@ -130,6 +132,8 @@ bool i915_error_injected(void); __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ fmt, ##__VA_ARGS__) +typedef depot_stack_handle_t intel_wakeref_t; + enum hpd_pin { HPD_NONE = 0, HPD_TV = HPD_NONE, /* TV is known to be unreliable */ @@ -212,11 +216,12 @@ struct drm_i915_file_private { */ #define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20) } mm; + struct idr context_idr; + struct mutex context_idr_lock; /* guards context_idr */ - struct intel_rps_client { - atomic_t boosts; - } rps_client; + struct idr vm_idr; + struct mutex vm_idr_lock; /* guards vm_idr */ unsigned int bsd_engine; @@ -277,20 +282,19 @@ struct drm_i915_display_funcs { void (*get_cdclk)(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state); void (*set_cdclk)(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state); + const struct intel_cdclk_state *cdclk_state, + enum pipe pipe); int (*get_fifo_size)(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane); int (*compute_pipe_wm)(struct intel_crtc_state *cstate); - int (*compute_intermediate_wm)(struct drm_device *dev, - struct intel_crtc *intel_crtc, - struct intel_crtc_state *newstate); + int (*compute_intermediate_wm)(struct intel_crtc_state *newstate); void (*initial_watermarks)(struct intel_atomic_state *state, struct intel_crtc_state *cstate); void (*atomic_update_watermarks)(struct intel_atomic_state *state, struct intel_crtc_state *cstate); void (*optimize_watermarks)(struct intel_atomic_state *state, struct intel_crtc_state *cstate); - int (*compute_global_watermarks)(struct drm_atomic_state *state); + int (*compute_global_watermarks)(struct intel_atomic_state *state); void (*update_wm)(struct intel_crtc *crtc); int (*modeset_calc_cdclk)(struct drm_atomic_state *state); /* Returns the active state of the crtc, and if the crtc is active, @@ -322,8 +326,21 @@ struct drm_i915_display_funcs { /* display clock increase/decrease */ /* pll clock increase/decrease */ - void (*load_csc_matrix)(struct drm_crtc_state *crtc_state); - void (*load_luts)(struct drm_crtc_state *crtc_state); + int (*color_check)(struct intel_crtc_state *crtc_state); + /* + * Program double buffered color management registers during + 
* vblank evasion. The registers should then latch during the + * next vblank start, alongside any other double buffered registers + * involved with the same commit. + */ + void (*color_commit)(const struct intel_crtc_state *crtc_state); + /* + * Load LUTs (and other single buffered color management + * registers). Will (hopefully) be called during the vblank + * following the latching of any double buffered registers + * involved with the same commit. + */ + void (*load_luts)(const struct intel_crtc_state *crtc_state); }; #define CSR_VERSION(major, minor) ((major) << 16 | (minor)) @@ -333,16 +350,17 @@ struct drm_i915_display_funcs { struct intel_csr { struct work_struct work; const char *fw_path; - uint32_t required_version; - uint32_t max_fw_size; /* bytes */ - uint32_t *dmc_payload; - uint32_t dmc_fw_size; /* dwords */ - uint32_t version; - uint32_t mmio_count; + u32 required_version; + u32 max_fw_size; /* bytes */ + u32 *dmc_payload; + u32 dmc_fw_size; /* dwords */ + u32 version; + u32 mmio_count; i915_reg_t mmioaddr[8]; - uint32_t mmiodata[8]; - uint32_t dc_state; - uint32_t allowed_dc_mask; + u32 mmiodata[8]; + u32 dc_state; + u32 allowed_dc_mask; + intel_wakeref_t wakeref; }; enum i915_cache_level { @@ -398,7 +416,7 @@ struct intel_fbc { struct { unsigned int mode_flags; - uint32_t hsw_bdw_pixel_rate; + u32 hsw_bdw_pixel_rate; } crtc; struct { @@ -417,7 +435,7 @@ struct intel_fbc { int y; - uint16_t pixel_blend_mode; + u16 pixel_blend_mode; } plane; struct { @@ -494,7 +512,7 @@ struct i915_psr { u32 debug; bool sink_support; - bool prepared, enabled; + bool enabled; struct intel_dp *dp; enum pipe pipe; bool active; @@ -509,18 +527,25 @@ struct i915_psr { ktime_t last_exit; bool sink_not_reliable; bool irq_aux_error; + u16 su_x_granularity; }; +/* + * Sorted by south display engine compatibility. + * If the new PCH comes with a south display engine that is not + * inherited from the latest item, please do not add it to the + * end. Instead, add it right after its "parent" PCH. + */ enum intel_pch { + PCH_NOP = -1, /* PCH without south display */ PCH_NONE = 0, /* No PCH present */ PCH_IBX, /* Ibexpeak PCH */ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */ PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */ PCH_SPT, /* Sunrisepoint PCH */ PCH_KBP, /* Kaby Lake PCH */ - PCH_CNP, /* Cannon Lake PCH */ + PCH_CNP, /* Cannon/Comet Lake PCH */ PCH_ICP, /* Ice Lake PCH */ - PCH_NOP, /* PCH without south display */ }; enum intel_sbi_destination { @@ -556,7 +581,7 @@ struct i915_suspend_saved_registers { u32 saveSWF0[16]; u32 saveSWF1[16]; u32 saveSWF3[3]; - uint64_t saveFENCE[I915_MAX_NUM_FENCES]; + u64 saveFENCE[I915_MAX_NUM_FENCES]; u32 savePCH_PORT_HOTPLUG; u16 saveGCDGMBUS; }; @@ -819,6 +844,8 @@ struct i915_power_domains { bool display_core_suspended; int power_well_count; + intel_wakeref_t wakeref; + struct mutex lock; int domain_use_count[POWER_DOMAIN_NUM]; struct i915_power_well *power_wells; @@ -901,9 +928,9 @@ struct i915_gem_mm { atomic_t bsd_engine_dispatch_index; /** Bit 6 swizzling required for X tiling */ - uint32_t bit_6_swizzle_x; + u32 bit_6_swizzle_x; /** Bit 6 swizzling required for Y tiling */ - uint32_t bit_6_swizzle_y; + u32 bit_6_swizzle_y; /* accounting, useful for userland debugging */ spinlock_t object_stat_lock; @@ -930,18 +957,21 @@ struct ddi_vbt_port_info { * populate this field. 
*/ #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff - uint8_t hdmi_level_shift; + u8 hdmi_level_shift; - uint8_t supports_dvi:1; - uint8_t supports_hdmi:1; - uint8_t supports_dp:1; - uint8_t supports_edp:1; + u8 present:1; + u8 supports_dvi:1; + u8 supports_hdmi:1; + u8 supports_dp:1; + u8 supports_edp:1; + u8 supports_typec_usb:1; + u8 supports_tbt:1; - uint8_t alternate_aux_channel; - uint8_t alternate_ddc_pin; + u8 alternate_aux_channel; + u8 alternate_ddc_pin; - uint8_t dp_boost_level; - uint8_t hdmi_boost_level; + u8 dp_boost_level; + u8 hdmi_boost_level; int dp_max_link_rate; /* 0 for not limited by VBT */ }; @@ -990,6 +1020,7 @@ struct intel_vbt_data { enum psr_lines_to_wait lines_to_wait; int tp1_wakeup_time_us; int tp2_tp3_wakeup_time_us; + int psr2_tp2_tp3_wakeup_time_us; } psr; struct { @@ -1032,41 +1063,41 @@ enum intel_ddb_partitioning { struct intel_wm_level { bool enable; - uint32_t pri_val; - uint32_t spr_val; - uint32_t cur_val; - uint32_t fbc_val; + u32 pri_val; + u32 spr_val; + u32 cur_val; + u32 fbc_val; }; struct ilk_wm_values { - uint32_t wm_pipe[3]; - uint32_t wm_lp[3]; - uint32_t wm_lp_spr[3]; - uint32_t wm_linetime[3]; + u32 wm_pipe[3]; + u32 wm_lp[3]; + u32 wm_lp_spr[3]; + u32 wm_linetime[3]; bool enable_fbc_wm; enum intel_ddb_partitioning partitioning; }; struct g4x_pipe_wm { - uint16_t plane[I915_MAX_PLANES]; - uint16_t fbc; + u16 plane[I915_MAX_PLANES]; + u16 fbc; }; struct g4x_sr_wm { - uint16_t plane; - uint16_t cursor; - uint16_t fbc; + u16 plane; + u16 cursor; + u16 fbc; }; struct vlv_wm_ddl_values { - uint8_t plane[I915_MAX_PLANES]; + u8 plane[I915_MAX_PLANES]; }; struct vlv_wm_values { struct g4x_pipe_wm pipe[3]; struct g4x_sr_wm sr; struct vlv_wm_ddl_values ddl[3]; - uint8_t level; + u8 level; bool cxsr; }; @@ -1080,10 +1111,10 @@ struct g4x_wm_values { }; struct skl_ddb_entry { - uint16_t start, end; /* in number of blocks, 'end' is exclusive */ + u16 start, end; /* in number of blocks, 'end' is exclusive */ }; -static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry) +static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry) { return entry->end - entry->start; } @@ -1107,9 +1138,11 @@ struct skl_ddb_values { }; struct skl_wm_level { - uint16_t plane_res_b; - uint8_t plane_res_l; + u16 min_ddb_alloc; + u16 plane_res_b; + u8 plane_res_l; bool plane_en; + bool ignore_lines; }; /* Stores plane specific WM parameters */ @@ -1117,15 +1150,15 @@ struct skl_wm_params { bool x_tiled, y_tiled; bool rc_surface; bool is_planar; - uint32_t width; - uint8_t cpp; - uint32_t plane_pixel_rate; - uint32_t y_min_scanlines; - uint32_t plane_bytes_per_line; + u32 width; + u8 cpp; + u32 plane_pixel_rate; + u32 y_min_scanlines; + u32 plane_bytes_per_line; uint_fixed_16_16_t plane_blocks_per_line; uint_fixed_16_16_t y_tile_minimum; - uint32_t linetime_us; - uint32_t dbuf_block_size; + u32 linetime_us; + u32 dbuf_block_size; }; /* @@ -1155,13 +1188,36 @@ struct i915_runtime_pm { atomic_t wakeref_count; bool suspended; bool irqs_enabled; + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + /* + * To aide detection of wakeref leaks and general misuse, we + * track all wakeref holders. With manual markup (i.e. returning + * a cookie to each rpm_get caller which they then supply to their + * paired rpm_put) we can remove corresponding pairs of and keep + * the array trimmed to active wakerefs. 
+ */ + struct intel_runtime_pm_debug { + spinlock_t lock; + + depot_stack_handle_t last_acquire; + depot_stack_handle_t last_release; + + depot_stack_handle_t *owners; + unsigned long count; + } debug; +#endif }; enum intel_pipe_crc_source { INTEL_PIPE_CRC_SOURCE_NONE, INTEL_PIPE_CRC_SOURCE_PLANE1, INTEL_PIPE_CRC_SOURCE_PLANE2, - INTEL_PIPE_CRC_SOURCE_PF, + INTEL_PIPE_CRC_SOURCE_PLANE3, + INTEL_PIPE_CRC_SOURCE_PLANE4, + INTEL_PIPE_CRC_SOURCE_PLANE5, + INTEL_PIPE_CRC_SOURCE_PLANE6, + INTEL_PIPE_CRC_SOURCE_PLANE7, INTEL_PIPE_CRC_SOURCE_PIPE, /* TV/DP on pre-gen5/vlv can't use the pipe source. */ INTEL_PIPE_CRC_SOURCE_TV, @@ -1311,6 +1367,12 @@ struct i915_perf_stream { struct list_head link; /** + * @wakeref: As we keep the device awake while the perf stream is + * active, we track our runtime pm reference for later release. + */ + intel_wakeref_t wakeref; + + /** * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*` * properties given when opening a stream, representing the contents * of a single sample as read() by userspace. @@ -1423,14 +1485,8 @@ struct intel_cdclk_state { struct drm_i915_private { struct drm_device drm; - struct kmem_cache *objects; - struct kmem_cache *vmas; - struct kmem_cache *luts; - struct kmem_cache *requests; - struct kmem_cache *dependencies; - struct kmem_cache *priorities; - - const struct intel_device_info info; + const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ + struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */ struct intel_driver_caps caps; /** @@ -1457,8 +1513,6 @@ struct drm_i915_private { */ resource_size_t stolen_usable_size; /* Total size minus reserved ranges */ - void __iomem *regs; - struct intel_uncore uncore; struct i915_virtual_gpu vgpu; @@ -1482,14 +1536,14 @@ struct drm_i915_private { * Base address of where the gmbus and gpio blocks are located (either * on PCH or on SoC for platforms without PCH). */ - uint32_t gpio_mmio_base; + u32 gpio_mmio_base; /* MMIO base address for MIPI regs */ - uint32_t mipi_mmio_base; + u32 mipi_mmio_base; - uint32_t psr_mmio_base; + u32 psr_mmio_base; - uint32_t pps_mmio_base; + u32 pps_mmio_base; wait_queue_head_t gmbus_wait_queue; @@ -1576,6 +1630,8 @@ struct drm_i915_private { struct intel_cdclk_state actual; /* The current hardware cdclk state */ struct intel_cdclk_state hw; + + int force_min_cdclk; } cdclk; /** @@ -1654,8 +1710,11 @@ struct drm_i915_private { struct intel_l3_parity l3_parity; - /* Cannot be determined by PCIID. You must always read a register. */ - u32 edram_cap; + /* + * edram size in MB. + * Cannot be determined by PCIID. You must always read a register. + */ + u32 edram_size_mb; /* * Protects RPS/RC6 register access and PCU communication. @@ -1695,6 +1754,7 @@ struct drm_i915_private { * */ struct mutex av_mutex; + int audio_power_refcount; struct { struct mutex mutex; @@ -1744,17 +1804,17 @@ struct drm_i915_private { * in 0.5us units for WM1+. */ /* primary */ - uint16_t pri_latency[5]; + u16 pri_latency[5]; /* sprite */ - uint16_t spr_latency[5]; + u16 spr_latency[5]; /* cursor */ - uint16_t cur_latency[5]; + u16 cur_latency[5]; /* * Raw watermark memory latency values * for SKL for all 8 levels * in 1us units. 
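The wakeref tracking added above hands each runtime-PM acquirer a depot_stack_handle_t cookie (typedef'd to intel_wakeref_t earlier in this header) that must be passed back on release; that is what lets the debug code keep the owners array trimmed to live references. A sketch of the calling convention, assuming an intel_runtime_pm_get()/intel_runtime_pm_put() pair that hands out and consumes these cookies; the function itself is purely illustrative.

    static void example_touch_hw(struct drm_i915_private *i915)
    {
            intel_wakeref_t wakeref;

            wakeref = intel_runtime_pm_get(i915);

            /* ... MMIO or other work that needs the device awake ... */

            /* Returning the cookie pairs this put with its get, so a leaked
             * reference can be blamed on the acquiring stack trace. */
            intel_runtime_pm_put(i915, wakeref);
    }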
*/ - uint16_t skl_latency[8]; + u16 skl_latency[8]; /* current hardware state */ union { @@ -1764,7 +1824,7 @@ struct drm_i915_private { struct g4x_wm_values g4x; }; - uint8_t max_level; + u8 max_level; /* * Should be held around atomic WM register writing; also @@ -1785,13 +1845,16 @@ struct drm_i915_private { bool valid; bool is_16gb_dimm; u8 num_channels; - enum dram_rank { - I915_DRAM_RANK_INVALID = 0, - I915_DRAM_RANK_SINGLE, - I915_DRAM_RANK_DUAL - } rank; + u8 ranks; u32 bandwidth_kbps; bool symmetric_memory; + enum intel_dram_type { + INTEL_DRAM_UNKNOWN, + INTEL_DRAM_DDR3, + INTEL_DRAM_DDR4, + INTEL_DRAM_LPDDR3, + INTEL_DRAM_LPDDR4 + } type; } dram_info; struct i915_runtime_pm runtime_pm; @@ -1942,12 +2005,19 @@ struct drm_i915_private { void (*resume)(struct drm_i915_private *); void (*cleanup_engine)(struct intel_engine_cs *engine); - struct list_head timelines; + struct i915_gt_timelines { + struct mutex mutex; /* protects list, tainted by GPU */ + struct list_head active_list; + /* Pack multiple timelines' seqnos into the same page */ + spinlock_t hwsp_lock; + struct list_head hwsp_free_list; + } timelines; + + intel_engine_mask_t active_engines; struct list_head active_rings; struct list_head closed_vma; u32 active_requests; - u32 request_serial; /** * Is the GPU currently considered idle, or busy executing @@ -1956,13 +2026,7 @@ struct drm_i915_private { * In order to reduce the effect on performance, there * is a slight delay before we do so. */ - bool awake; - - /** - * The number of times we have woken up. - */ - unsigned int epoch; -#define I915_EPOCH_INVALID 0 + intel_wakeref_t awake; /** * We leave the user IRQ off as much as possible, @@ -1987,6 +2051,14 @@ struct drm_i915_private { struct i915_vma *scratch; } gt; + /* For i945gm vblank irq vs. C3 workaround */ + struct { + struct work_struct work; + struct pm_qos_request pm_qos; + u8 c3_disable_latency; + u8 enabled; + } i945gm_vblank; + /* perform PHY state sanity checks? */ bool chv_phy_assert[2]; @@ -2003,18 +2075,25 @@ struct drm_i915_private { struct i915_pmu pmu; + struct i915_hdcp_comp_master *hdcp_master; + bool hdcp_comp_added; + + /* Mutex to protect the above hdcp component related values. */ + struct mutex hdcp_comp_mutex; + /* * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch * will be rejected. Instead look for a better place. */ }; +struct dram_dimm_info { + u8 size, width, ranks; +}; + struct dram_channel_info { - struct info { - u8 size, width; - enum dram_rank rank; - } l_info, s_info; - enum dram_rank rank; + struct dram_dimm_info dimm_l, dimm_s; + u8 ranks; bool is_16gb_dimm; }; @@ -2043,6 +2122,11 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) return container_of(huc, struct drm_i915_private, huc); } +static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore) +{ + return container_of(uncore, struct drm_i915_private, uncore); +} + /* Simple iterator over all initialised engines */ #define for_each_engine(engine__, dev_priv__, id__) \ for ((id__) = 0; \ @@ -2052,7 +2136,7 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) /* Iterator over subset of engines selected by mask */ #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ - for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \ + for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \ (tmp__) ? 
\ ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \ 0;) @@ -2191,17 +2275,12 @@ static inline unsigned int i915_sg_segment_size(void) return size; } -static inline const struct intel_device_info * -intel_info(const struct drm_i915_private *dev_priv) -{ - return &dev_priv->info; -} - -#define INTEL_INFO(dev_priv) intel_info((dev_priv)) +#define INTEL_INFO(dev_priv) (&(dev_priv)->__info) +#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime) #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) -#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) -#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) +#define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen) +#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id) #define REVID_FOREVER 0xff #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) @@ -2212,8 +2291,12 @@ intel_info(const struct drm_i915_private *dev_priv) GENMASK((e) - 1, (s) - 1)) /* Returns true if Gen is in inclusive range [Start, End] */ -#define IS_GEN(dev_priv, s, e) \ - (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e)))) +#define IS_GEN_RANGE(dev_priv, s, e) \ + (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e)))) + +#define IS_GEN(dev_priv, n) \ + (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \ + INTEL_INFO(dev_priv)->gen == (n)) /* * Return true if revision is in range [since,until] inclusive. @@ -2223,7 +2306,69 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_REVID(p, since, until) \ (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) -#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p)) +static __always_inline unsigned int +__platform_mask_index(const struct intel_runtime_info *info, + enum intel_platform p) +{ + const unsigned int pbits = + BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS; + + /* Expand the platform_mask array if this fails. */ + BUILD_BUG_ON(INTEL_MAX_PLATFORMS > + pbits * ARRAY_SIZE(info->platform_mask)); + + return p / pbits; +} + +static __always_inline unsigned int +__platform_mask_bit(const struct intel_runtime_info *info, + enum intel_platform p) +{ + const unsigned int pbits = + BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS; + + return p % pbits + INTEL_SUBPLATFORM_BITS; +} + +static inline u32 +intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p) +{ + const unsigned int pi = __platform_mask_index(info, p); + + return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS; +} + +static __always_inline bool +IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p) +{ + const struct intel_runtime_info *info = RUNTIME_INFO(i915); + const unsigned int pi = __platform_mask_index(info, p); + const unsigned int pb = __platform_mask_bit(info, p); + + BUILD_BUG_ON(!__builtin_constant_p(p)); + + return info->platform_mask[pi] & BIT(pb); +} + +static __always_inline bool +IS_SUBPLATFORM(const struct drm_i915_private *i915, + enum intel_platform p, unsigned int s) +{ + const struct intel_runtime_info *info = RUNTIME_INFO(i915); + const unsigned int pi = __platform_mask_index(info, p); + const unsigned int pb = __platform_mask_bit(info, p); + const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1; + const u32 mask = info->platform_mask[pi]; + + BUILD_BUG_ON(!__builtin_constant_p(p)); + BUILD_BUG_ON(!__builtin_constant_p(s)); + BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS); + + /* Shift and test on the MSB position so sign flag can be used. 
*/ + return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb); +} + +#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile) #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830) #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G) @@ -2238,14 +2383,14 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45) #define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45) #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) -#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) -#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) #define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW) #define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33) -#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) +#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE) +#define IS_IRONLAKE_M(dev_priv) \ + (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv)) #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE) #define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \ - (dev_priv)->info.gt == 1) + INTEL_INFO(dev_priv)->gt == 1) #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW) #define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL) @@ -2257,61 +2402,55 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) -#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) +#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) -#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \ - ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \ - (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \ - (INTEL_DEVID(dev_priv) & 0xf) == 0xe)) -/* ULX machines are also considered ULT. */ -#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0xf) == 0xe) +#define IS_BDW_ULT(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT) +#define IS_BDW_ULX(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX) #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ - (dev_priv)->info.gt == 3) -#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00) + INTEL_INFO(dev_priv)->gt == 3) +#define IS_HSW_ULT(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT) #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ - (dev_priv)->info.gt == 3) + INTEL_INFO(dev_priv)->gt == 3) +#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \ + INTEL_INFO(dev_priv)->gt == 1) /* ULX machines are also considered ULT. 
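Usage sketch for the reworked generation checks defined just above: IS_GEN() takes the generation as a compile-time constant (enforced by the BUILD_BUG_ON_ZERO) and IS_GEN_RANGE() tests an inclusive [start, end] span against gen_mask. The helper below is illustrative only.

    static bool example_needs_gen9_to_11_path(struct drm_i915_private *i915)
    {
            /* Exactly gen 9, or anywhere in the inclusive 10..11 range. */
            return IS_GEN(i915, 9) || IS_GEN_RANGE(i915, 10, 11);
    }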
*/ -#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \ - INTEL_DEVID(dev_priv) == 0x0A1E) -#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \ - INTEL_DEVID(dev_priv) == 0x1913 || \ - INTEL_DEVID(dev_priv) == 0x1916 || \ - INTEL_DEVID(dev_priv) == 0x1921 || \ - INTEL_DEVID(dev_priv) == 0x1926) -#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \ - INTEL_DEVID(dev_priv) == 0x1915 || \ - INTEL_DEVID(dev_priv) == 0x191E) -#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \ - INTEL_DEVID(dev_priv) == 0x5913 || \ - INTEL_DEVID(dev_priv) == 0x5916 || \ - INTEL_DEVID(dev_priv) == 0x5921 || \ - INTEL_DEVID(dev_priv) == 0x5926) -#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ - INTEL_DEVID(dev_priv) == 0x5915 || \ - INTEL_DEVID(dev_priv) == 0x591E) -#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \ - INTEL_DEVID(dev_priv) == 0x87C0) +#define IS_HSW_ULX(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX) +#define IS_SKL_ULT(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT) +#define IS_SKL_ULX(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX) +#define IS_KBL_ULT(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT) +#define IS_KBL_ULX(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX) +#define IS_AML_ULX(dev_priv) \ + (IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \ + IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML)) #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ - (dev_priv)->info.gt == 2) + INTEL_INFO(dev_priv)->gt == 2) #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ - (dev_priv)->info.gt == 3) + INTEL_INFO(dev_priv)->gt == 3) #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ - (dev_priv)->info.gt == 4) + INTEL_INFO(dev_priv)->gt == 4) #define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \ - (dev_priv)->info.gt == 2) + INTEL_INFO(dev_priv)->gt == 2) #define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \ - (dev_priv)->info.gt == 3) -#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0) + INTEL_INFO(dev_priv)->gt == 3) +#define IS_CFL_ULT(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT) #define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \ - (dev_priv)->info.gt == 2) + INTEL_INFO(dev_priv)->gt == 2) #define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \ - (dev_priv)->info.gt == 3) -#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004) + INTEL_INFO(dev_priv)->gt == 3) +#define IS_CNL_WITH_PORT_F(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF) +#define IS_ICL_WITH_PORT_F(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF) #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) @@ -2366,81 +2505,54 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_ICL_REVID(p, since, until) \ (IS_ICELAKE(p) && IS_REVID(p, since, until)) -/* - * The genX designation typically refers to the render engine, so render - * capability related checks should use IS_GEN, while display and other checks - * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular - * chips, etc.). 
- */ -#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1))) -#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2))) -#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3))) -#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4))) -#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5))) -#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6))) -#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) -#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) -#define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9))) -#define IS_GEN11(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(10))) - #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) -#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv)) -#define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv)) - -#define ENGINE_MASK(id) BIT(id) -#define RENDER_RING ENGINE_MASK(RCS) -#define BSD_RING ENGINE_MASK(VCS) -#define BLT_RING ENGINE_MASK(BCS) -#define VEBOX_RING ENGINE_MASK(VECS) -#define BSD2_RING ENGINE_MASK(VCS2) -#define BSD3_RING ENGINE_MASK(VCS3) -#define BSD4_RING ENGINE_MASK(VCS4) -#define VEBOX2_RING ENGINE_MASK(VECS2) -#define ALL_ENGINES (~0) - -#define HAS_ENGINE(dev_priv, id) \ - (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id))) - -#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) -#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) -#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) -#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) - -#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv) - -#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc) -#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop) -#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED)) +#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv)) +#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) + +#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id)) + +#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \ + unsigned int first__ = (first); \ + unsigned int count__ = (count); \ + (INTEL_INFO(dev_priv)->engine_mask & \ + GENMASK(first__ + count__ - 1, first__)) >> first__; \ +}) +#define VDBOX_MASK(dev_priv) \ + ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS) +#define VEBOX_MASK(dev_priv) \ + ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS) + +#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc) +#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop) +#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb) #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) -#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical) +#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical) #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ - ((dev_priv)->info.has_logical_ring_contexts) + (INTEL_INFO(dev_priv)->has_logical_ring_contexts) #define HAS_LOGICAL_RING_ELSQ(dev_priv) \ - ((dev_priv)->info.has_logical_ring_elsq) + (INTEL_INFO(dev_priv)->has_logical_ring_elsq) #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \ - ((dev_priv)->info.has_logical_ring_preemption) + (INTEL_INFO(dev_priv)->has_logical_ring_preemption) #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) -#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt) +#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type) #define HAS_PPGTT(dev_priv) \ (INTEL_PPGTT(dev_priv) != 
INTEL_PPGTT_NONE) #define HAS_FULL_PPGTT(dev_priv) \ (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL) -#define HAS_FULL_48BIT_PPGTT(dev_priv) \ - (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL) #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ GEM_BUG_ON((sizes) == 0); \ - ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \ + ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \ }) -#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.display.has_overlay) +#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ - ((dev_priv)->info.display.overlay_needs_physical) + (INTEL_INFO(dev_priv)->display.overlay_needs_physical) /* Early gen2 have a totally busted CS tlb and require pinned batches. */ #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) @@ -2458,42 +2570,43 @@ intel_info(const struct drm_i915_private *dev_priv) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. */ -#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \ +#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \ !(IS_I915G(dev_priv) || \ IS_I915GM(dev_priv))) -#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.display.supports_tv) -#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.display.has_hotplug) +#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv) +#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug) #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) -#define HAS_FBC(dev_priv) ((dev_priv)->info.display.has_fbc) -#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7) +#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc) +#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7) #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) -#define HAS_DP_MST(dev_priv) ((dev_priv)->info.display.has_dp_mst) +#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst) -#define HAS_DDI(dev_priv) ((dev_priv)->info.display.has_ddi) -#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg) -#define HAS_PSR(dev_priv) ((dev_priv)->info.display.has_psr) +#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) +#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg) +#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) +#define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0) -#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6) -#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p) +#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) +#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) #define HAS_RC6pp(dev_priv) (false) /* HW was never validated */ -#define HAS_CSR(dev_priv) ((dev_priv)->info.display.has_csr) +#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr) -#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) -#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) +#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm) +#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc) -#define HAS_IPC(dev_priv) ((dev_priv)->info.display.has_ipc) +#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) /* * For now, anything with a GuC requires uCode loading, and then supports * command submission once 
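The engine-mask helpers above replace the old per-ring defines with instance masks relative to the first engine of each class, so bit n of VDBOX_MASK() corresponds to video decode engine instance n. A small example, with an assumed function name, of how such a mask might be consulted:

    static bool example_has_second_vdbox(struct drm_i915_private *i915)
    {
            /* Bit 1 of the VDBOX instance mask is the second VCS engine. */
            return VDBOX_MASK(i915) & BIT(1);
    }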
loaded. But these are logically independent * properties, so we have separate macros to test them. */ -#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc) -#define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct) +#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc) +#define HAS_GUC_CT(dev_priv) (INTEL_INFO(dev_priv)->has_guc_ct) #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) @@ -2502,11 +2615,11 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) /* Having a GuC is not the same as using a GuC */ -#define USES_GUC(dev_priv) intel_uc_is_using_guc() -#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission() -#define USES_HUC(dev_priv) intel_uc_is_using_huc() +#define USES_GUC(dev_priv) intel_uc_is_using_guc(dev_priv) +#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv) +#define USES_HUC(dev_priv) intel_uc_is_using_huc(dev_priv) -#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) +#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) #define INTEL_PCH_DEVICE_ID_MASK 0xff80 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 @@ -2521,6 +2634,7 @@ intel_info(const struct drm_i915_private *dev_priv) #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280 #define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80 +#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 @@ -2530,8 +2644,6 @@ intel_info(const struct drm_i915_private *dev_priv) #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP) -#define HAS_PCH_CNP_LP(dev_priv) \ - (INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) @@ -2546,12 +2658,12 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE) -#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display) +#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch) #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9) /* DPF == dynamic parity feature */ -#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf) +#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf) #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? 
\ 2 : HAS_L3_DPF(dev_priv)) @@ -2601,19 +2713,7 @@ extern const struct dev_pm_ops i915_pm_ops; extern int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent); extern void i915_driver_unload(struct drm_device *dev); -extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); -extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); - -extern void i915_reset(struct drm_i915_private *i915, - unsigned int stalled_mask, - const char *reason); -extern int i915_reset_engine(struct intel_engine_cs *engine, - const char *reason); - -extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv); -extern int intel_reset_guc(struct drm_i915_private *dev_priv); -extern int intel_guc_reset_engine(struct intel_guc *guc, - struct intel_engine_cs *engine); + extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); @@ -2656,20 +2756,11 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) &dev_priv->gpu_error.hangcheck_work, delay); } -__printf(4, 5) -void i915_handle_error(struct drm_i915_private *dev_priv, - u32 engine_mask, - unsigned long flags, - const char *fmt, ...); -#define I915_ERROR_CAPTURE BIT(0) - extern void intel_irq_init(struct drm_i915_private *dev_priv); extern void intel_irq_fini(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); -void i915_clear_error_registers(struct drm_i915_private *dev_priv); - static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) { return dev_priv->gvt; @@ -2693,45 +2784,45 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, - uint32_t mask, - uint32_t bits); + u32 mask, + u32 bits); void ilk_update_display_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask); + u32 interrupt_mask, + u32 enabled_irq_mask); static inline void -ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) +ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits) { ilk_update_display_irq(dev_priv, bits, bits); } static inline void -ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) +ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits) { ilk_update_display_irq(dev_priv, bits, 0); } void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, enum pipe pipe, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask); + u32 interrupt_mask, + u32 enabled_irq_mask); static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, uint32_t bits) + enum pipe pipe, u32 bits) { bdw_update_pipe_irq(dev_priv, pipe, bits, bits); } static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, uint32_t bits) + enum pipe pipe, u32 bits) { bdw_update_pipe_irq(dev_priv, pipe, bits, 0); } void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask); + u32 interrupt_mask, + u32 enabled_irq_mask); static inline void -ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 
+ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits) { ibx_display_interrupt_update(dev_priv, bits, bits); } static inline void -ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) +ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits) { ibx_display_interrupt_update(dev_priv, bits, 0); } @@ -2784,8 +2875,6 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); int i915_gem_freeze(struct drm_i915_private *dev_priv); int i915_gem_freeze_late(struct drm_i915_private *dev_priv); -void *i915_gem_object_alloc(struct drm_i915_private *dev_priv); -void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops); struct drm_i915_gem_object * @@ -2916,13 +3005,13 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) __i915_gem_object_unpin_pages(obj); } -enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */ +enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */ I915_MM_NORMAL = 0, - I915_MM_SHRINKER + I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */ }; -void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, - enum i915_mm_subclass subclass); +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, + enum i915_mm_subclass subclass); void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); enum i915_map_type { @@ -2958,6 +3047,14 @@ i915_coherent_map_type(struct drm_i915_private *i915) void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, enum i915_map_type type); +void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, + unsigned long offset, + unsigned long size); +static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj) +{ + __i915_gem_object_flush_map(obj, 0, obj->base.size); +} + /** * i915_gem_object_unpin_map - releases an earlier mapping * @obj: the object to unmap @@ -2986,12 +3083,17 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } -int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); +static inline int __must_check +i915_mutex_lock_interruptible(struct drm_device *dev) +{ + return mutex_lock_interruptible(&dev->struct_mutex); +} + int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, - uint32_t handle, uint64_t *offset); + u32 handle, u64 *offset); int i915_gem_mmap_gtt_version(void); void i915_gem_track_fb(struct drm_i915_gem_object *old, @@ -3000,27 +3102,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); -struct i915_request * -i915_gem_find_active_request(struct intel_engine_cs *engine); - -static inline bool i915_reset_backoff(struct i915_gpu_error *error) -{ - return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags)); -} - -static inline bool i915_reset_handoff(struct i915_gpu_error *error) -{ - return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags)); -} - -static inline bool i915_terminally_wedged(struct i915_gpu_error *error) +static inline bool __i915_wedged(struct i915_gpu_error *error) { return unlikely(test_bit(I915_WEDGED, &error->flags)); } -static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error) +static inline bool 
i915_reset_failed(struct drm_i915_private *i915) { - return i915_reset_backoff(error) | i915_terminally_wedged(error); + return __i915_wedged(&i915->gpu_error); } static inline u32 i915_reset_count(struct i915_gpu_error *error) @@ -3034,18 +3123,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, return READ_ONCE(error->reset_engine_count[engine->id]); } -struct i915_request * -i915_gem_reset_prepare_engine(struct intel_engine_cs *engine); -int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); -void i915_gem_reset(struct drm_i915_private *dev_priv, - unsigned int stalled_mask); -void i915_gem_reset_finish_engine(struct intel_engine_cs *engine); -void i915_gem_reset_finish(struct drm_i915_private *dev_priv); void i915_gem_set_wedged(struct drm_i915_private *dev_priv); bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); -void i915_gem_reset_engine(struct intel_engine_cs *engine, - struct i915_request *request, - bool stalled); void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); @@ -3055,14 +3134,13 @@ void i915_gem_fini(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, unsigned int flags, long timeout); -int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); +void i915_gem_suspend(struct drm_i915_private *dev_priv); void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); vm_fault_t i915_gem_fault(struct vm_fault *vmf); int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, - long timeout, - struct intel_rps_client *rps); + long timeout); int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, const struct i915_sched_attr *attr); @@ -3105,7 +3183,6 @@ struct drm_i915_fence_reg * i915_reserve_fence(struct drm_i915_private *dev_priv); void i915_unreserve_fence(struct drm_i915_fence_reg *fence); -void i915_gem_revoke_fences(struct drm_i915_private *dev_priv); void i915_gem_restore_fences(struct drm_i915_private *dev_priv); void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); @@ -3141,8 +3218,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, struct drm_file *file); void i915_oa_init_reg_state(struct intel_engine_cs *engine, - struct i915_gem_context *ctx, - uint32_t *reg_state); + struct intel_context *ce, + u32 *reg_state); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct i915_address_space *vm, @@ -3204,7 +3281,8 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915, unsigned long i915_gem_shrink_all(struct drm_i915_private *i915); void i915_gem_shrinker_register(struct drm_i915_private *i915); void i915_gem_shrinker_unregister(struct drm_i915_private *i915); -void i915_gem_shrinker_taints_mutex(struct mutex *mutex); +void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, + struct mutex *mutex); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) @@ -3313,7 +3391,21 @@ static inline void intel_unregister_dsm_handler(void) { return; } static inline struct intel_device_info * mkwrite_device_info(struct drm_i915_private *dev_priv) { - return (struct intel_device_info *)&dev_priv->info; + return (struct intel_device_info 
*)INTEL_INFO(dev_priv); +} + +static inline struct intel_sseu +intel_device_default_sseu(struct drm_i915_private *i915) +{ + const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + struct intel_sseu value = { + .slice_mask = sseu->slice_mask, + .subslice_mask = sseu->subslice_mask[0], + .min_eus_per_subslice = sseu->max_eus_per_subslice, + .max_eus_per_subslice = sseu->max_eus_per_subslice, + }; + + return value; } /* modesetting */ @@ -3393,10 +3485,10 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, enum dpio_phy phy); bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy); -uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count); +u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count); void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, - uint8_t lane_lat_optim_mask); -uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder); + u8 lane_lat_optim_mask); +u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder); void chv_set_phy_signal_level(struct intel_encoder *encoder, u32 deemph_reg_value, u32 margin_reg_value, @@ -3441,18 +3533,21 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000); } -#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) -#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) +#define __I915_REG_OP(op__, dev_priv__, ...) \ + intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__) -#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true) -#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true) -#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false) -#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false) +#define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__)) +#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__)) -#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true) -#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true) -#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) -#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) +#define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__)) +#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__)) +#define I915_READ16_NOTRACE(reg__) __I915_REG_OP(read16_notrace, dev_priv, (reg__)) +#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__)) + +#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__)) +#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__)) +#define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__)) +#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__)) /* Be very careful with read/write 64-bit values. On 32-bit machines, they * will be implemented using 2 32-bit writes in an arbitrary order with @@ -3468,46 +3563,12 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, * * You have been warned. 
*/ -#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) - -#define I915_READ64_2x32(lower_reg, upper_reg) ({ \ - u32 upper, lower, old_upper, loop = 0; \ - upper = I915_READ(upper_reg); \ - do { \ - old_upper = upper; \ - lower = I915_READ(lower_reg); \ - upper = I915_READ(upper_reg); \ - } while (upper != old_upper && loop++ < 2); \ - (u64)upper << 32 | lower; }) - -#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) -#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) - -#define __raw_read(x, s) \ -static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \ - i915_reg_t reg) \ -{ \ - return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ -} +#define I915_READ64(reg__) __I915_REG_OP(read64, dev_priv, (reg__)) +#define I915_READ64_2x32(lower_reg__, upper_reg__) \ + __I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__)) -#define __raw_write(x, s) \ -static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \ - i915_reg_t reg, uint##x##_t val) \ -{ \ - write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \ -} -__raw_read(8, b) -__raw_read(16, w) -__raw_read(32, l) -__raw_read(64, q) - -__raw_write(8, b) -__raw_write(16, w) -__raw_write(32, l) -__raw_write(64, q) - -#undef __raw_read -#undef __raw_write +#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__)) +#define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__)) /* These are untraced mmio-accessors that are only valid to be used inside * critical sections, such as inside IRQ handlers, where forcewake is explicitly @@ -3535,10 +3596,10 @@ __raw_write(64, q) * therefore generally be serialised, by either the dev_priv->uncore.lock or * a more localised lock guarding all access to that bank of registers. */ -#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__)) -#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__)) -#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__)) -#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) +#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__)) +#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__)) +#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__)) +#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__)) /* "Broadcast RGB" property */ #define INTEL_BROADCAST_RGB_AUTO 0 @@ -3599,90 +3660,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) } } -static inline bool -__i915_request_irq_complete(const struct i915_request *rq) -{ - struct intel_engine_cs *engine = rq->engine; - u32 seqno; - - /* Note that the engine may have wrapped around the seqno, and - * so our request->global_seqno will be ahead of the hardware, - * even though it completed the request before wrapping. We catch - * this by kicking all the waiters before resetting the seqno - * in hardware, and also signal the fence. - */ - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) - return true; - - /* The request was dequeued before we were awoken. We check after - * inspecting the hw to confirm that this was the same request - * that generated the HWS update. The memory barriers within - * the request execution are sufficient to ensure that a check - * after reading the value from hw matches this request. 
- */ - seqno = i915_request_global_seqno(rq); - if (!seqno) - return false; - - /* Before we do the heavier coherent read of the seqno, - * check the value (hopefully) in the CPU cacheline. - */ - if (__i915_request_completed(rq, seqno)) - return true; - - /* Ensure our read of the seqno is coherent so that we - * do not "miss an interrupt" (i.e. if this is the last - * request and the seqno write from the GPU is not visible - * by the time the interrupt fires, we will see that the - * request is incomplete and go back to sleep awaiting - * another interrupt that will never come.) - * - * Strictly, we only need to do this once after an interrupt, - * but it is easier and safer to do it every time the waiter - * is woken. - */ - if (engine->irq_seqno_barrier && - test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - /* The ordering of irq_posted versus applying the barrier - * is crucial. The clearing of the current irq_posted must - * be visible before we perform the barrier operation, - * such that if a subsequent interrupt arrives, irq_posted - * is reasserted and our task rewoken (which causes us to - * do another __i915_request_irq_complete() immediately - * and reapply the barrier). Conversely, if the clear - * occurs after the barrier, then an interrupt that arrived - * whilst we waited on the barrier would not trigger a - * barrier on the next pass, and the read may not see the - * seqno update. - */ - engine->irq_seqno_barrier(engine); - - /* If we consume the irq, but we are no longer the bottom-half, - * the real bottom-half may not have serialised their own - * seqno check with the irq-barrier (i.e. may have inspected - * the seqno before we believe it coherent since they see - * irq_posted == false but we are still running). - */ - spin_lock_irq(&b->irq_lock); - if (b->irq_wait && b->irq_wait->tsk != current) - /* Note that if the bottom-half is changed as we - * are sending the wake-up, the new bottom-half will - * be woken by whomever made the change. We only have - * to worry about when we steal the irq-posted for - * ourself. 
- */ - wake_up_process(b->irq_wait->tsk); - spin_unlock_irq(&b->irq_lock); - - if (__i915_request_completed(rq, seqno)) - return true; - } - - return false; -} - void i915_memcpy_init_early(struct drm_i915_private *dev_priv); bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d36a9755ad91..bf594a5e88bc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -25,18 +25,9 @@ * */ -#include <drm/drmP.h> #include <drm/drm_vma_manager.h> +#include <drm/drm_pci.h> #include <drm/i915_drm.h> -#include "i915_drv.h" -#include "i915_gem_clflush.h" -#include "i915_vgpu.h" -#include "i915_trace.h" -#include "intel_drv.h" -#include "intel_frontbuffer.h" -#include "intel_mocs.h" -#include "intel_workarounds.h" -#include "i915_gemfs.h" #include <linux/dma-fence-array.h> #include <linux/kthread.h> #include <linux/reservation.h> @@ -46,6 +37,20 @@ #include <linux/swap.h> #include <linux/pci.h> #include <linux/dma-buf.h> +#include <linux/mman.h> + +#include "i915_drv.h" +#include "i915_gem_clflush.h" +#include "i915_gemfs.h" +#include "i915_globals.h" +#include "i915_reset.h" +#include "i915_trace.h" +#include "i915_vgpu.h" + +#include "intel_drv.h" +#include "intel_frontbuffer.h" +#include "intel_mocs.h" +#include "intel_workarounds.h" static void i915_gem_flush_free_objects(struct drm_i915_private *i915); @@ -96,49 +101,10 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, spin_unlock(&dev_priv->mm.object_stat_lock); } -static int -i915_gem_wait_for_error(struct i915_gpu_error *error) +static void __i915_gem_park(struct drm_i915_private *i915) { - int ret; + intel_wakeref_t wakeref; - might_sleep(); - - /* - * Only wait 10 seconds for the gpu reset to complete to avoid hanging - * userspace. If it takes that long something really bad is going on and - * we should simply try to bail out and fail as gracefully as possible. 
- */ - ret = wait_event_interruptible_timeout(error->reset_queue, - !i915_reset_backoff(error), - I915_RESET_TIMEOUT); - if (ret == 0) { - DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); - return -EIO; - } else if (ret < 0) { - return ret; - } else { - return 0; - } -} - -int i915_mutex_lock_interruptible(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int ret; - - ret = i915_gem_wait_for_error(&dev_priv->gpu_error); - if (ret) - return ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - return 0; -} - -static u32 __i915_gem_park(struct drm_i915_private *i915) -{ GEM_TRACE("\n"); lockdep_assert_held(&i915->drm.struct_mutex); @@ -146,9 +112,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915) GEM_BUG_ON(!list_empty(&i915->gt.active_rings)); if (!i915->gt.awake) - return I915_EPOCH_INVALID; - - GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID); + return; /* * Be paranoid and flush a concurrent interrupt to make sure @@ -169,16 +133,15 @@ static u32 __i915_gem_park(struct drm_i915_private *i915) i915_pmu_gt_parked(i915); i915_vma_parked(i915); - i915->gt.awake = false; + wakeref = fetch_and_zero(&i915->gt.awake); + GEM_BUG_ON(!wakeref); if (INTEL_GEN(i915) >= 6) gen6_rps_idle(i915); - intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ); + intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref); - intel_runtime_pm_put(i915); - - return i915->gt.epoch; + i915_globals_park(); } void i915_gem_park(struct drm_i915_private *i915) @@ -201,12 +164,11 @@ void i915_gem_unpark(struct drm_i915_private *i915) lockdep_assert_held(&i915->drm.struct_mutex); GEM_BUG_ON(!i915->gt.active_requests); + assert_rpm_wakelock_held(i915); if (i915->gt.awake) return; - intel_runtime_pm_get_noresume(i915); - /* * It seems that the DMC likes to transition between the DC states a lot * when there are no connected displays (no active power domains) during @@ -218,11 +180,10 @@ void i915_gem_unpark(struct drm_i915_private *i915) * Work around it by grabbing a GT IRQ power domain whilst there is any * GT activity, preventing any DC state transitions. 
*/ - intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); + i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); + GEM_BUG_ON(!i915->gt.awake); - i915->gt.awake = true; - if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */ - i915->gt.epoch = 1; + i915_globals_unpark(); intel_enable_gt_powersave(i915); i915_update_gfx_val(i915); @@ -243,21 +204,19 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_ggtt *ggtt = &to_i915(dev)->ggtt; struct drm_i915_gem_get_aperture *args = data; struct i915_vma *vma; u64 pinned; + mutex_lock(&ggtt->vm.mutex); + pinned = ggtt->vm.reserved; - mutex_lock(&dev->struct_mutex); - list_for_each_entry(vma, &ggtt->vm.active_list, vm_link) + list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) if (i915_vma_is_pinned(vma)) pinned += vma->node.size; - list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link) - if (i915_vma_is_pinned(vma)) - pinned += vma->node.size; - mutex_unlock(&dev->struct_mutex); + + mutex_unlock(&ggtt->vm.mutex); args->aper_size = ggtt->vm.total; args->aper_available_size = args->aper_size - pinned; @@ -349,7 +308,7 @@ static void __start_cpu_write(struct drm_i915_gem_object *obj) obj->cache_dirty = true; } -static void +void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages, bool needs_clflush) @@ -437,15 +396,19 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) if (ret) return ret; - while ((vma = list_first_entry_or_null(&obj->vma_list, - struct i915_vma, - obj_link))) { + spin_lock(&obj->vma.lock); + while (!ret && (vma = list_first_entry_or_null(&obj->vma.list, + struct i915_vma, + obj_link))) { list_move_tail(&vma->obj_link, &still_in_list); + spin_unlock(&obj->vma.lock); + ret = i915_vma_unbind(vma); - if (ret) - break; + + spin_lock(&obj->vma.lock); } - list_splice(&still_in_list, &obj->vma_list); + list_splice(&still_in_list, &obj->vma.list); + spin_unlock(&obj->vma.lock); return ret; } @@ -453,8 +416,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) static long i915_gem_object_wait_fence(struct dma_fence *fence, unsigned int flags, - long timeout, - struct intel_rps_client *rps_client) + long timeout) { struct i915_request *rq; @@ -472,27 +434,6 @@ i915_gem_object_wait_fence(struct dma_fence *fence, if (i915_request_completed(rq)) goto out; - /* - * This client is about to stall waiting for the GPU. In many cases - * this is undesirable and limits the throughput of the system, as - * many clients cannot continue processing user input/output whilst - * blocked. RPS autotuning may take tens of milliseconds to respond - * to the GPU load and thus incurs additional latency for the client. - * We can circumvent that by promoting the GPU frequency to maximum - * before we wait. This makes the GPU throttle up much more quickly - * (good for benchmarks and user experience, e.g. window animations), - * but at a cost of spending more power processing the workload - * (bad for battery). Not all clients even want their results - * immediately and for them we should just let the GPU select its own - * frequency to maximise efficiency. To prevent a single client from - * forcing the clocks too high for the whole system, we only allow - * each client to waitboost once in a busy period. 
- */ - if (rps_client && !i915_request_started(rq)) { - if (INTEL_GEN(rq->i915) >= 6) - gen6_rps_boost(rq, rps_client); - } - timeout = i915_request_wait(rq, flags, timeout); out: @@ -505,8 +446,7 @@ out: static long i915_gem_object_wait_reservation(struct reservation_object *resv, unsigned int flags, - long timeout, - struct intel_rps_client *rps_client) + long timeout) { unsigned int seq = __read_seqcount_begin(&resv->seq); struct dma_fence *excl; @@ -524,8 +464,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, for (i = 0; i < count; i++) { timeout = i915_gem_object_wait_fence(shared[i], - flags, timeout, - rps_client); + flags, timeout); if (timeout < 0) break; @@ -551,8 +490,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, } if (excl && timeout >= 0) - timeout = i915_gem_object_wait_fence(excl, flags, timeout, - rps_client); + timeout = i915_gem_object_wait_fence(excl, flags, timeout); dma_fence_put(excl); @@ -646,35 +584,19 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, * @obj: i915 gem object * @flags: how to wait (under a lock, for all rendering or just for writes etc) * @timeout: how long to wait - * @rps_client: client (user process) to charge for any waitboosting */ int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, - long timeout, - struct intel_rps_client *rps_client) + long timeout) { might_sleep(); -#if IS_ENABLED(CONFIG_LOCKDEP) - GEM_BUG_ON(debug_locks && - !!lockdep_is_held(&obj->base.dev->struct_mutex) != - !!(flags & I915_WAIT_LOCKED)); -#endif GEM_BUG_ON(timeout < 0); - timeout = i915_gem_object_wait_reservation(obj->resv, - flags, timeout, - rps_client); + timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout); return timeout < 0 ? 
timeout : 0; } -static struct intel_rps_client *to_rps_client(struct drm_file *file) -{ - struct drm_i915_file_private *fpriv = file->driver_priv; - - return &fpriv->rps_client; -} - static int i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -697,28 +619,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, return 0; } -void *i915_gem_object_alloc(struct drm_i915_private *dev_priv) -{ - return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); -} - -void i915_gem_object_free(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - kmem_cache_free(dev_priv->objects, obj); -} - static int i915_gem_create(struct drm_file *file, struct drm_i915_private *dev_priv, - uint64_t size, - uint32_t *handle_p) + u64 *size_p, + u32 *handle_p) { struct drm_i915_gem_object *obj; - int ret; u32 handle; + u64 size; + int ret; - size = roundup(size, PAGE_SIZE); + size = round_up(*size_p, PAGE_SIZE); if (size == 0) return -EINVAL; @@ -734,6 +646,7 @@ i915_gem_create(struct drm_file *file, return ret; *handle_p = handle; + *size_p = obj->base.size; return 0; } @@ -746,7 +659,7 @@ i915_gem_dumb_create(struct drm_file *file, args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); args->size = args->pitch * args->height; return i915_gem_create(file, to_i915(dev), - args->size, &args->handle); + &args->size, &args->handle); } static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) @@ -771,7 +684,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, i915_gem_flush_free_objects(dev_priv); return i915_gem_create(file, dev_priv, - args->size, &args->handle); + &args->size, &args->handle); } static inline enum fb_op_origin @@ -783,6 +696,8 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) { + intel_wakeref_t wakeref; + /* * No actual flushing is required for the GTT write domain for reads * from the GTT domain. 
Writes to it "immediately" go to main memory @@ -809,13 +724,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) i915_gem_chipset_flush(dev_priv); - intel_runtime_pm_get(dev_priv); - spin_lock_irq(&dev_priv->uncore.lock); + with_intel_runtime_pm(dev_priv, wakeref) { + spin_lock_irq(&dev_priv->uncore.lock); - POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE)); + POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE)); - spin_unlock_irq(&dev_priv->uncore.lock); - intel_runtime_pm_put(dev_priv); + spin_unlock_irq(&dev_priv->uncore.lock); + } } static void @@ -859,58 +774,6 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) obj->write_domain = 0; } -static inline int -__copy_to_user_swizzled(char __user *cpu_vaddr, - const char *gpu_vaddr, int gpu_offset, - int length) -{ - int ret, cpu_offset = 0; - - while (length > 0) { - int cacheline_end = ALIGN(gpu_offset + 1, 64); - int this_length = min(cacheline_end - gpu_offset, length); - int swizzled_gpu_offset = gpu_offset ^ 64; - - ret = __copy_to_user(cpu_vaddr + cpu_offset, - gpu_vaddr + swizzled_gpu_offset, - this_length); - if (ret) - return ret + length; - - cpu_offset += this_length; - gpu_offset += this_length; - length -= this_length; - } - - return 0; -} - -static inline int -__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, - const char __user *cpu_vaddr, - int length) -{ - int ret, cpu_offset = 0; - - while (length > 0) { - int cacheline_end = ALIGN(gpu_offset + 1, 64); - int this_length = min(cacheline_end - gpu_offset, length); - int swizzled_gpu_offset = gpu_offset ^ 64; - - ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, - cpu_vaddr + cpu_offset, - this_length); - if (ret) - return ret + length; - - cpu_offset += this_length; - gpu_offset += this_length; - length -= this_length; - } - - return 0; -} - /* * Pins the specified object's pages and synchronizes the object with * GPU accesses. Sets needs_clflush to non-zero if the caller should @@ -930,8 +793,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -983,8 +845,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -1030,72 +891,23 @@ err_unpin: return ret; } -static void -shmem_clflush_swizzled_range(char *addr, unsigned long length, - bool swizzled) -{ - if (unlikely(swizzled)) { - unsigned long start = (unsigned long) addr; - unsigned long end = (unsigned long) addr + length; - - /* For swizzling simply ensure that we always flush both - * channels. Lame, but simple and it works. Swizzled - * pwrite/pread is far from a hotpath - current userspace - * doesn't use it at all. */ - start = round_down(start, 128); - end = round_up(end, 128); - - drm_clflush_virt_range((void *)start, end - start); - } else { - drm_clflush_virt_range(addr, length); - } - -} - -/* Only difference to the fast-path function is that this can handle bit17 - * and uses non-atomic copy and kmap functions. 
*/ static int -shmem_pread_slow(struct page *page, int offset, int length, - char __user *user_data, - bool page_do_bit17_swizzling, bool needs_clflush) +shmem_pread(struct page *page, int offset, int len, char __user *user_data, + bool needs_clflush) { char *vaddr; int ret; vaddr = kmap(page); - if (needs_clflush) - shmem_clflush_swizzled_range(vaddr + offset, length, - page_do_bit17_swizzling); - if (page_do_bit17_swizzling) - ret = __copy_to_user_swizzled(user_data, vaddr, offset, length); - else - ret = __copy_to_user(user_data, vaddr + offset, length); - kunmap(page); - - return ret ? - EFAULT : 0; -} - -static int -shmem_pread(struct page *page, int offset, int length, char __user *user_data, - bool page_do_bit17_swizzling, bool needs_clflush) -{ - int ret; + if (needs_clflush) + drm_clflush_virt_range(vaddr + offset, len); - ret = -ENODEV; - if (!page_do_bit17_swizzling) { - char *vaddr = kmap_atomic(page); + ret = __copy_to_user(user_data, vaddr + offset, len); - if (needs_clflush) - drm_clflush_virt_range(vaddr + offset, length); - ret = __copy_to_user_inatomic(user_data, vaddr + offset, length); - kunmap_atomic(vaddr); - } - if (ret == 0) - return 0; + kunmap(page); - return shmem_pread_slow(page, offset, length, user_data, - page_do_bit17_swizzling, needs_clflush); + return ret ? -EFAULT : 0; } static int @@ -1104,15 +916,10 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, { char __user *user_data; u64 remain; - unsigned int obj_do_bit17_swizzling; unsigned int needs_clflush; unsigned int idx, offset; int ret; - obj_do_bit17_swizzling = 0; - if (i915_gem_object_needs_bit17_swizzle(obj)) - obj_do_bit17_swizzling = BIT(17); - ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex); if (ret) return ret; @@ -1130,7 +937,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, unsigned int length = min_t(u64, remain, PAGE_SIZE - offset); ret = shmem_pread(page, offset, length, user_data, - page_to_phys(page) & obj_do_bit17_swizzling, needs_clflush); if (ret) break; @@ -1174,6 +980,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_ggtt *ggtt = &i915->ggtt; + intel_wakeref_t wakeref; struct drm_mm_node node; struct i915_vma *vma; void __user *user_data; @@ -1184,7 +991,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, if (ret) return ret; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE | PIN_NONFAULT | @@ -1257,7 +1064,7 @@ out_unpin: i915_vma_unpin(vma); } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -1282,8 +1089,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, if (args->size == 0) return 0; - if (!access_ok(VERIFY_WRITE, - u64_to_user_ptr(args->data_ptr), + if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size)) return -EFAULT; @@ -1301,8 +1107,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; @@ -1359,6 +1164,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_ggtt *ggtt = &i915->ggtt; + intel_wakeref_t wakeref; struct drm_mm_node node; struct i915_vma *vma; u64 remain, offset; @@ -1377,13 +1183,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, * 
This easily dwarfs any performance advantage from * using the cache bypass of indirect GGTT access. */ - if (!intel_runtime_pm_get_if_in_use(i915)) { + wakeref = intel_runtime_pm_get_if_in_use(i915); + if (!wakeref) { ret = -EFAULT; goto out_unlock; } } else { /* No backing pages, no fallback, we must force GGTT access */ - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); } vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, @@ -1465,39 +1272,12 @@ out_unpin: i915_vma_unpin(vma); } out_rpm: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); out_unlock: mutex_unlock(&i915->drm.struct_mutex); return ret; } -static int -shmem_pwrite_slow(struct page *page, int offset, int length, - char __user *user_data, - bool page_do_bit17_swizzling, - bool needs_clflush_before, - bool needs_clflush_after) -{ - char *vaddr; - int ret; - - vaddr = kmap(page); - if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) - shmem_clflush_swizzled_range(vaddr + offset, length, - page_do_bit17_swizzling); - if (page_do_bit17_swizzling) - ret = __copy_from_user_swizzled(vaddr, offset, user_data, - length); - else - ret = __copy_from_user(vaddr + offset, user_data, length); - if (needs_clflush_after) - shmem_clflush_swizzled_range(vaddr + offset, length, - page_do_bit17_swizzling); - kunmap(page); - - return ret ? -EFAULT : 0; -} - /* Per-page copy function for the shmem pwrite fastpath. * Flushes invalid cachelines before writing to the target if * needs_clflush_before is set and flushes out any written cachelines after @@ -1505,31 +1285,24 @@ shmem_pwrite_slow(struct page *page, int offset, int length, */ static int shmem_pwrite(struct page *page, int offset, int len, char __user *user_data, - bool page_do_bit17_swizzling, bool needs_clflush_before, bool needs_clflush_after) { + char *vaddr; int ret; - ret = -ENODEV; - if (!page_do_bit17_swizzling) { - char *vaddr = kmap_atomic(page); + vaddr = kmap(page); - if (needs_clflush_before) - drm_clflush_virt_range(vaddr + offset, len); - ret = __copy_from_user_inatomic(vaddr + offset, user_data, len); - if (needs_clflush_after) - drm_clflush_virt_range(vaddr + offset, len); + if (needs_clflush_before) + drm_clflush_virt_range(vaddr + offset, len); - kunmap_atomic(vaddr); - } - if (ret == 0) - return ret; + ret = __copy_from_user(vaddr + offset, user_data, len); + if (!ret && needs_clflush_after) + drm_clflush_virt_range(vaddr + offset, len); + + kunmap(page); - return shmem_pwrite_slow(page, offset, len, user_data, - page_do_bit17_swizzling, - needs_clflush_before, - needs_clflush_after); + return ret ? -EFAULT : 0; } static int @@ -1539,7 +1312,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, struct drm_i915_private *i915 = to_i915(obj->base.dev); void __user *user_data; u64 remain; - unsigned int obj_do_bit17_swizzling; unsigned int partial_cacheline_write; unsigned int needs_clflush; unsigned int offset, idx; @@ -1554,10 +1326,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, if (ret) return ret; - obj_do_bit17_swizzling = 0; - if (i915_gem_object_needs_bit17_swizzle(obj)) - obj_do_bit17_swizzling = BIT(17); - /* If we don't overwrite a cacheline completely we need to be * careful to have up-to-date data by first clflushing. Don't * overcomplicate things and flush the entire patch. 
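A minimal caller-side sketch of the wakeref-tracked runtime-PM pattern that these hunks convert the driver to, assuming only the helpers visible in this diff (intel_wakeref_t, intel_runtime_pm_get(), intel_runtime_pm_get_if_in_use(), intel_runtime_pm_put(), with_intel_runtime_pm()); the wrapper function and do_work() below are invented stand-ins, not code from the patch:

/* Illustrative sketch only: do_work() is a placeholder for anything
 * that requires the device to be awake. */
static void do_work(struct drm_i915_private *dev_priv)
{
	(void)dev_priv;
}

static void example_runtime_pm_user(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/* Explicit form: the cookie returned by _get() is handed back to _put(). */
	wakeref = intel_runtime_pm_get(dev_priv);
	do_work(dev_priv);
	intel_runtime_pm_put(dev_priv, wakeref);

	/* Scoped form: the macro acquires the wakeref around the block
	 * and releases it when the block exits. */
	with_intel_runtime_pm(dev_priv, wakeref)
		do_work(dev_priv);

	/* Conditional form: only proceed if the device is already awake. */
	wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
	if (wakeref) {
		do_work(dev_priv);
		intel_runtime_pm_put(dev_priv, wakeref);
	}
}

Returning a distinct cookie from each _get() is what lets an unbalanced or leaked reference be attributed to a specific acquisition site, rather than only detecting that the global count is wrong.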
@@ -1574,7 +1342,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, unsigned int length = min_t(u64, remain, PAGE_SIZE - offset); ret = shmem_pwrite(page, offset, length, user_data, - page_to_phys(page) & obj_do_bit17_swizzling, (offset | length) & partial_cacheline_write, needs_clflush & CLFLUSH_AFTER); if (ret) @@ -1609,9 +1376,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (args->size == 0) return 0; - if (!access_ok(VERIFY_READ, - u64_to_user_ptr(args->data_ptr), - args->size)) + if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size)) return -EFAULT; obj = i915_gem_object_lookup(file, args->handle); @@ -1641,8 +1406,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (ret) goto err; @@ -1680,23 +1444,21 @@ err: static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) { - struct drm_i915_private *i915; + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct list_head *list; struct i915_vma *vma; GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + mutex_lock(&i915->ggtt.vm.mutex); for_each_ggtt_vma(vma, obj) { - if (i915_vma_is_active(vma)) - continue; - if (!drm_mm_node_allocated(&vma->node)) continue; - list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + list_move_tail(&vma->vm_link, &vma->vm->bound_list); } + mutex_unlock(&i915->ggtt.vm.mutex); - i915 = to_i915(obj->base.dev); spin_lock(&i915->mm.obj_lock); list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; list_move_tail(&obj->mm.link, list); @@ -1716,25 +1478,45 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_set_domain *args = data; struct drm_i915_gem_object *obj; - uint32_t read_domains = args->read_domains; - uint32_t write_domain = args->write_domain; + u32 read_domains = args->read_domains; + u32 write_domain = args->write_domain; int err; /* Only handle setting domains to types used by the CPU. */ if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) return -EINVAL; - /* Having something in the write domain implies it's in the read + /* + * Having something in the write domain implies it's in the read * domain, and only that read domain. Enforce that in the request. */ - if (write_domain != 0 && read_domains != write_domain) + if (write_domain && read_domains != write_domain) return -EINVAL; + if (!read_domains) + return 0; + obj = i915_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; - /* Try to flush the object off the GPU without holding the lock. + /* + * Already in the desired write domain? Nothing for us to do! + * + * We apply a little bit of cunning here to catch a broader set of + * no-ops. If obj->write_domain is set, we must be in the same + * obj->read_domains, and only that domain. Therefore, if that + * obj->write_domain matches the request read_domains, we are + * already in the same read/write domain and can skip the operation, + * without having to further check the requested write_domain. + */ + if (READ_ONCE(obj->write_domain) == read_domains) { + err = 0; + goto out; + } + + /* + * Try to flush the object off the GPU without holding the lock. * We will repeat the flush holding the lock in the normal manner * to catch cases where we are gazumped. */ @@ -1742,8 +1524,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, I915_WAIT_INTERRUPTIBLE | I915_WAIT_PRIORITY | (write_domain ? 
I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (err) goto out; @@ -1827,6 +1608,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, return 0; } +static inline bool +__vma_matches(struct vm_area_struct *vma, struct file *filp, + unsigned long addr, unsigned long size) +{ + if (vma->vm_file != filp) + return false; + + return vma->vm_start == addr && + (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); +} + /** * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address * it is mapped to. @@ -1869,39 +1661,50 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, * pages from. */ if (!obj->base.filp) { - i915_gem_object_put(obj); - return -ENXIO; + addr = -ENXIO; + goto err; + } + + if (range_overflows(args->offset, args->size, (u64)obj->base.size)) { + addr = -EINVAL; + goto err; } addr = vm_mmap(obj->base.filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, args->offset); + if (IS_ERR_VALUE(addr)) + goto err; + if (args->flags & I915_MMAP_WC) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; if (down_write_killable(&mm->mmap_sem)) { - i915_gem_object_put(obj); - return -EINTR; + addr = -EINTR; + goto err; } vma = find_vma(mm, addr); - if (vma) + if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); else addr = -ENOMEM; up_write(&mm->mmap_sem); + if (IS_ERR_VALUE(addr)) + goto err; /* This may race, but that's ok, it only gets set */ WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU); } i915_gem_object_put(obj); - if (IS_ERR((void *)addr)) - return addr; - - args->addr_ptr = (uint64_t) addr; + args->addr_ptr = (u64)addr; return 0; + +err: + i915_gem_object_put(obj); + return addr; } static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) @@ -1932,6 +1735,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) * 2 - Recognise WC as a separate cache domain so that we can flush the * delayed writes via GTT before performing direct access via WC. * + * 3 - Remove implicit set-domain(GTT) and synchronisation on initial + * pagefault; swapin remains transparent. + * * Restrictions: * * * snoopable objects cannot be accessed via the GTT. It can cause machine @@ -1959,7 +1765,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) */ int i915_gem_mmap_gtt_version(void) { - return 2; + return 3; } static inline struct i915_ggtt_view @@ -2012,8 +1818,10 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; bool write = area->vm_flags & VM_WRITE; + intel_wakeref_t wakeref; struct i915_vma *vma; pgoff_t page_offset; + int srcu; int ret; /* Sanity check that we allow writing into this object */ @@ -2025,27 +1833,21 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) trace_i915_gem_object_fault(obj, page_offset, true, write); - /* Try to flush the object off the GPU first without holding the lock. - * Upon acquiring the lock, we will perform our sanity checks and then - * repeat the flush holding the lock in the normal manner to catch cases - * where we are gazumped. 
- */ - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT, - NULL); - if (ret) - goto err; - ret = i915_gem_object_pin_pages(obj); if (ret) goto err; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); + + srcu = i915_reset_trylock(dev_priv); + if (srcu < 0) { + ret = srcu; + goto err_rpm; + } ret = i915_mutex_lock_interruptible(dev); if (ret) - goto err_rpm; + goto err_reset; /* Access to snoopable pages through the GTT is incoherent. */ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) { @@ -2053,7 +1855,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) goto err_unlock; } - /* Now pin it into the GTT as needed */ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE | @@ -2087,10 +1888,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) goto err_unlock; } - ret = i915_gem_object_set_to_gtt_domain(obj, write); - if (ret) - goto err_unpin; - ret = i915_vma_pin_fence(vma); if (ret) goto err_unpin; @@ -2118,8 +1915,10 @@ err_unpin: __i915_vma_unpin(vma); err_unlock: mutex_unlock(&dev->struct_mutex); +err_reset: + i915_reset_unlock(dev_priv, srcu); err_rpm: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); i915_gem_object_unpin_pages(obj); err: switch (ret) { @@ -2130,7 +1929,7 @@ err: * fail). But any other -EIO isn't ours (e.g. swap in failure) * and so needs to be reported. */ - if (!i915_terminally_wedged(&dev_priv->gpu_error)) + if (!i915_terminally_wedged(dev_priv)) return VM_FAULT_SIGBUS; /* else: fall through */ case -EAGAIN: @@ -2192,6 +1991,7 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); + intel_wakeref_t wakeref; /* Serialisation between user GTT access and our code depends upon * revoking the CPU's PTE whilst the mutex is held. The next user @@ -2202,7 +2002,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) * wakeref. 
*/ lockdep_assert_held(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (!obj->userfault_count) goto out; @@ -2219,7 +2019,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) wmb(); out: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); } void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) @@ -2299,8 +2099,8 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) int i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev, - uint32_t handle, - uint64_t *offset) + u32 handle, + u64 *offset) { struct drm_i915_gem_object *obj; int ret; @@ -2402,7 +2202,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, struct page *page; __i915_gem_object_release_shmem(obj, pages, true); - i915_gem_gtt_finish_pages(obj, pages); if (i915_gem_object_needs_bit17_swizzle(obj)) @@ -2447,8 +2246,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) struct sg_table *pages; pages = fetch_and_zero(&obj->mm.pages); - if (!pages) - return NULL; + if (IS_ERR_OR_NULL(pages)) + return pages; spin_lock(&i915->mm.obj_lock); list_del(&obj->mm.link); @@ -2472,22 +2271,23 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) return pages; } -void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, - enum i915_mm_subclass subclass) +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, + enum i915_mm_subclass subclass) { struct sg_table *pages; + int ret; if (i915_gem_object_has_pinned_pages(obj)) - return; + return -EBUSY; GEM_BUG_ON(obj->bind_count); - if (!i915_gem_object_has_pages(obj)) - return; /* May be called by shrinker from within get_pages() (on another bo) */ mutex_lock_nested(&obj->mm.lock, subclass); - if (unlikely(atomic_read(&obj->mm.pages_pin_count))) + if (unlikely(atomic_read(&obj->mm.pages_pin_count))) { + ret = -EBUSY; goto unlock; + } /* * ->put_pages might need to allocate memory for the bit17 swizzle @@ -2495,11 +2295,24 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, * lists early. */ pages = __i915_gem_object_unset_pages(obj); + + /* + * XXX Temporary hijinx to avoid updating all backends to handle + * NULL pages. In the future, when we have more asynchronous + * get_pages backends we should be better able to handle the + * cancellation of the async task in a more uniform manner. + */ + if (!pages && !i915_gem_object_needs_async_cancel(obj)) + pages = ERR_PTR(-EINVAL); + if (!IS_ERR(pages)) obj->ops->put_pages(obj, pages); + ret = 0; unlock: mutex_unlock(&obj->mm.lock); + + return ret; } bool i915_sg_trim(struct sg_table *orig_st) @@ -2559,7 +2372,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) * If there's no chance of allocating enough pages for the whole * object, bail early. */ - if (page_count > totalram_pages) + if (page_count > totalram_pages()) return -ENOMEM; st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -2596,7 +2409,7 @@ rebuild_st: do { cond_resched(); page = shmem_read_mapping_page_gfp(mapping, i, gfp); - if (likely(!IS_ERR(page))) + if (!IS_ERR(page)) break; if (!*s) { @@ -2730,6 +2543,14 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, lockdep_assert_held(&obj->mm.lock); + /* Make the pages coherent with the GPU (flushing any swapin). 
*/ + if (obj->cache_dirty) { + obj->write_domain = 0; + if (i915_gem_object_has_struct_page(obj)) + drm_clflush_sg(pages); + obj->cache_dirty = false; + } + obj->mm.get_page.sg_pos = pages->sgl; obj->mm.get_page.sg_idx = 0; @@ -2931,6 +2752,33 @@ err_unlock: goto out_unlock; } +void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, + unsigned long offset, + unsigned long size) +{ + enum i915_map_type has_type; + void *ptr; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + GEM_BUG_ON(range_overflows_t(typeof(obj->base.size), + offset, size, obj->base.size)); + + obj->mm.dirty = true; + + if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) + return; + + ptr = page_unpack_bits(obj->mm.mapping, &has_type); + if (has_type == I915_MAP_WC) + return; + + drm_clflush_virt_range(ptr + offset, size); + if (size == obj->base.size) { + obj->write_domain &= ~I915_GEM_DOMAIN_CPU; + obj->cache_dirty = false; + } +} + static int i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *arg) @@ -2940,7 +2788,11 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, u64 remain, offset; unsigned int pg; - /* Before we instantiate/pin the backing store for our use, we + /* Caller already validated user args */ + GEM_BUG_ON(!access_ok(user_data, arg->size)); + + /* + * Before we instantiate/pin the backing store for our use, we * can prepopulate the shmemfs filp efficiently using a write into * the pagecache. We avoid the penalty of instantiating all the * pages, important if the user is just writing to a few and never @@ -2954,7 +2806,8 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, if (obj->mm.madv != I915_MADV_WILLNEED) return -EFAULT; - /* Before the pages are instantiated the object is treated as being + /* + * Before the pages are instantiated the object is treated as being * in the CPU domain. The pages will be clflushed as required before * use, and we can freely write into the pages directly. 
If userspace * races pwrite with any other operation; corruption will ensue - @@ -2970,20 +2823,32 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, struct page *page; void *data, *vaddr; int err; + char c; len = PAGE_SIZE - pg; if (len > remain) len = remain; + /* Prefault the user page to reduce potential recursion */ + err = __get_user(c, user_data); + if (err) + return err; + + err = __get_user(c, user_data + len - 1); + if (err) + return err; + err = pagecache_write_begin(obj->base.filp, mapping, offset, len, 0, &page, &data); if (err < 0) return err; - vaddr = kmap(page); - unwritten = copy_from_user(vaddr + pg, user_data, len); - kunmap(page); + vaddr = kmap_atomic(page); + unwritten = __copy_from_user_inatomic(vaddr + pg, + user_data, + len); + kunmap_atomic(vaddr); err = pagecache_write_end(obj->base.filp, mapping, offset, len, len - unwritten, @@ -2991,8 +2856,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, if (err < 0) return err; + /* We don't handle -EFAULT, leave it to the caller to check */ if (unwritten) - return -EFAULT; + return -ENODEV; remain -= len; user_data += len; @@ -3003,451 +2869,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, return 0; } -static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv, - const struct i915_gem_context *ctx) -{ - unsigned int score; - unsigned long prev_hang; - - if (i915_gem_context_is_banned(ctx)) - score = I915_CLIENT_SCORE_CONTEXT_BAN; - else - score = 0; - - prev_hang = xchg(&file_priv->hang_timestamp, jiffies); - if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)) - score += I915_CLIENT_SCORE_HANG_FAST; - - if (score) { - atomic_add(score, &file_priv->ban_score); - - DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n", - ctx->name, score, - atomic_read(&file_priv->ban_score)); - } -} - -static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) -{ - unsigned int score; - bool banned, bannable; - - atomic_inc(&ctx->guilty_count); - - bannable = i915_gem_context_is_bannable(ctx); - score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); - banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; - - /* Cool contexts don't accumulate client ban score */ - if (!bannable) - return; - - if (banned) { - DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n", - ctx->name, atomic_read(&ctx->guilty_count), - score); - i915_gem_context_set_banned(ctx); - } - - if (!IS_ERR_OR_NULL(ctx->file_priv)) - i915_gem_client_mark_guilty(ctx->file_priv, ctx); -} - -static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) -{ - atomic_inc(&ctx->active_count); -} - -struct i915_request * -i915_gem_find_active_request(struct intel_engine_cs *engine) -{ - struct i915_request *request, *active = NULL; - unsigned long flags; - - /* - * We are called by the error capture, reset and to dump engine - * state at random points in time. In particular, note that neither is - * crucially ordered with an interrupt. After a hang, the GPU is dead - * and we assume that no more writes can happen (we waited long enough - * for all writes that were in transaction to be flushed) - adding an - * extra delay for a recent interrupt is pointless. Hence, we do - * not need an engine->irq_seqno_barrier() before the seqno reads. - * At all other times, we must assume the GPU is still running, but - * we only care about the snapshot of this moment. 
- */ - spin_lock_irqsave(&engine->timeline.lock, flags); - list_for_each_entry(request, &engine->timeline.requests, link) { - if (__i915_request_completed(request, request->global_seqno)) - continue; - - active = request; - break; - } - spin_unlock_irqrestore(&engine->timeline.lock, flags); - - return active; -} - -/* - * Ensure irq handler finishes, and not run again. - * Also return the active request so that we only search for it once. - */ -struct i915_request * -i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) -{ - struct i915_request *request; - - /* - * During the reset sequence, we must prevent the engine from - * entering RC6. As the context state is undefined until we restart - * the engine, if it does enter RC6 during the reset, the state - * written to the powercontext is undefined and so we may lose - * GPU state upon resume, i.e. fail to restart after a reset. - */ - intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL); - - request = engine->reset.prepare(engine); - if (request && request->fence.error == -EIO) - request = ERR_PTR(-EIO); /* Previous reset failed! */ - - return request; -} - -int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - struct i915_request *request; - enum intel_engine_id id; - int err = 0; - - for_each_engine(engine, dev_priv, id) { - request = i915_gem_reset_prepare_engine(engine); - if (IS_ERR(request)) { - err = PTR_ERR(request); - continue; - } - - engine->hangcheck.active_request = request; - } - - i915_gem_revoke_fences(dev_priv); - intel_uc_sanitize(dev_priv); - - return err; -} - -static void engine_skip_context(struct i915_request *request) -{ - struct intel_engine_cs *engine = request->engine; - struct i915_gem_context *hung_ctx = request->gem_context; - struct i915_timeline *timeline = request->timeline; - unsigned long flags; - - GEM_BUG_ON(timeline == &engine->timeline); - - spin_lock_irqsave(&engine->timeline.lock, flags); - spin_lock(&timeline->lock); - - list_for_each_entry_continue(request, &engine->timeline.requests, link) - if (request->gem_context == hung_ctx) - i915_request_skip(request, -EIO); - - list_for_each_entry(request, &timeline->requests, link) - i915_request_skip(request, -EIO); - - spin_unlock(&timeline->lock); - spin_unlock_irqrestore(&engine->timeline.lock, flags); -} - -/* Returns the request if it was guilty of the hang */ -static struct i915_request * -i915_gem_reset_request(struct intel_engine_cs *engine, - struct i915_request *request, - bool stalled) -{ - /* The guilty request will get skipped on a hung engine. - * - * Users of client default contexts do not rely on logical - * state preserved between batches so it is safe to execute - * queued requests following the hang. Non default contexts - * rely on preserved state, so skipping a batch loses the - * evolution of the state and it needs to be considered corrupted. - * Executing more queued batches on top of corrupted state is - * risky. But we take the risk by trying to advance through - * the queued requests in order to make the client behaviour - * more predictable around resets, by not throwing away random - * amount of batches it has prepared for execution. Sophisticated - * clients can use gem_reset_stats_ioctl and dma fence status - * (exported via sync_file info ioctl on explicit fences) to observe - * when it loses the context state and should rebuild accordingly. 
- * - * The context ban, and ultimately the client ban, mechanism are safety - * valves if client submission ends up resulting in nothing more than - * subsequent hangs. - */ - - if (i915_request_completed(request)) { - GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n", - engine->name, request->global_seqno, - request->fence.context, request->fence.seqno, - intel_engine_get_seqno(engine)); - stalled = false; - } - - if (stalled) { - i915_gem_context_mark_guilty(request->gem_context); - i915_request_skip(request, -EIO); - - /* If this context is now banned, skip all pending requests. */ - if (i915_gem_context_is_banned(request->gem_context)) - engine_skip_context(request); - } else { - /* - * Since this is not the hung engine, it may have advanced - * since the hang declaration. Double check by refinding - * the active request at the time of the reset. - */ - request = i915_gem_find_active_request(engine); - if (request) { - unsigned long flags; - - i915_gem_context_mark_innocent(request->gem_context); - dma_fence_set_error(&request->fence, -EAGAIN); - - /* Rewind the engine to replay the incomplete rq */ - spin_lock_irqsave(&engine->timeline.lock, flags); - request = list_prev_entry(request, link); - if (&request->link == &engine->timeline.requests) - request = NULL; - spin_unlock_irqrestore(&engine->timeline.lock, flags); - } - } - - return request; -} - -void i915_gem_reset_engine(struct intel_engine_cs *engine, - struct i915_request *request, - bool stalled) -{ - /* - * Make sure this write is visible before we re-enable the interrupt - * handlers on another CPU, as tasklet_enable() resolves to just - * a compiler barrier which is insufficient for our purpose here. - */ - smp_store_mb(engine->irq_posted, 0); - - if (request) - request = i915_gem_reset_request(engine, request, stalled); - - /* Setup the CS to resume from the breadcrumb of the hung request */ - engine->reset.reset(engine, request); -} - -void i915_gem_reset(struct drm_i915_private *dev_priv, - unsigned int stalled_mask) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - i915_retire_requests(dev_priv); - - for_each_engine(engine, dev_priv, id) { - struct intel_context *ce; - - i915_gem_reset_engine(engine, - engine->hangcheck.active_request, - stalled_mask & ENGINE_MASK(id)); - ce = fetch_and_zero(&engine->last_retired_context); - if (ce) - intel_context_unpin(ce); - - /* - * Ostensibily, we always want a context loaded for powersaving, - * so if the engine is idle after the reset, send a request - * to load our scratch kernel_context. - * - * More mysteriously, if we leave the engine idle after a reset, - * the next userspace batch may hang, with what appears to be - * an incoherent read by the CS (presumably stale TLB). An - * empty request appears sufficient to paper over the glitch. 
- */ - if (intel_engine_is_idle(engine)) { - struct i915_request *rq; - - rq = i915_request_alloc(engine, - dev_priv->kernel_context); - if (!IS_ERR(rq)) - i915_request_add(rq); - } - } - - i915_gem_restore_fences(dev_priv); -} - -void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) -{ - engine->reset.finish(engine); - - intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); -} - -void i915_gem_reset_finish(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - for_each_engine(engine, dev_priv, id) { - engine->hangcheck.active_request = NULL; - i915_gem_reset_finish_engine(engine); - } -} - -static void nop_submit_request(struct i915_request *request) -{ - unsigned long flags; - - GEM_TRACE("%s fence %llx:%d -> -EIO\n", - request->engine->name, - request->fence.context, request->fence.seqno); - dma_fence_set_error(&request->fence, -EIO); - - spin_lock_irqsave(&request->engine->timeline.lock, flags); - __i915_request_submit(request); - intel_engine_init_global_seqno(request->engine, request->global_seqno); - spin_unlock_irqrestore(&request->engine->timeline.lock, flags); -} - -void i915_gem_set_wedged(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - GEM_TRACE("start\n"); - - if (GEM_SHOW_DEBUG()) { - struct drm_printer p = drm_debug_printer(__func__); - - for_each_engine(engine, i915, id) - intel_engine_dump(engine, &p, "%s\n", engine->name); - } - - if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags)) - goto out; - - /* - * First, stop submission to hw, but do not yet complete requests by - * rolling the global seqno forward (since this would complete requests - * for which we haven't set the fence error to EIO yet). - */ - for_each_engine(engine, i915, id) - i915_gem_reset_prepare_engine(engine); - - /* Even if the GPU reset fails, it should still stop the engines */ - if (INTEL_GEN(i915) >= 5) - intel_gpu_reset(i915, ALL_ENGINES); - - for_each_engine(engine, i915, id) { - engine->submit_request = nop_submit_request; - engine->schedule = NULL; - } - i915->caps.scheduler = 0; - - /* - * Make sure no request can slip through without getting completed by - * either this call here to intel_engine_init_global_seqno, or the one - * in nop_submit_request. - */ - synchronize_rcu(); - - /* Mark all executing requests as skipped */ - for_each_engine(engine, i915, id) - engine->cancel_requests(engine); - - for_each_engine(engine, i915, id) { - i915_gem_reset_finish_engine(engine); - intel_engine_wakeup(engine); - } - -out: - GEM_TRACE("end\n"); - - wake_up_all(&i915->gpu_error.reset_queue); -} - -bool i915_gem_unset_wedged(struct drm_i915_private *i915) -{ - struct i915_timeline *tl; - - lockdep_assert_held(&i915->drm.struct_mutex); - if (!test_bit(I915_WEDGED, &i915->gpu_error.flags)) - return true; - - GEM_TRACE("start\n"); - - /* - * Before unwedging, make sure that all pending operations - * are flushed and errored out - we may have requests waiting upon - * third party fences. We marked all inflight requests as EIO, and - * every execbuf since returned EIO, for consistency we want all - * the currently pending requests to also be marked as EIO, which - * is done inside our nop_submit_request - and so we must wait. - * - * No more can be submitted until we reset the wedged bit. 
- */ - list_for_each_entry(tl, &i915->gt.timelines, link) { - struct i915_request *rq; - - rq = i915_gem_active_peek(&tl->last_request, - &i915->drm.struct_mutex); - if (!rq) - continue; - - /* - * We can't use our normal waiter as we want to - * avoid recursively trying to handle the current - * reset. The basic dma_fence_default_wait() installs - * a callback for dma_fence_signal(), which is - * triggered by our nop handler (indirectly, the - * callback enables the signaler thread which is - * woken by the nop_submit_request() advancing the seqno - * and when the seqno passes the fence, the signaler - * then signals the fence waking us up). - */ - if (dma_fence_default_wait(&rq->fence, true, - MAX_SCHEDULE_TIMEOUT) < 0) - return false; - } - i915_retire_requests(i915); - GEM_BUG_ON(i915->gt.active_requests); - - if (!intel_gpu_reset(i915, ALL_ENGINES)) - intel_engines_sanitize(i915); - - /* - * Undo nop_submit_request. We prevent all new i915 requests from - * being queued (by disallowing execbuf whilst wedged) so having - * waited for all active requests above, we know the system is idle - * and do not have to worry about a thread being inside - * engine->submit_request() as we swap over. So unlike installing - * the nop_submit_request on reset, we can do this from normal - * context and do not require stop_machine(). - */ - intel_engines_reset_default_submission(i915); - i915_gem_contexts_lost(i915); - - GEM_TRACE("end\n"); - - smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ - clear_bit(I915_WEDGED, &i915->gpu_error.flags); - - return true; -} - static void i915_gem_retire_work_handler(struct work_struct *work) { @@ -3472,180 +2893,105 @@ i915_gem_retire_work_handler(struct work_struct *work) round_jiffies_up_relative(HZ)); } -static void shrink_caches(struct drm_i915_private *i915) +static bool switch_to_kernel_context_sync(struct drm_i915_private *i915, + unsigned long mask) { - /* - * kmem_cache_shrink() discards empty slabs and reorders partially - * filled slabs to prioritise allocating from the mostly full slabs, - * with the aim of reducing fragmentation. - */ - kmem_cache_shrink(i915->priorities); - kmem_cache_shrink(i915->dependencies); - kmem_cache_shrink(i915->requests); - kmem_cache_shrink(i915->luts); - kmem_cache_shrink(i915->vmas); - kmem_cache_shrink(i915->objects); -} - -struct sleep_rcu_work { - union { - struct rcu_head rcu; - struct work_struct work; - }; - struct drm_i915_private *i915; - unsigned int epoch; -}; + bool result = true; -static inline bool -same_epoch(struct drm_i915_private *i915, unsigned int epoch) -{ /* - * There is a small chance that the epoch wrapped since we started - * sleeping. If we assume that epoch is at least a u32, then it will - * take at least 2^32 * 100ms for it to wrap, or about 326 years. + * Even if we fail to switch, give whatever is running a small chance + * to save itself before we report the failure. Yes, this may be a + * false positive due to e.g. ENOMEM, caveat emptor! 
*/ - return epoch == READ_ONCE(i915->gt.epoch); -} - -static void __sleep_work(struct work_struct *work) -{ - struct sleep_rcu_work *s = container_of(work, typeof(*s), work); - struct drm_i915_private *i915 = s->i915; - unsigned int epoch = s->epoch; - - kfree(s); - if (same_epoch(i915, epoch)) - shrink_caches(i915); -} + if (i915_gem_switch_to_kernel_context(i915, mask)) + result = false; -static void __sleep_rcu(struct rcu_head *rcu) -{ - struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu); - struct drm_i915_private *i915 = s->i915; - - destroy_rcu_head(&s->rcu); + if (i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED | + I915_WAIT_FOR_IDLE_BOOST, + I915_GEM_IDLE_TIMEOUT)) + result = false; + + if (!result) { + if (i915_modparams.reset) { /* XXX hide warning from gem_eio */ + dev_err(i915->drm.dev, + "Failed to idle engines, declaring wedged!\n"); + GEM_TRACE_DUMP(); + } - if (same_epoch(i915, s->epoch)) { - INIT_WORK(&s->work, __sleep_work); - queue_work(i915->wq, &s->work); - } else { - kfree(s); + /* Forcibly cancel outstanding work and leave the gpu quiet. */ + i915_gem_set_wedged(i915); } -} -static inline bool -new_requests_since_last_retire(const struct drm_i915_private *i915) -{ - return (READ_ONCE(i915->gt.active_requests) || - work_pending(&i915->gt.idle_work.work)); + i915_retire_requests(i915); /* ensure we flush after wedging */ + return result; } -static void assert_kernel_context_is_current(struct drm_i915_private *i915) +static bool load_power_context(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; + /* Force loading the kernel context on all engines */ + if (!switch_to_kernel_context_sync(i915, ALL_ENGINES)) + return false; - if (i915_terminally_wedged(&i915->gpu_error)) - return; + /* + * Immediately park the GPU so that we enable powersaving and + * treat it as idle. The next time we issue a request, we will + * unpark and start using the engine->pinned_default_state, otherwise + * it is in limbo and an early reset may fail. + */ + __i915_gem_park(i915); - GEM_BUG_ON(i915->gt.active_requests); - for_each_engine(engine, i915, id) { - GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request)); - GEM_BUG_ON(engine->last_retired_context != - to_intel_context(i915->kernel_context, engine)); - } + return true; } static void i915_gem_idle_work_handler(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), gt.idle_work.work); - unsigned int epoch = I915_EPOCH_INVALID; + struct drm_i915_private *i915 = + container_of(work, typeof(*i915), gt.idle_work.work); bool rearm_hangcheck; - if (!READ_ONCE(dev_priv->gt.awake)) + if (!READ_ONCE(i915->gt.awake)) return; - if (READ_ONCE(dev_priv->gt.active_requests)) + if (READ_ONCE(i915->gt.active_requests)) return; - /* - * Flush out the last user context, leaving only the pinned - * kernel context resident. When we are idling on the kernel_context, - * no more new requests (with a context switch) are emitted and we - * can finally rest. A consequence is that the idle work handler is - * always called at least twice before idling (and if the system is - * idle that implies a round trip through the retire worker). 
- */ - mutex_lock(&dev_priv->drm.struct_mutex); - i915_gem_switch_to_kernel_context(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); - - GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n", - READ_ONCE(dev_priv->gt.active_requests)); - - /* - * Wait for last execlists context complete, but bail out in case a - * new request is submitted. As we don't trust the hardware, we - * continue on if the wait times out. This is necessary to allow - * the machine to suspend even if the hardware dies, and we will - * try to recover in resume (after depriving the hardware of power, - * it may be in a better mmod). - */ - __wait_for(if (new_requests_since_last_retire(dev_priv)) return, - intel_engines_are_idle(dev_priv), - I915_IDLE_ENGINES_TIMEOUT * 1000, - 10, 500); - rearm_hangcheck = - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); + cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); - if (!mutex_trylock(&dev_priv->drm.struct_mutex)) { + if (!mutex_trylock(&i915->drm.struct_mutex)) { /* Currently busy, come back later */ - mod_delayed_work(dev_priv->wq, - &dev_priv->gt.idle_work, + mod_delayed_work(i915->wq, + &i915->gt.idle_work, msecs_to_jiffies(50)); goto out_rearm; } /* - * New request retired after this work handler started, extend active - * period until next instance of the work. + * Flush out the last user context, leaving only the pinned + * kernel context resident. Should anything unfortunate happen + * while we are idle (such as the GPU being power cycled), no users + * will be harmed. */ - if (new_requests_since_last_retire(dev_priv)) - goto out_unlock; + if (!work_pending(&i915->gt.idle_work.work) && + !i915->gt.active_requests) { + ++i915->gt.active_requests; /* don't requeue idle */ - epoch = __i915_gem_park(dev_priv); + switch_to_kernel_context_sync(i915, i915->gt.active_engines); - assert_kernel_context_is_current(dev_priv); + if (!--i915->gt.active_requests) { + __i915_gem_park(i915); + rearm_hangcheck = false; + } + } - rearm_hangcheck = false; -out_unlock: - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&i915->drm.struct_mutex); out_rearm: if (rearm_hangcheck) { - GEM_BUG_ON(!dev_priv->gt.awake); - i915_queue_hangcheck(dev_priv); - } - - /* - * When we are idle, it is an opportune time to reap our caches. - * However, we have many objects that utilise RCU and the ordered - * i915->wq that this work is executing on. To try and flush any - * pending frees now we are idle, we first wait for an RCU grace - * period, and then queue a task (that will run last on the wq) to - * shrink and re-optimize the caches. 
- */ - if (same_epoch(dev_priv, epoch)) { - struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL); - if (s) { - init_rcu_head(&s->rcu); - s->i915 = dev_priv; - s->epoch = epoch; - call_rcu(&s->rcu, __sleep_rcu); - } + GEM_BUG_ON(!i915->gt.awake); + i915_queue_hangcheck(i915); } } @@ -3679,7 +3025,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) list_del(&lut->obj_link); list_del(&lut->ctx_link); - kmem_cache_free(i915->luts, lut); + i915_lut_handle_free(lut); __i915_gem_object_release_unless_active(obj); } @@ -3742,8 +3088,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) I915_WAIT_INTERRUPTIBLE | I915_WAIT_PRIORITY | I915_WAIT_ALL, - to_wait_timeout(args->timeout_ns), - to_rps_client(file)); + to_wait_timeout(args->timeout_ns)); if (args->timeout_ns > 0) { args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); @@ -3769,33 +3114,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) return ret; } -static long wait_for_timeline(struct i915_timeline *tl, - unsigned int flags, long timeout) -{ - struct i915_request *rq; - - rq = i915_gem_active_get_unlocked(&tl->last_request); - if (!rq) - return timeout; - - /* - * "Race-to-idle". - * - * Switching to the kernel context is often used a synchronous - * step prior to idling, e.g. in suspend for flushing all - * current operations to memory before sleeping. These we - * want to complete as quickly as possible to avoid prolonged - * stalls, so allow the gpu to boost to maximum clocks. - */ - if (flags & I915_WAIT_FOR_IDLE_BOOST) - gen6_rps_boost(rq, NULL); - - timeout = i915_request_wait(rq, flags, timeout); - i915_request_put(rq); - - return timeout; -} - static int wait_for_engines(struct drm_i915_private *i915) { if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) { @@ -3809,6 +3127,52 @@ static int wait_for_engines(struct drm_i915_private *i915) return 0; } +static long +wait_for_timelines(struct drm_i915_private *i915, + unsigned int flags, long timeout) +{ + struct i915_gt_timelines *gt = &i915->gt.timelines; + struct i915_timeline *tl; + + if (!READ_ONCE(i915->gt.active_requests)) + return timeout; + + mutex_lock(>->mutex); + list_for_each_entry(tl, >->active_list, link) { + struct i915_request *rq; + + rq = i915_active_request_get_unlocked(&tl->last_request); + if (!rq) + continue; + + mutex_unlock(>->mutex); + + /* + * "Race-to-idle". + * + * Switching to the kernel context is often used a synchronous + * step prior to idling, e.g. in suspend for flushing all + * current operations to memory before sleeping. These we + * want to complete as quickly as possible to avoid prolonged + * stalls, so allow the gpu to boost to maximum clocks. 
+ */ + if (flags & I915_WAIT_FOR_IDLE_BOOST) + gen6_rps_boost(rq); + + timeout = i915_request_wait(rq, flags, timeout); + i915_request_put(rq); + if (timeout < 0) + return timeout; + + /* restart after reacquiring the lock */ + mutex_lock(>->mutex); + tl = list_entry(>->active_list, typeof(*tl), link); + } + mutex_unlock(>->mutex); + + return timeout; +} + int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags, long timeout) { @@ -3820,41 +3184,20 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, if (!READ_ONCE(i915->gt.awake)) return 0; + timeout = wait_for_timelines(i915, flags, timeout); + if (timeout < 0) + return timeout; + if (flags & I915_WAIT_LOCKED) { - struct i915_timeline *tl; int err; lockdep_assert_held(&i915->drm.struct_mutex); - list_for_each_entry(tl, &i915->gt.timelines, link) { - timeout = wait_for_timeline(tl, flags, timeout); - if (timeout < 0) - return timeout; - } - if (GEM_SHOW_DEBUG() && !timeout) { - /* Presume that timeout was non-zero to begin with! */ - dev_warn(&i915->drm.pdev->dev, - "Missed idle-completion interrupt!\n"); - GEM_TRACE_DUMP(); - } - err = wait_for_engines(i915); if (err) return err; i915_retire_requests(i915); - GEM_BUG_ON(i915->gt.active_requests); - } else { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) { - struct i915_timeline *tl = &engine->timeline; - - timeout = wait_for_timeline(tl, flags, timeout); - if (timeout < 0) - return timeout; - } } return 0; @@ -3901,8 +3244,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | (write ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -3964,8 +3306,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | (write ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -4040,7 +3381,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, * reading an invalid PTE on older architectures. */ restart: - list_for_each_entry(vma, &obj->vma_list, obj_link) { + list_for_each_entry(vma, &obj->vma.list, obj_link) { if (!drm_mm_node_allocated(&vma->node)) continue; @@ -4080,8 +3421,7 @@ restart: I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -4118,7 +3458,7 @@ restart: */ } - list_for_each_entry(vma, &obj->vma_list, obj_link) { + list_for_each_entry(vma, &obj->vma.list, obj_link) { if (!drm_mm_node_allocated(&vma->node)) continue; @@ -4128,7 +3468,7 @@ restart: } } - list_for_each_entry(vma, &obj->vma_list, obj_link) + list_for_each_entry(vma, &obj->vma.list, obj_link) vma->node.color = cache_level; i915_gem_object_set_cache_coherency(obj, cache_level); obj->cache_dirty = true; /* Always invalidate stale cachelines */ @@ -4219,8 +3559,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; @@ -4346,8 +3685,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | (write ? 
I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -4393,8 +3731,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) long ret; /* ABI: return -EIO if already wedged */ - if (i915_terminally_wedged(&dev_priv->gpu_error)) - return -EIO; + ret = i915_terminally_wedged(dev_priv); + if (ret) + return ret; spin_lock(&file_priv->mm.lock); list_for_each_entry(request, &file_priv->mm.request_list, client_link) { @@ -4470,7 +3809,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, } vma = i915_vma_instance(obj, vm, view); - if (unlikely(IS_ERR(vma))) + if (IS_ERR(vma)) return vma; if (i915_vma_misplaced(vma, size, alignment, flags)) { @@ -4504,20 +3843,17 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, static __always_inline unsigned int __busy_read_flag(unsigned int id) { - /* Note that we could alias engines in the execbuf API, but - * that would be very unwise as it prevents userspace from - * fine control over engine selection. Ahem. - * - * This should be something like EXEC_MAX_ENGINE instead of - * I915_NUM_ENGINES. - */ - BUILD_BUG_ON(I915_NUM_ENGINES > 16); + if (id == I915_ENGINE_CLASS_INVALID) + return 0xffff0000; + + GEM_BUG_ON(id >= 16); return 0x10000 << id; } static __always_inline unsigned int __busy_write_id(unsigned int id) { - /* The uABI guarantees an active writer is also amongst the read + /* + * The uABI guarantees an active writer is also amongst the read * engines. This would be true if we accessed the activity tracking * under the lock, but as we perform the lookup of the object and * its activity locklessly we can not guarantee that the last_write @@ -4525,16 +3861,20 @@ static __always_inline unsigned int __busy_write_id(unsigned int id) * last_read - hence we always set both read and write busy for * last_write. */ - return id | __busy_read_flag(id); + if (id == I915_ENGINE_CLASS_INVALID) + return 0xffffffff; + + return (id + 1) | __busy_read_flag(id); } static __always_inline unsigned int __busy_set_if_active(const struct dma_fence *fence, unsigned int (*flag)(unsigned int id)) { - struct i915_request *rq; + const struct i915_request *rq; - /* We have to check the current hw status of the fence as the uABI + /* + * We have to check the current hw status of the fence as the uABI * guarantees forward progress. We could rely on the idle worker * to eventually flush us, but to minimise latency just ask the * hardware. @@ -4545,11 +3885,11 @@ __busy_set_if_active(const struct dma_fence *fence, return 0; /* opencode to_request() in order to avoid const warnings */ - rq = container_of(fence, struct i915_request, fence); + rq = container_of(fence, const struct i915_request, fence); if (i915_request_completed(rq)) return 0; - return flag(rq->engine->uabi_id); + return flag(rq->engine->uabi_class); } static __always_inline unsigned int @@ -4583,7 +3923,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, if (!obj) goto out; - /* A discrepancy here is that we do not report the status of + /* + * A discrepancy here is that we do not report the status of * non-i915 fences, i.e. even though we may report the object as idle, * a call to set-domain may still stall waiting for foreign rendering. 
* This also means that wait-ioctl may report an object as busy, @@ -4691,7 +4032,8 @@ out: } static void -frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request) +frontbuffer_retire(struct i915_active_request *active, + struct i915_request *request) { struct drm_i915_gem_object *obj = container_of(active, typeof(*obj), frontbuffer_write); @@ -4704,7 +4046,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, { mutex_init(&obj->mm.lock); - INIT_LIST_HEAD(&obj->vma_list); + spin_lock_init(&obj->vma.lock); + INIT_LIST_HEAD(&obj->vma.list); + INIT_LIST_HEAD(&obj->lut_list); INIT_LIST_HEAD(&obj->batch_pool_link); @@ -4716,7 +4060,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, obj->resv = &obj->__builtin_resv; obj->frontbuffer_ggtt_origin = ORIGIN_GTT; - init_request_active(&obj->frontbuffer_write, frontbuffer_retire); + i915_active_request_init(&obj->frontbuffer_write, + NULL, frontbuffer_retire); obj->mm.madv = I915_MADV_WILLNEED; INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); @@ -4779,7 +4124,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(dev_priv); + obj = i915_gem_object_alloc(); if (obj == NULL) return ERR_PTR(-ENOMEM); @@ -4859,8 +4204,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, struct llist_node *freed) { struct drm_i915_gem_object *obj, *on; + intel_wakeref_t wakeref; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); llist_for_each_entry_safe(obj, on, freed, freed) { struct i915_vma *vma, *vn; @@ -4869,14 +4215,13 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, mutex_lock(&i915->drm.struct_mutex); GEM_BUG_ON(i915_gem_object_is_active(obj)); - list_for_each_entry_safe(vma, vn, - &obj->vma_list, obj_link) { + list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) { GEM_BUG_ON(i915_vma_is_active(vma)); vma->flags &= ~I915_VMA_PIN_MASK; i915_vma_destroy(vma); } - GEM_BUG_ON(!list_empty(&obj->vma_list)); - GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree)); + GEM_BUG_ON(!list_empty(&obj->vma.list)); + GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree)); /* This serializes freeing with the shrinker. 
Since the free * is delayed, first by RCU then by the workqueue, we want the @@ -4912,7 +4257,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(i915, obj->base.size); - kfree(obj->bit_17); + bitmap_free(obj->bit_17); i915_gem_object_free(obj); GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); @@ -4921,7 +4266,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, if (on) cond_resched(); } - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); } static void i915_gem_flush_free_objects(struct drm_i915_private *i915) @@ -5030,14 +4375,12 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) void i915_gem_sanitize(struct drm_i915_private *i915) { - int err; + intel_wakeref_t wakeref; GEM_TRACE("\n"); - mutex_lock(&i915->drm.struct_mutex); - - intel_runtime_pm_get(i915); - intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + wakeref = intel_runtime_pm_get(i915); + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); /* * As we have just resumed the machine and woken the device up from @@ -5045,7 +4388,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * back to defaults, recovering from whatever wedged state we left it * in and so worth trying to use the device once more. */ - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) i915_gem_unset_wedged(i915); /* @@ -5056,27 +4399,25 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * it may impact the display and we are uncertain about the stability * of the reset, so this could be applied to even earlier gen. */ - err = -ENODEV; - if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915)) - err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES)); - if (!err) - intel_engines_sanitize(i915); + intel_engines_sanitize(i915, false); - intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); - intel_runtime_pm_put(i915); + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put(i915, wakeref); + mutex_lock(&i915->drm.struct_mutex); i915_gem_contexts_lost(i915); mutex_unlock(&i915->drm.struct_mutex); } -int i915_gem_suspend(struct drm_i915_private *i915) +void i915_gem_suspend(struct drm_i915_private *i915) { - int ret; + intel_wakeref_t wakeref; GEM_TRACE("\n"); - intel_runtime_pm_get(i915); - intel_suspend_gt_powersave(i915); + wakeref = intel_runtime_pm_get(i915); + + flush_workqueue(i915->wq); mutex_lock(&i915->drm.struct_mutex); @@ -5089,29 +4430,12 @@ int i915_gem_suspend(struct drm_i915_private *i915) * state. Fortunately, the kernel_context is disposable and we do * not rely on its state. 
*/ - if (!i915_terminally_wedged(&i915->gpu_error)) { - ret = i915_gem_switch_to_kernel_context(i915); - if (ret) - goto err_unlock; - - ret = i915_gem_wait_for_idle(i915, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - I915_WAIT_FOR_IDLE_BOOST, - MAX_SCHEDULE_TIMEOUT); - if (ret && ret != -EIO) - goto err_unlock; - - assert_kernel_context_is_current(i915); - } - i915_retire_requests(i915); /* ensure we flush after wedging */ + switch_to_kernel_context_sync(i915, i915->gt.active_engines); mutex_unlock(&i915->drm.struct_mutex); + i915_reset_flush(i915); - intel_uc_suspend(i915); - - cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); - cancel_delayed_work_sync(&i915->gt.retire_work); + drain_delayed_work(&i915->gt.retire_work); /* * As the idle_work is rearming if it detects a race, play safe and @@ -5123,17 +4447,11 @@ int i915_gem_suspend(struct drm_i915_private *i915) * Assert that we successfully flushed all the work and * reset the GPU back to its idle, low power state. */ - WARN_ON(i915->gt.awake); - if (WARN_ON(!intel_engines_are_idle(i915))) - i915_gem_set_wedged(i915); /* no hope, discard everything */ + GEM_BUG_ON(i915->gt.awake); - intel_runtime_pm_put(i915); - return 0; + intel_uc_suspend(i915); -err_unlock: - mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put(i915); - return ret; + intel_runtime_pm_put(i915, wakeref); } void i915_gem_suspend_late(struct drm_i915_private *i915) @@ -5183,7 +4501,7 @@ void i915_gem_resume(struct drm_i915_private *i915) WARN_ON(i915->gt.awake); mutex_lock(&i915->drm.struct_mutex); - intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); i915_gem_restore_gtt_mappings(i915); i915_gem_restore_fences(i915); @@ -5201,17 +4519,18 @@ void i915_gem_resume(struct drm_i915_private *i915) intel_uc_resume(i915); /* Always reload a context for powersaving. 
*/ - if (i915_gem_switch_to_kernel_context(i915)) + if (!load_power_context(i915)) goto err_wedged; out_unlock: - intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); mutex_unlock(&i915->drm.struct_mutex); return; err_wedged: - if (!i915_terminally_wedged(&i915->gpu_error)) { - DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); + if (!i915_reset_failed(i915)) { + dev_err(i915->drm.dev, + "Failed to re-initialize GPU, declaring it wedged!\n"); i915_gem_set_wedged(i915); } goto out_unlock; @@ -5226,15 +4545,15 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_TILE_SURFACE_SWIZZLING); - if (IS_GEN5(dev_priv)) + if (IS_GEN(dev_priv, 5)) return; I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); - if (IS_GEN6(dev_priv)) + if (IS_GEN(dev_priv, 6)) I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); - else if (IS_GEN7(dev_priv)) + else if (IS_GEN(dev_priv, 7)) I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); - else if (IS_GEN8(dev_priv)) + else if (IS_GEN(dev_priv, 8)) I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); else BUG(); @@ -5256,10 +4575,10 @@ static void init_unused_rings(struct drm_i915_private *dev_priv) init_unused_ring(dev_priv, SRB1_BASE); init_unused_ring(dev_priv, SRB2_BASE); init_unused_ring(dev_priv, SRB3_BASE); - } else if (IS_GEN2(dev_priv)) { + } else if (IS_GEN(dev_priv, 2)) { init_unused_ring(dev_priv, SRB0_BASE); init_unused_ring(dev_priv, SRB1_BASE); - } else if (IS_GEN3(dev_priv)) { + } else if (IS_GEN(dev_priv, 3)) { init_unused_ring(dev_priv, PRB1_BASE); init_unused_ring(dev_priv, PRB2_BASE); } @@ -5281,6 +4600,8 @@ static int __i915_gem_restart_engines(void *data) } } + intel_engines_set_scheduler_caps(i915); + return 0; } @@ -5291,7 +4612,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) dev_priv->gt.last_init_time = ktime_get(); /* Double layer security blanket, see i915_gem_init() */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); @@ -5316,10 +4637,9 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) init_unused_rings(dev_priv); BUG_ON(!dev_priv->kernel_context); - if (i915_terminally_wedged(&dev_priv->gpu_error)) { - ret = -EIO; + ret = i915_terminally_wedged(dev_priv); + if (ret) goto out; - } ret = i915_ppgtt_init_hw(dev_priv); if (ret) { @@ -5347,14 +4667,14 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) if (ret) goto cleanup_uc; - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); return 0; cleanup_uc: intel_uc_fini_hw(dev_priv); out: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); return ret; } @@ -5364,7 +4684,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) struct i915_gem_context *ctx; struct intel_engine_cs *engine; enum intel_engine_id id; - int err; + int err = 0; /* * As we reset the gpu during very early sanitisation, the current @@ -5397,36 +4717,27 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) goto err_active; } - err = i915_gem_switch_to_kernel_context(i915); - if (err) - goto err_active; - - if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) 
{ - i915_gem_set_wedged(i915); - err = -EIO; /* Caller will declare us wedged */ + /* Flush the default context image to memory, and enable powersaving. */ + if (!load_power_context(i915)) { + err = -EIO; goto err_active; } - assert_kernel_context_is_current(i915); - - /* - * Immediately park the GPU so that we enable powersaving and - * treat it as idle. The next time we issue a request, we will - * unpark and start using the engine->pinned_default_state, otherwise - * it is in limbo and an early reset may fail. - */ - __i915_gem_park(i915); - for_each_engine(engine, i915, id) { + struct intel_context *ce; struct i915_vma *state; void *vaddr; - GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count); + ce = intel_context_lookup(ctx, engine); + if (!ce) + continue; - state = to_intel_context(ctx, engine)->state; + state = ce->state; if (!state) continue; + GEM_BUG_ON(intel_context_is_pinned(ce)); + /* * As we will hold a reference to the logical state, it will * not be torn down with the context, and importantly the @@ -5444,6 +4755,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) goto err_active; engine->default_state = i915_gem_object_get(state->obj); + i915_gem_object_set_cache_coherency(engine->default_state, + I915_CACHE_LLC); /* Check we can acquire the image of the context state */ vaddr = i915_gem_object_pin_map(engine->default_state, @@ -5482,19 +4795,10 @@ out_ctx: err_active: /* * If we have to abandon now, we expect the engines to be idle - * and ready to be torn-down. First try to flush any remaining - * request, ensure we are pointing at the kernel context and - * then remove it. + * and ready to be torn-down. The quickest way we can accomplish + * this is by declaring ourselves wedged. */ - if (WARN_ON(i915_gem_switch_to_kernel_context(i915))) - goto out_ctx; - - if (WARN_ON(i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT))) - goto out_ctx; - - i915_gem_contexts_lost(i915); + i915_gem_set_wedged(i915); goto out_ctx; } @@ -5555,6 +4859,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv) dev_priv->gt.cleanup_engine = intel_engine_cleanup; } + i915_timelines_init(dev_priv); + ret = i915_gem_init_userptr(dev_priv); if (ret) return ret; @@ -5574,7 +4880,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * just magically go away. */ mutex_lock(&dev_priv->drm.struct_mutex); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); ret = i915_gem_init_ggtt(dev_priv); if (ret) { @@ -5583,7 +4889,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) } ret = i915_gem_init_scratch(dev_priv, - IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE); + IS_GEN(dev_priv, 2) ? 
SZ_256K : PAGE_SIZE); if (ret) { GEM_BUG_ON(ret == -EIO); goto err_ggtt; @@ -5636,7 +4942,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) goto err_init_hw; } - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); return 0; @@ -5650,7 +4956,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) err_init_hw: mutex_unlock(&dev_priv->drm.struct_mutex); - WARN_ON(i915_gem_suspend(dev_priv)); + i915_gem_suspend(dev_priv); i915_gem_suspend_late(dev_priv); i915_gem_drain_workqueue(dev_priv); @@ -5671,14 +4977,16 @@ err_scratch: i915_gem_fini_scratch(dev_priv); err_ggtt: err_unlock: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); err_uc_misc: intel_uc_fini_misc(dev_priv); - if (ret != -EIO) + if (ret != -EIO) { i915_gem_cleanup_userptr(dev_priv); + i915_timelines_fini(dev_priv); + } if (ret == -EIO) { mutex_lock(&dev_priv->drm.struct_mutex); @@ -5688,7 +4996,7 @@ err_uc_misc: * wedged. But we only want to do this where the GPU is angry, * for all other failure, such as an allocation failure, bail. */ - if (!i915_terminally_wedged(&dev_priv->gpu_error)) { + if (!i915_reset_failed(dev_priv)) { i915_load_error(dev_priv, "Failed to initialize GPU, declaring it wedged!\n"); i915_gem_set_wedged(dev_priv); @@ -5729,6 +5037,7 @@ void i915_gem_fini(struct drm_i915_private *dev_priv) intel_uc_fini_misc(dev_priv); i915_gem_cleanup_userptr(dev_priv); + i915_timelines_fini(dev_priv); i915_gem_drain_freed_objects(dev_priv); @@ -5800,38 +5109,8 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) int i915_gem_init_early(struct drm_i915_private *dev_priv) { - int err = -ENOMEM; - - dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); - if (!dev_priv->objects) - goto err_out; - - dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); - if (!dev_priv->vmas) - goto err_objects; - - dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0); - if (!dev_priv->luts) - goto err_vmas; - - dev_priv->requests = KMEM_CACHE(i915_request, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT | - SLAB_TYPESAFE_BY_RCU); - if (!dev_priv->requests) - goto err_luts; - - dev_priv->dependencies = KMEM_CACHE(i915_dependency, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT); - if (!dev_priv->dependencies) - goto err_requests; - - dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN); - if (!dev_priv->priorities) - goto err_dependencies; + int err; - INIT_LIST_HEAD(&dev_priv->gt.timelines); INIT_LIST_HEAD(&dev_priv->gt.active_rings); INIT_LIST_HEAD(&dev_priv->gt.closed_vma); @@ -5843,6 +5122,8 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) i915_gem_idle_work_handler); init_waitqueue_head(&dev_priv->gpu_error.wait_queue); init_waitqueue_head(&dev_priv->gpu_error.reset_queue); + mutex_init(&dev_priv->gpu_error.wedge_mutex); + init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); @@ -5853,19 +5134,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err); return 0; - -err_dependencies: - kmem_cache_destroy(dev_priv->dependencies); -err_requests: - kmem_cache_destroy(dev_priv->requests); -err_luts: - kmem_cache_destroy(dev_priv->luts); -err_vmas: - kmem_cache_destroy(dev_priv->vmas); -err_objects: - 
kmem_cache_destroy(dev_priv->objects); -err_out: - return err; } void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) @@ -5874,17 +5142,8 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); WARN_ON(dev_priv->mm.object_count); - WARN_ON(!list_empty(&dev_priv->gt.timelines)); - - kmem_cache_destroy(dev_priv->priorities); - kmem_cache_destroy(dev_priv->dependencies); - kmem_cache_destroy(dev_priv->requests); - kmem_cache_destroy(dev_priv->luts); - kmem_cache_destroy(dev_priv->vmas); - kmem_cache_destroy(dev_priv->objects); - /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ - rcu_barrier(); + cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); i915_gemfs_fini(dev_priv); } diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index b0e4b976880c..9074eb1e843f 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -73,14 +73,14 @@ struct drm_i915_private; #define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif -#define I915_NUM_ENGINES 8 +#define I915_GEM_IDLE_TIMEOUT (HZ / 5) void i915_gem_park(struct drm_i915_private *i915); void i915_gem_unpark(struct drm_i915_private *i915); static inline void __tasklet_disable_sync_once(struct tasklet_struct *t) { - if (atomic_inc_return(&t->count) == 1) + if (!atomic_fetch_inc(&t->count)) tasklet_unlock_wait(t); } @@ -89,4 +89,9 @@ static inline bool __tasklet_is_enabled(const struct tasklet_struct *t) return !atomic_read(&t->count); } +static inline bool __tasklet_enable(struct tasklet_struct *t) +{ + return atomic_dec_and_test(&t->count); +} + #endif /* __I915_GEM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 371c07087095..fe7ddb1f59e1 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -86,14 +86,34 @@ */ #include <linux/log2.h> -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" +#include "i915_globals.h" #include "i915_trace.h" +#include "i915_user_extensions.h" +#include "intel_lrc_reg.h" #include "intel_workarounds.h" +#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1 << 1) +#define I915_CONTEXT_PARAM_VM 0x9 + #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 +static struct i915_global_gem_context { + struct i915_global base; + struct kmem_cache *slab_luts; +} global; + +struct i915_lut_handle *i915_lut_handle_alloc(void) +{ + return kmem_cache_alloc(global.slab_luts, GFP_KERNEL); +} + +void i915_lut_handle_free(struct i915_lut_handle *lut) +{ + return kmem_cache_free(global.slab_luts, lut); +} + static void lut_close(struct i915_gem_context *ctx) { struct i915_lut_handle *lut, *ln; @@ -102,14 +122,17 @@ static void lut_close(struct i915_gem_context *ctx) list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) { list_del(&lut->obj_link); - kmem_cache_free(ctx->i915->luts, lut); + i915_lut_handle_free(lut); } + INIT_LIST_HEAD(&ctx->handles_list); rcu_read_lock(); radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { struct i915_vma *vma = rcu_dereference_raw(*slot); radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); + + vma->open_count--; __i915_gem_object_release_unless_active(vma->obj); } rcu_read_unlock(); @@ -206,25 +229,26 @@ static void release_hw_id(struct i915_gem_context *ctx) static void i915_gem_context_free(struct i915_gem_context *ctx) { - unsigned int n; + struct 
intel_context *it, *n; lockdep_assert_held(&ctx->i915->drm.struct_mutex); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + GEM_BUG_ON(!list_empty(&ctx->active_engines)); release_hw_id(ctx); i915_ppgtt_put(ctx->ppgtt); - for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { - struct intel_context *ce = &ctx->__engine[n]; + rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node) + intel_context_put(it); - if (ce->ops) - ce->ops->destroy(ce); - } + if (ctx->timeline) + i915_timeline_put(ctx->timeline); kfree(ctx->name); put_pid(ctx->pid); list_del(&ctx->link); + mutex_destroy(&ctx->mutex); kfree_rcu(ctx, rcu); } @@ -291,8 +315,6 @@ static void context_close(struct i915_gem_context *ctx) * the ppgtt). */ lut_close(ctx); - if (ctx->ppgtt) - i915_ppgtt_close(&ctx->ppgtt->vm); ctx->file_priv = ERR_PTR(-EBADF); i915_gem_context_put(ctx); @@ -307,11 +329,11 @@ static u32 default_desc_template(const struct drm_i915_private *i915, desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; address_mode = INTEL_LEGACY_32B_CONTEXT; - if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) + if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm)) address_mode = INTEL_LEGACY_64B_CONTEXT; desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; - if (IS_GEN8(i915)) + if (IS_GEN(i915, 8)) desc |= GEN8_CTX_L3LLC_COHERENT; /* TODO: WaDisableLiteRestore when we start using semaphore @@ -323,110 +345,114 @@ static u32 default_desc_template(const struct drm_i915_private *i915, } static struct i915_gem_context * -__create_hw_context(struct drm_i915_private *dev_priv, - struct drm_i915_file_private *file_priv) +__create_context(struct drm_i915_private *dev_priv) { struct i915_gem_context *ctx; - unsigned int n; - int ret; + int i; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (ctx == NULL) + if (!ctx) return ERR_PTR(-ENOMEM); kref_init(&ctx->ref); list_add_tail(&ctx->link, &dev_priv->contexts.list); ctx->i915 = dev_priv; ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); + INIT_LIST_HEAD(&ctx->active_engines); + mutex_init(&ctx->mutex); - for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { - struct intel_context *ce = &ctx->__engine[n]; - - ce->gem_context = ctx; - } + ctx->hw_contexts = RB_ROOT; + spin_lock_init(&ctx->hw_contexts_lock); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); INIT_LIST_HEAD(&ctx->hw_id_link); - /* Default context will never have a file_priv */ - ret = DEFAULT_CONTEXT_HANDLE; - if (file_priv) { - ret = idr_alloc(&file_priv->context_idr, ctx, - DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL); - if (ret < 0) - goto err_lut; - } - ctx->user_handle = ret; - - ctx->file_priv = file_priv; - if (file_priv) { - ctx->pid = get_task_pid(current, PIDTYPE_PID); - ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x", - current->comm, - pid_nr(ctx->pid), - ctx->user_handle); - if (!ctx->name) { - ret = -ENOMEM; - goto err_pid; - } - } - /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there * is no remap info, it will be a NOP. 
*/ ctx->remap_slice = ALL_L3_SLICES(dev_priv); i915_gem_context_set_bannable(ctx); + i915_gem_context_set_recoverable(ctx); + ctx->ring_size = 4 * PAGE_SIZE; ctx->desc_template = default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt); + for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) + ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; + return ctx; +} -err_pid: - put_pid(ctx->pid); - idr_remove(&file_priv->context_idr, ctx->user_handle); -err_lut: - context_close(ctx); - return ERR_PTR(ret); +static struct i915_hw_ppgtt * +__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt) +{ + struct i915_hw_ppgtt *old = ctx->ppgtt; + + ctx->ppgtt = i915_ppgtt_get(ppgtt); + ctx->desc_template = default_desc_template(ctx->i915, ppgtt); + + return old; } -static void __destroy_hw_context(struct i915_gem_context *ctx, - struct drm_i915_file_private *file_priv) +static void __assign_ppgtt(struct i915_gem_context *ctx, + struct i915_hw_ppgtt *ppgtt) { - idr_remove(&file_priv->context_idr, ctx->user_handle); - context_close(ctx); + if (ppgtt == ctx->ppgtt) + return; + + ppgtt = __set_ppgtt(ctx, ppgtt); + if (ppgtt) + i915_ppgtt_put(ppgtt); } static struct i915_gem_context * -i915_gem_create_context(struct drm_i915_private *dev_priv, - struct drm_i915_file_private *file_priv) +i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) { struct i915_gem_context *ctx; lockdep_assert_held(&dev_priv->drm.struct_mutex); + BUILD_BUG_ON(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE & + ~I915_CONTEXT_CREATE_FLAGS_UNKNOWN); + if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && + !HAS_EXECLISTS(dev_priv)) + return ERR_PTR(-EINVAL); + /* Reap the most stale context */ contexts_free_first(dev_priv); - ctx = __create_hw_context(dev_priv, file_priv); + ctx = __create_context(dev_priv); if (IS_ERR(ctx)) return ctx; if (HAS_FULL_PPGTT(dev_priv)) { struct i915_hw_ppgtt *ppgtt; - ppgtt = i915_ppgtt_create(dev_priv, file_priv); + ppgtt = i915_ppgtt_create(dev_priv); if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); - __destroy_hw_context(ctx, file_priv); + context_close(ctx); return ERR_CAST(ppgtt); } - ctx->ppgtt = ppgtt; - ctx->desc_template = default_desc_template(dev_priv, ppgtt); + __assign_ppgtt(ctx, ppgtt); + i915_ppgtt_put(ppgtt); + } + + if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { + struct i915_timeline *timeline; + + timeline = i915_timeline_create(dev_priv, NULL); + if (IS_ERR(timeline)) { + context_close(ctx); + return ERR_CAST(timeline); + } + + ctx->timeline = timeline; } trace_i915_context_create(ctx); @@ -457,10 +483,17 @@ i915_gem_context_create_gvt(struct drm_device *dev) if (ret) return ERR_PTR(ret); - ctx = i915_gem_create_context(to_i915(dev), NULL); + ctx = i915_gem_create_context(to_i915(dev), 0); if (IS_ERR(ctx)) goto out; + ret = i915_gem_context_pin_hw_id(ctx); + if (ret) { + context_close(ctx); + ctx = ERR_PTR(ret); + goto out; + } + ctx->file_priv = ERR_PTR(-EBADF); i915_gem_context_set_closed(ctx); /* not user accessible */ i915_gem_context_clear_bannable(ctx); @@ -493,7 +526,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) struct i915_gem_context *ctx; int err; - ctx = i915_gem_create_context(i915, NULL); + ctx = i915_gem_create_context(i915, 0); if (IS_ERR(ctx)) return ctx; @@ -540,7 +573,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) GEM_BUG_ON(dev_priv->kernel_context); GEM_BUG_ON(dev_priv->preempt_context); - 
intel_engine_init_ctx_wa(dev_priv->engine[RCS]); + intel_engine_init_ctx_wa(dev_priv->engine[RCS0]); init_contexts(dev_priv); /* lowest priority; idle task */ @@ -601,31 +634,87 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915) static int context_idr_cleanup(int id, void *p, void *data) { - struct i915_gem_context *ctx = p; + context_close(p); + return 0; +} - context_close(ctx); +static int vm_idr_cleanup(int id, void *p, void *data) +{ + i915_ppgtt_put(p); return 0; } +static int gem_context_register(struct i915_gem_context *ctx, + struct drm_i915_file_private *fpriv) +{ + int ret; + + ctx->file_priv = fpriv; + if (ctx->ppgtt) + ctx->ppgtt->vm.file = fpriv; + + ctx->pid = get_task_pid(current, PIDTYPE_PID); + ctx->name = kasprintf(GFP_KERNEL, "%s[%d]", + current->comm, pid_nr(ctx->pid)); + if (!ctx->name) { + ret = -ENOMEM; + goto err_pid; + } + + /* And finally expose ourselves to userspace via the idr */ + mutex_lock(&fpriv->context_idr_lock); + ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL); + mutex_unlock(&fpriv->context_idr_lock); + if (ret >= 0) + goto out; + + kfree(fetch_and_zero(&ctx->name)); +err_pid: + put_pid(fetch_and_zero(&ctx->pid)); +out: + return ret; +} + int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_context *ctx; + int err; + + mutex_init(&file_priv->context_idr_lock); + mutex_init(&file_priv->vm_idr_lock); idr_init(&file_priv->context_idr); + idr_init_base(&file_priv->vm_idr, 1); mutex_lock(&i915->drm.struct_mutex); - ctx = i915_gem_create_context(i915, file_priv); + ctx = i915_gem_create_context(i915, 0); mutex_unlock(&i915->drm.struct_mutex); if (IS_ERR(ctx)) { - idr_destroy(&file_priv->context_idr); - return PTR_ERR(ctx); + err = PTR_ERR(ctx); + goto err; } + err = gem_context_register(ctx, file_priv); + if (err < 0) + goto err_ctx; + GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); + GEM_BUG_ON(err > 0); return 0; + +err_ctx: + mutex_lock(&i915->drm.struct_mutex); + context_close(ctx); + mutex_unlock(&i915->drm.struct_mutex); +err: + idr_destroy(&file_priv->vm_idr); + idr_destroy(&file_priv->context_idr); + mutex_destroy(&file_priv->vm_idr_lock); + mutex_destroy(&file_priv->context_idr_lock); + return err; } void i915_gem_context_close(struct drm_file *file) @@ -636,6 +725,100 @@ void i915_gem_context_close(struct drm_file *file) idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_destroy(&file_priv->context_idr); + mutex_destroy(&file_priv->context_idr_lock); + + idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); + idr_destroy(&file_priv->vm_idr); + mutex_destroy(&file_priv->vm_idr_lock); +} + +int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_vm_control *args = data; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_hw_ppgtt *ppgtt; + int err; + + if (!HAS_FULL_PPGTT(i915)) + return -ENODEV; + + if (args->flags) + return -EINVAL; + + ppgtt = i915_ppgtt_create(i915); + if (IS_ERR(ppgtt)) + return PTR_ERR(ppgtt); + + ppgtt->vm.file = file_priv; + + if (args->extensions) { + err = i915_user_extensions(u64_to_user_ptr(args->extensions), + NULL, 0, + ppgtt); + if (err) + goto err_put; + } + + err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (err) + goto err_put; + + err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL); + if (err < 0) + goto err_unlock; + 
+ GEM_BUG_ON(err == 0); /* reserved for default/unassigned ppgtt */ + ppgtt->user_handle = err; + + mutex_unlock(&file_priv->vm_idr_lock); + + args->vm_id = err; + return 0; + +err_unlock: + mutex_unlock(&file_priv->vm_idr_lock); +err_put: + i915_ppgtt_put(ppgtt); + return err; +} + +int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_vm_control *args = data; + struct i915_hw_ppgtt *ppgtt; + int err; + u32 id; + + if (args->flags) + return -EINVAL; + + if (args->extensions) + return -EINVAL; + + id = args->vm_id; + if (!id) + return -ENOENT; + + err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (err) + return err; + + ppgtt = idr_remove(&file_priv->vm_idr, id); + if (ppgtt) { + GEM_BUG_ON(ppgtt->user_handle != id); + ppgtt->user_handle = 0; + } + + mutex_unlock(&file_priv->vm_idr_lock); + if (!ppgtt) + return -ENOENT; + + i915_ppgtt_put(ppgtt); + return 0; } static struct i915_request * @@ -646,12 +829,11 @@ last_request_on_engine(struct i915_timeline *timeline, GEM_BUG_ON(timeline == &engine->timeline); - rq = i915_gem_active_raw(&timeline->last_request, - &engine->i915->drm.struct_mutex); - if (rq && rq->engine == engine) { - GEM_TRACE("last request for %s on engine %s: %llx:%d\n", - timeline->name, engine->name, - rq->fence.context, rq->fence.seqno); + rq = i915_active_request_raw(&timeline->last_request, + &engine->i915->drm.struct_mutex); + if (rq && rq->engine->mask & engine->mask) { + GEM_TRACE("last request on engine %s: %llx:%llu\n", + engine->name, rq->fence.context, rq->fence.seqno); GEM_BUG_ON(rq->timeline != timeline); return rq; } @@ -659,81 +841,104 @@ last_request_on_engine(struct i915_timeline *timeline, return NULL; } -static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine) +struct context_barrier_task { + struct i915_active base; + void (*task)(void *data); + void *data; +}; + +static void cb_retire(struct i915_active *base) +{ + struct context_barrier_task *cb = container_of(base, typeof(*cb), base); + + if (cb->task) + cb->task(cb->data); + + i915_active_fini(&cb->base); + kfree(cb); +} + +I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); +static int context_barrier_task(struct i915_gem_context *ctx, + intel_engine_mask_t engines, + int (*emit)(struct i915_request *rq, void *data), + void (*task)(void *data), + void *data) { - struct drm_i915_private *i915 = engine->i915; - const struct intel_context * const ce = - to_intel_context(i915->kernel_context, engine); - struct i915_timeline *barrier = ce->ring->timeline; - struct intel_ring *ring; - bool any_active = false; + struct drm_i915_private *i915 = ctx->i915; + struct context_barrier_task *cb; + struct intel_context *ce, *next; + intel_wakeref_t wakeref; + int err = 0; lockdep_assert_held(&i915->drm.struct_mutex); - list_for_each_entry(ring, &i915->gt.active_rings, active_link) { - struct i915_request *rq; + GEM_BUG_ON(!task); - rq = last_request_on_engine(ring->timeline, engine); - if (!rq) - continue; + cb = kmalloc(sizeof(*cb), GFP_KERNEL); + if (!cb) + return -ENOMEM; + + i915_active_init(i915, &cb->base, cb_retire); + i915_active_acquire(&cb->base); - any_active = true; + wakeref = intel_runtime_pm_get(i915); + rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) { + struct intel_engine_cs *engine = ce->engine; + struct i915_request *rq; - if (rq->hw_context == ce) + if (!(engine->mask & engines)) 
continue; - /* - * Was this request submitted after the previous - * switch-to-kernel-context? - */ - if (!i915_timeline_sync_is_later(barrier, &rq->fence)) { - GEM_TRACE("%s needs barrier for %llx:%d\n", - ring->timeline->name, - rq->fence.context, - rq->fence.seqno); - return false; + if (I915_SELFTEST_ONLY(context_barrier_inject_fault & + engine->mask)) { + err = -ENXIO; + break; + } + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; } - GEM_TRACE("%s has barrier after %llx:%d\n", - ring->timeline->name, - rq->fence.context, - rq->fence.seqno); + err = 0; + if (emit) + err = emit(rq, data); + if (err == 0) + err = i915_active_ref(&cb->base, rq->fence.context, rq); + + i915_request_add(rq); + if (err) + break; } + intel_runtime_pm_put(i915, wakeref); - /* - * If any other timeline was still active and behind the last barrier, - * then our last switch-to-kernel-context must still be queued and - * will run last (leaving the engine in the kernel context when it - * eventually idles). - */ - if (any_active) - return true; + cb->task = err ? NULL : task; /* caller needs to unwind instead */ + cb->data = data; - /* The engine is idle; check that it is idling in the kernel context. */ - return engine->last_retired_context == ce; + i915_active_release(&cb->base); + + return err; } -int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915) +int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915, + intel_engine_mask_t mask) { struct intel_engine_cs *engine; - enum intel_engine_id id; GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake)); lockdep_assert_held(&i915->drm.struct_mutex); GEM_BUG_ON(!i915->kernel_context); - i915_retire_requests(i915); + /* Inoperable, so presume the GPU is safely pointing into the void! */ + if (i915_terminally_wedged(i915)) + return 0; - for_each_engine(engine, i915, id) { + for_each_engine_masked(engine, i915, mask, mask) { struct intel_ring *ring; struct i915_request *rq; - GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine)); - if (engine_has_kernel_context_barrier(engine)) - continue; - - GEM_TRACE("emit barrier on %s\n", engine->name); - rq = i915_request_alloc(engine, i915->kernel_context); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -749,14 +954,13 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915) if (prev->gem_context == i915->kernel_context) continue; - GEM_TRACE("add barrier on %s for %llx:%d\n", + GEM_TRACE("add barrier on %s for %llx:%lld\n", engine->name, prev->fence.context, prev->fence.seqno); i915_sw_fence_await_sw_fence_gfp(&rq->submit, &prev->submit, I915_FENCE_GFP); - i915_timeline_sync_set(rq->timeline, &prev->fence); } i915_request_add(rq); @@ -765,6 +969,558 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915) return 0; } +static int get_ppgtt(struct drm_i915_file_private *file_priv, + struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + struct i915_hw_ppgtt *ppgtt; + int ret; + + return -EINVAL; /* nothing to see here; please move along */ + + if (!ctx->ppgtt) + return -ENODEV; + + /* XXX rcu acquire? 
*/ + ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); + if (ret) + return ret; + + ppgtt = i915_ppgtt_get(ctx->ppgtt); + mutex_unlock(&ctx->i915->drm.struct_mutex); + + ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (ret) + goto err_put; + + if (!ppgtt->user_handle) { + ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL); + GEM_BUG_ON(!ret); + if (ret < 0) + goto err_unlock; + + ppgtt->user_handle = ret; + i915_ppgtt_get(ppgtt); + } + + args->size = 0; + args->value = ppgtt->user_handle; + + ret = 0; +err_unlock: + mutex_unlock(&file_priv->vm_idr_lock); +err_put: + i915_ppgtt_put(ppgtt); + return ret; +} + +static void set_ppgtt_barrier(void *data) +{ + struct i915_hw_ppgtt *old = data; + + if (INTEL_GEN(old->vm.i915) < 8) + gen6_ppgtt_unpin_all(old); + + i915_ppgtt_put(old); +} + +static int emit_ppgtt_update(struct i915_request *rq, void *data) +{ + struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; + struct intel_engine_cs *engine = rq->engine; + u32 *cs; + int i; + + if (i915_vm_is_4lvl(&ppgtt->vm)) { + const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4); + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(2); + + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, 0)); + *cs++ = upper_32_bits(pd_daddr); + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, 0)); + *cs++ = lower_32_bits(pd_daddr); + + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { + cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES); + for (i = GEN8_3LVL_PDPES; i--; ) { + const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); + + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i)); + *cs++ = upper_32_bits(pd_daddr); + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i)); + *cs++ = lower_32_bits(pd_daddr); + } + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + } else { + /* ppGTT is not part of the legacy context image */ + gen6_ppgtt_pin(ppgtt); + } + + return 0; +} + +static int set_ppgtt(struct drm_i915_file_private *file_priv, + struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + struct i915_hw_ppgtt *ppgtt, *old; + int err; + + return -EINVAL; /* nothing to see here; please move along */ + + if (args->size) + return -EINVAL; + + if (!ctx->ppgtt) + return -ENODEV; + + if (upper_32_bits(args->value)) + return -ENOENT; + + err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (err) + return err; + + ppgtt = idr_find(&file_priv->vm_idr, args->value); + if (ppgtt) { + GEM_BUG_ON(ppgtt->user_handle != args->value); + i915_ppgtt_get(ppgtt); + } + mutex_unlock(&file_priv->vm_idr_lock); + if (!ppgtt) + return -ENOENT; + + err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); + if (err) + goto out; + + if (ppgtt == ctx->ppgtt) + goto unlock; + + /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ + lut_close(ctx); + + old = __set_ppgtt(ctx, ppgtt); + + /* + * We need to flush any requests using the current ppgtt before + * we release it as the requests do not hold a reference themselves, + * only indirectly through the context. 
+ */ + err = context_barrier_task(ctx, ALL_ENGINES, + emit_ppgtt_update, + set_ppgtt_barrier, + old); + if (err) { + ctx->ppgtt = old; + ctx->desc_template = default_desc_template(ctx->i915, old); + i915_ppgtt_put(ppgtt); + } + +unlock: + mutex_unlock(&ctx->i915->drm.struct_mutex); + +out: + i915_ppgtt_put(ppgtt); + return err; +} + +static int gen8_emit_rpcs_config(struct i915_request *rq, + struct intel_context *ce, + struct intel_sseu sseu) +{ + u64 offset; + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + offset = i915_ggtt_offset(ce->state) + + LRC_STATE_PN * PAGE_SIZE + + (CTX_R_PWR_CLK_STATE + 1) * 4; + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + *cs++ = gen8_make_rpcs(rq->i915, &sseu); + + intel_ring_advance(rq, cs); + + return 0; +} + +static int +gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) +{ + struct drm_i915_private *i915 = ce->engine->i915; + struct i915_request *rq, *prev; + intel_wakeref_t wakeref; + int ret; + + lockdep_assert_held(&ce->pin_mutex); + + /* + * If the context is not idle, we have to submit an ordered request to + * modify its context image via the kernel context (writing to our own + * image, or into the registers directory, does not stick). Pristine + * and idle contexts will be configured on pinning. + */ + if (!intel_context_is_pinned(ce)) + return 0; + + /* Submitting requests etc needs the hw awake. */ + wakeref = intel_runtime_pm_get(i915); + + rq = i915_request_alloc(ce->engine, i915->kernel_context); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + goto out_put; + } + + /* Queue this switch after all other activity by this context. */ + prev = i915_active_request_raw(&ce->ring->timeline->last_request, + &i915->drm.struct_mutex); + if (prev && !i915_request_completed(prev)) { + ret = i915_request_await_dma_fence(rq, &prev->fence); + if (ret < 0) + goto out_add; + } + + /* Order all following requests to be after. */ + ret = i915_timeline_set_barrier(ce->ring->timeline, rq); + if (ret) + goto out_add; + + ret = gen8_emit_rpcs_config(rq, ce, sseu); + if (ret) + goto out_add; + + /* + * Guarantee context image and the timeline remains pinned until the + * modifying request is retired by setting the ce activity tracker. + * + * But we only need to take one pin on the account of it. Or in other + * words transfer the pinned ce object to tracked active request. + */ + if (!i915_active_request_isset(&ce->active_tracker)) + __intel_context_pin(ce); + __i915_active_request_set(&ce->active_tracker, rq); + +out_add: + i915_request_add(rq); +out_put: + intel_runtime_pm_put(i915, wakeref); + + return ret; +} + +static int +__i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct intel_sseu sseu) +{ + struct intel_context *ce; + int ret = 0; + + GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8); + GEM_BUG_ON(engine->id != RCS0); + + ce = intel_context_pin_lock(ctx, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + /* Nothing to do if unmodified. 
*/ + if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) + goto unlock; + + ret = gen8_modify_rpcs(ce, sseu); + if (!ret) + ce->sseu = sseu; + +unlock: + intel_context_pin_unlock(ce); + return ret; +} + +static int +i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct intel_sseu sseu) +{ + int ret; + + ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); + if (ret) + return ret; + + ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu); + + mutex_unlock(&ctx->i915->drm.struct_mutex); + + return ret; +} + +static int +user_to_context_sseu(struct drm_i915_private *i915, + const struct drm_i915_gem_context_param_sseu *user, + struct intel_sseu *context) +{ + const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu; + + /* No zeros in any field. */ + if (!user->slice_mask || !user->subslice_mask || + !user->min_eus_per_subslice || !user->max_eus_per_subslice) + return -EINVAL; + + /* Max > min. */ + if (user->max_eus_per_subslice < user->min_eus_per_subslice) + return -EINVAL; + + /* + * Some future proofing on the types since the uAPI is wider than the + * current internal implementation. + */ + if (overflows_type(user->slice_mask, context->slice_mask) || + overflows_type(user->subslice_mask, context->subslice_mask) || + overflows_type(user->min_eus_per_subslice, + context->min_eus_per_subslice) || + overflows_type(user->max_eus_per_subslice, + context->max_eus_per_subslice)) + return -EINVAL; + + /* Check validity against hardware. */ + if (user->slice_mask & ~device->slice_mask) + return -EINVAL; + + if (user->subslice_mask & ~device->subslice_mask[0]) + return -EINVAL; + + if (user->max_eus_per_subslice > device->max_eus_per_subslice) + return -EINVAL; + + context->slice_mask = user->slice_mask; + context->subslice_mask = user->subslice_mask; + context->min_eus_per_subslice = user->min_eus_per_subslice; + context->max_eus_per_subslice = user->max_eus_per_subslice; + + /* Part specific restrictions. */ + if (IS_GEN(i915, 11)) { + unsigned int hw_s = hweight8(device->slice_mask); + unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); + unsigned int req_s = hweight8(context->slice_mask); + unsigned int req_ss = hweight8(context->subslice_mask); + + /* + * Only full subslice enablement is possible if more than one + * slice is turned on. + */ + if (req_s > 1 && req_ss != hw_ss_per_s) + return -EINVAL; + + /* + * If more than four (SScount bitfield limit) subslices are + * requested then the number has to be even. + */ + if (req_ss > 4 && (req_ss & 1)) + return -EINVAL; + + /* + * If only one slice is enabled and subslice count is below the + * device full enablement, it must be at most half of the all + * available subslices. + */ + if (req_s == 1 && req_ss < hw_ss_per_s && + req_ss > (hw_ss_per_s / 2)) + return -EINVAL; + + /* ABI restriction - VME use case only. */ + + /* All slices or one slice only. */ + if (req_s != 1 && req_s != hw_s) + return -EINVAL; + + /* + * Half subslices or full enablement only when one slice is + * enabled. + */ + if (req_s == 1 && + (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) + return -EINVAL; + + /* No EU configuration changes. 
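/*
 * Editorial aside, not part of this patch: an illustrative userspace-side
 * sketch of programming a reduced render-engine SSEU configuration through
 * I915_CONTEXT_PARAM_SSEU, using the field names visible in the patch above.
 * The mask and EU values below are placeholders; on Gen11 they must respect
 * the restrictions enumerated above (whole slices, even or half subslice
 * counts, no EU changes), otherwise the ioctl returns -EINVAL.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_render_sseu(int fd, uint32_t ctx, uint64_t subslice_mask)
{
	struct drm_i915_gem_context_param_sseu sseu;
	struct drm_i915_gem_context_param arg;

	memset(&sseu, 0, sizeof(sseu));
	sseu.engine_class = I915_ENGINE_CLASS_RENDER; /* only RCS supports RPCS reconfig */
	sseu.engine_instance = 0;
	sseu.slice_mask = 0x1;			/* placeholder: a single slice */
	sseu.subslice_mask = subslice_mask;	/* e.g. half of the device mask (VME use case) */
	sseu.min_eus_per_subslice = 8;		/* placeholder: must match the device max on Gen11 */
	sseu.max_eus_per_subslice = 8;

	memset(&arg, 0, sizeof(arg));
	arg.ctx_id = ctx;
	arg.param = I915_CONTEXT_PARAM_SSEU;
	arg.size = sizeof(sseu);
	arg.value = (uintptr_t)&sseu;

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
}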
*/ + if ((user->min_eus_per_subslice != + device->max_eus_per_subslice) || + (user->max_eus_per_subslice != + device->max_eus_per_subslice)) + return -EINVAL; + } + + return 0; +} + +static int set_sseu(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_gem_context_param_sseu user_sseu; + struct intel_engine_cs *engine; + struct intel_sseu sseu; + int ret; + + if (args->size < sizeof(user_sseu)) + return -EINVAL; + + if (!IS_GEN(i915, 11)) + return -ENODEV; + + if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), + sizeof(user_sseu))) + return -EFAULT; + + if (user_sseu.flags || user_sseu.rsvd) + return -EINVAL; + + engine = intel_engine_lookup_user(i915, + user_sseu.engine_class, + user_sseu.engine_instance); + if (!engine) + return -EINVAL; + + /* Only render engine supports RPCS configuration. */ + if (engine->class != RENDER_CLASS) + return -ENODEV; + + ret = user_to_context_sseu(i915, &user_sseu, &sseu); + if (ret) + return ret; + + ret = i915_gem_context_reconfigure_sseu(ctx, engine, sseu); + if (ret) + return ret; + + args->size = sizeof(user_sseu); + + return 0; +} + +static int ctx_setparam(struct drm_i915_file_private *fpriv, + struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + int ret = 0; + + switch (args->param) { + case I915_CONTEXT_PARAM_NO_ZEROMAP: + if (args->size) + ret = -EINVAL; + else if (args->value) + set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); + else + clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); + break; + + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: + if (args->size) + ret = -EINVAL; + else if (args->value) + i915_gem_context_set_no_error_capture(ctx); + else + i915_gem_context_clear_no_error_capture(ctx); + break; + + case I915_CONTEXT_PARAM_BANNABLE: + if (args->size) + ret = -EINVAL; + else if (!capable(CAP_SYS_ADMIN) && !args->value) + ret = -EPERM; + else if (args->value) + i915_gem_context_set_bannable(ctx); + else + i915_gem_context_clear_bannable(ctx); + break; + + case I915_CONTEXT_PARAM_RECOVERABLE: + if (args->size) + ret = -EINVAL; + else if (args->value) + i915_gem_context_set_recoverable(ctx); + else + i915_gem_context_clear_recoverable(ctx); + break; + + case I915_CONTEXT_PARAM_PRIORITY: + { + s64 priority = args->value; + + if (args->size) + ret = -EINVAL; + else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) + ret = -ENODEV; + else if (priority > I915_CONTEXT_MAX_USER_PRIORITY || + priority < I915_CONTEXT_MIN_USER_PRIORITY) + ret = -EINVAL; + else if (priority > I915_CONTEXT_DEFAULT_PRIORITY && + !capable(CAP_SYS_NICE)) + ret = -EPERM; + else + ctx->sched.priority = + I915_USER_PRIORITY(priority); + } + break; + + case I915_CONTEXT_PARAM_SSEU: + ret = set_sseu(ctx, args); + break; + + case I915_CONTEXT_PARAM_VM: + ret = set_ppgtt(fpriv, ctx, args); + break; + + case I915_CONTEXT_PARAM_BAN_PERIOD: + default: + ret = -EINVAL; + break; + } + + return ret; +} + +struct create_ext { + struct i915_gem_context *ctx; + struct drm_i915_file_private *fpriv; +}; + +static int create_setparam(struct i915_user_extension __user *ext, void *data) +{ + struct drm_i915_gem_context_create_ext_setparam local; + const struct create_ext *arg = data; + + if (copy_from_user(&local, ext, sizeof(local))) + return -EFAULT; + + if (local.param.ctx_id) + return -EINVAL; + + return ctx_setparam(arg->fpriv, arg->ctx, &local.param); +} + +static const i915_user_extension_fn create_extensions[] = { + 
[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, +}; + static bool client_is_banned(struct drm_i915_file_private *file_priv) { return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; @@ -773,23 +1529,26 @@ static bool client_is_banned(struct drm_i915_file_private *file_priv) int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_context_create *args = data; - struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_gem_context *ctx; + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_context_create_ext *args = data; + struct create_ext ext_data; int ret; - if (!DRIVER_CAPS(dev_priv)->has_logical_contexts) + if (!DRIVER_CAPS(i915)->has_logical_contexts) return -ENODEV; - if (args->pad != 0) + if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) return -EINVAL; - if (client_is_banned(file_priv)) { + ret = i915_terminally_wedged(i915); + if (ret) + return ret; + + ext_data.fpriv = file->driver_priv; + if (client_is_banned(ext_data.fpriv)) { DRM_DEBUG("client %s[%d] banned from creating ctx\n", current->comm, pid_nr(get_task_pid(current, PIDTYPE_PID))); - return -EIO; } @@ -797,17 +1556,34 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - ctx = i915_gem_create_context(dev_priv, file_priv); + ext_data.ctx = i915_gem_create_context(i915, args->flags); mutex_unlock(&dev->struct_mutex); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + if (IS_ERR(ext_data.ctx)) + return PTR_ERR(ext_data.ctx); + + if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { + ret = i915_user_extensions(u64_to_user_ptr(args->extensions), + create_extensions, + ARRAY_SIZE(create_extensions), + &ext_data); + if (ret) + goto err_ctx; + } - GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); + ret = gem_context_register(ext_data.ctx, ext_data.fpriv); + if (ret < 0) + goto err_ctx; - args->ctx_id = ctx->user_handle; + args->ctx_id = ret; DRM_DEBUG("HW context %d created\n", args->ctx_id); return 0; + +err_ctx: + mutex_lock(&dev->struct_mutex); + context_close(ext_data.ctx); + mutex_unlock(&dev->struct_mutex); + return ret; } int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, @@ -816,27 +1592,71 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_context_destroy *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_context *ctx; - int ret; if (args->pad != 0) return -EINVAL; - if (args->ctx_id == DEFAULT_CONTEXT_HANDLE) + if (!args->ctx_id) return -ENOENT; - ctx = i915_gem_context_lookup(file_priv, args->ctx_id); + if (mutex_lock_interruptible(&file_priv->context_idr_lock)) + return -EINTR; + + ctx = idr_remove(&file_priv->context_idr, args->ctx_id); + mutex_unlock(&file_priv->context_idr_lock); if (!ctx) return -ENOENT; - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) + mutex_lock(&dev->struct_mutex); + context_close(ctx); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int get_sseu(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + struct drm_i915_gem_context_param_sseu user_sseu; + struct intel_engine_cs *engine; + struct intel_context *ce; + + if (args->size == 0) goto out; + else if (args->size < sizeof(user_sseu)) + return -EINVAL; - __destroy_hw_context(ctx, file_priv); - mutex_unlock(&dev->struct_mutex); + if (copy_from_user(&user_sseu, 
u64_to_user_ptr(args->value), + sizeof(user_sseu))) + return -EFAULT; + + if (user_sseu.flags || user_sseu.rsvd) + return -EINVAL; + + engine = intel_engine_lookup_user(ctx->i915, + user_sseu.engine_class, + user_sseu.engine_instance); + if (!engine) + return -EINVAL; + + ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */ + if (IS_ERR(ce)) + return PTR_ERR(ce); + + user_sseu.slice_mask = ce->sseu.slice_mask; + user_sseu.subslice_mask = ce->sseu.subslice_mask; + user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; + user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; + + intel_context_pin_unlock(ce); + + if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, + sizeof(user_sseu))) + return -EFAULT; out: - i915_gem_context_put(ctx); + args->size = sizeof(user_sseu); + return 0; } @@ -852,15 +1672,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, if (!ctx) return -ENOENT; - args->size = 0; switch (args->param) { - case I915_CONTEXT_PARAM_BAN_PERIOD: - ret = -EINVAL; - break; case I915_CONTEXT_PARAM_NO_ZEROMAP: + args->size = 0; args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); break; + case I915_CONTEXT_PARAM_GTT_SIZE: + args->size = 0; if (ctx->ppgtt) args->value = ctx->ppgtt->vm.total; else if (to_i915(dev)->mm.aliasing_ppgtt) @@ -868,15 +1687,36 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, else args->value = to_i915(dev)->ggtt.vm.total; break; + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: + args->size = 0; args->value = i915_gem_context_no_error_capture(ctx); break; + case I915_CONTEXT_PARAM_BANNABLE: + args->size = 0; args->value = i915_gem_context_is_bannable(ctx); break; + + case I915_CONTEXT_PARAM_RECOVERABLE: + args->size = 0; + args->value = i915_gem_context_is_recoverable(ctx); + break; + case I915_CONTEXT_PARAM_PRIORITY: + args->size = 0; args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT; break; + + case I915_CONTEXT_PARAM_SSEU: + ret = get_sseu(ctx, args); + break; + + case I915_CONTEXT_PARAM_VM: + ret = get_ppgtt(file_priv, ctx, args); + break; + + case I915_CONTEXT_PARAM_BAN_PERIOD: default: ret = -EINVAL; break; @@ -892,67 +1732,13 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; struct i915_gem_context *ctx; - int ret = 0; + int ret; ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (!ctx) return -ENOENT; - switch (args->param) { - case I915_CONTEXT_PARAM_BAN_PERIOD: - ret = -EINVAL; - break; - case I915_CONTEXT_PARAM_NO_ZEROMAP: - if (args->size) - ret = -EINVAL; - else if (args->value) - set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); - else - clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); - break; - case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: - if (args->size) - ret = -EINVAL; - else if (args->value) - i915_gem_context_set_no_error_capture(ctx); - else - i915_gem_context_clear_no_error_capture(ctx); - break; - case I915_CONTEXT_PARAM_BANNABLE: - if (args->size) - ret = -EINVAL; - else if (!capable(CAP_SYS_ADMIN) && !args->value) - ret = -EPERM; - else if (args->value) - i915_gem_context_set_bannable(ctx); - else - i915_gem_context_clear_bannable(ctx); - break; - - case I915_CONTEXT_PARAM_PRIORITY: - { - s64 priority = args->value; - - if (args->size) - ret = -EINVAL; - else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) - ret = -ENODEV; - else if (priority > 
I915_CONTEXT_MAX_USER_PRIORITY || - priority < I915_CONTEXT_MIN_USER_PRIORITY) - ret = -EINVAL; - else if (priority > I915_CONTEXT_DEFAULT_PRIORITY && - !capable(CAP_SYS_NICE)) - ret = -EPERM; - else - ctx->sched.priority = - I915_USER_PRIORITY(priority); - } - break; - - default: - ret = -EINVAL; - break; - } + ret = ctx_setparam(file_priv, ctx, args); i915_gem_context_put(ctx); return ret; @@ -1027,3 +1813,28 @@ out_unlock: #include "selftests/mock_context.c" #include "selftests/i915_gem_context.c" #endif + +static void i915_global_gem_context_shrink(void) +{ + kmem_cache_shrink(global.slab_luts); +} + +static void i915_global_gem_context_exit(void) +{ + kmem_cache_destroy(global.slab_luts); +} + +static struct i915_global_gem_context global = { { + .shrink = i915_global_gem_context_shrink, + .exit = i915_global_gem_context_exit, +} }; + +int __init i915_global_gem_context_init(void) +{ + global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); + if (!global.slab_luts) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index f6d870b1f73e..23dcb01bfd82 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -25,188 +25,16 @@ #ifndef __I915_GEM_CONTEXT_H__ #define __I915_GEM_CONTEXT_H__ -#include <linux/bitops.h> -#include <linux/list.h> -#include <linux/radix-tree.h> +#include "i915_gem_context_types.h" #include "i915_gem.h" #include "i915_scheduler.h" - -struct pid; +#include "intel_context.h" +#include "intel_device_info.h" struct drm_device; struct drm_file; -struct drm_i915_private; -struct drm_i915_file_private; -struct i915_hw_ppgtt; -struct i915_request; -struct i915_vma; -struct intel_ring; - -#define DEFAULT_CONTEXT_HANDLE 0 - -struct intel_context; - -struct intel_context_ops { - void (*unpin)(struct intel_context *ce); - void (*destroy)(struct intel_context *ce); -}; - -/** - * struct i915_gem_context - client state - * - * The struct i915_gem_context represents the combined view of the driver and - * logical hardware state for a particular client. - */ -struct i915_gem_context { - /** i915: i915 device backpointer */ - struct drm_i915_private *i915; - - /** file_priv: owning file descriptor */ - struct drm_i915_file_private *file_priv; - - /** - * @ppgtt: unique address space (GTT) - * - * In full-ppgtt mode, each context has its own address space ensuring - * complete seperation of one client from all others. - * - * In other modes, this is a NULL pointer with the expectation that - * the caller uses the shared global GTT. - */ - struct i915_hw_ppgtt *ppgtt; - - /** - * @pid: process id of creator - * - * Note that who created the context may not be the principle user, - * as the context may be shared across a local socket. However, - * that should only affect the default context, all contexts created - * explicitly by the client are expected to be isolated. - */ - struct pid *pid; - - /** - * @name: arbitrary name - * - * A name is constructed for the context from the creator's process - * name, pid and user handle in order to uniquely identify the - * context in messages. 
- */ - const char *name; - - /** link: place with &drm_i915_private.context_list */ - struct list_head link; - struct llist_node free_link; - - /** - * @ref: reference count - * - * A reference to a context is held by both the client who created it - * and on each request submitted to the hardware using the request - * (to ensure the hardware has access to the state until it has - * finished all pending writes). See i915_gem_context_get() and - * i915_gem_context_put() for access. - */ - struct kref ref; - - /** - * @rcu: rcu_head for deferred freeing. - */ - struct rcu_head rcu; - - /** - * @user_flags: small set of booleans controlled by the user - */ - unsigned long user_flags; -#define UCONTEXT_NO_ZEROMAP 0 -#define UCONTEXT_NO_ERROR_CAPTURE 1 -#define UCONTEXT_BANNABLE 2 - - /** - * @flags: small set of booleans - */ - unsigned long flags; -#define CONTEXT_BANNED 0 -#define CONTEXT_CLOSED 1 -#define CONTEXT_FORCE_SINGLE_SUBMISSION 2 - - /** - * @hw_id: - unique identifier for the context - * - * The hardware needs to uniquely identify the context for a few - * functions like fault reporting, PASID, scheduling. The - * &drm_i915_private.context_hw_ida is used to assign a unqiue - * id for the lifetime of the context. - * - * @hw_id_pin_count: - number of times this context had been pinned - * for use (should be, at most, once per engine). - * - * @hw_id_link: - all contexts with an assigned id are tracked - * for possible repossession. - */ - unsigned int hw_id; - atomic_t hw_id_pin_count; - struct list_head hw_id_link; - - /** - * @user_handle: userspace identifier - * - * A unique per-file identifier is generated from - * &drm_i915_file_private.contexts. - */ - u32 user_handle; - - struct i915_sched_attr sched; - - /** engine: per-engine logical HW state */ - struct intel_context { - struct i915_gem_context *gem_context; - struct intel_engine_cs *active; - struct i915_vma *state; - struct intel_ring *ring; - u32 *lrc_reg_state; - u64 lrc_desc; - int pin_count; - - const struct intel_context_ops *ops; - } __engine[I915_NUM_ENGINES]; - - /** ring_size: size for allocating the per-engine ring buffer */ - u32 ring_size; - /** desc_template: invariant fields for the HW context descriptor */ - u32 desc_template; - - /** guilty_count: How many times this context has caused a GPU hang. */ - atomic_t guilty_count; - /** - * @active_count: How many times this context was active during a GPU - * hang, but did not cause it. - */ - atomic_t active_count; - -#define CONTEXT_SCORE_GUILTY 10 -#define CONTEXT_SCORE_BAN_THRESHOLD 40 - /** ban_score: Accumulated score of all hangs caused by this context. */ - atomic_t ban_score; - - /** remap_slice: Bitmask of cache lines that need remapping */ - u8 remap_slice; - - /** handles_vma: rbtree to look up our context specific obj/vma for - * the user handle. (user handles are per fd, but the binding is - * per vm, which may be one per context or shared with the global GTT) - */ - struct radix_tree_root handles_vma; - - /** handles_list: reverse list of all the rbtree entries in use for - * this context, which allows us to free all the allocations on - * context close. 
- */ - struct list_head handles_list; -}; - static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx) { return test_bit(CONTEXT_CLOSED, &ctx->flags); @@ -248,6 +76,21 @@ static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx) clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags); } +static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx) +{ + return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); +} + +static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx) +{ + set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); +} + +static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx) +{ + clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); +} + static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx) { return test_bit(CONTEXT_BANNED, &ctx->flags); @@ -283,45 +126,11 @@ static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx) atomic_dec(&ctx->hw_id_pin_count); } -static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) -{ - return c->user_handle == DEFAULT_CONTEXT_HANDLE; -} - static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) { return !ctx->file_priv; } -static inline struct intel_context * -to_intel_context(struct i915_gem_context *ctx, - const struct intel_engine_cs *engine) -{ - return &ctx->__engine[engine->id]; -} - -static inline struct intel_context * -intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) -{ - return engine->context_pin(engine, ctx); -} - -static inline void __intel_context_pin(struct intel_context *ce) -{ - GEM_BUG_ON(!ce->pin_count); - ce->pin_count++; -} - -static inline void intel_context_unpin(struct intel_context *ce) -{ - GEM_BUG_ON(!ce->pin_count); - if (--ce->pin_count) - return; - - GEM_BUG_ON(!ce->ops); - ce->ops->unpin(ce); -} - /* i915_gem_context.c */ int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv); void i915_gem_contexts_lost(struct drm_i915_private *dev_priv); @@ -332,12 +141,18 @@ int i915_gem_context_open(struct drm_i915_private *i915, void i915_gem_context_close(struct drm_file *file); int i915_switch_context(struct i915_request *rq); -int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); +int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask); void i915_gem_context_release(struct kref *ctx_ref); struct i915_gem_context * i915_gem_context_create_gvt(struct drm_device *dev); +int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, @@ -364,4 +179,7 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx) kref_put(&ctx->ref, i915_gem_context_release); } +struct i915_lut_handle *i915_lut_handle_alloc(void); +void i915_lut_handle_free(struct i915_lut_handle *lut); + #endif /* !__I915_GEM_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h new file mode 100644 index 000000000000..e2ec58b10fb2 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_context_types.h @@ -0,0 +1,175 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation 
+ */ + +#ifndef __I915_GEM_CONTEXT_TYPES_H__ +#define __I915_GEM_CONTEXT_TYPES_H__ + +#include <linux/atomic.h> +#include <linux/list.h> +#include <linux/llist.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/radix-tree.h> +#include <linux/rbtree.h> +#include <linux/rcupdate.h> +#include <linux/types.h> + +#include "i915_scheduler.h" +#include "intel_context_types.h" + +struct pid; + +struct drm_i915_private; +struct drm_i915_file_private; +struct i915_hw_ppgtt; +struct i915_timeline; +struct intel_ring; + +/** + * struct i915_gem_context - client state + * + * The struct i915_gem_context represents the combined view of the driver and + * logical hardware state for a particular client. + */ +struct i915_gem_context { + /** i915: i915 device backpointer */ + struct drm_i915_private *i915; + + /** file_priv: owning file descriptor */ + struct drm_i915_file_private *file_priv; + + struct i915_timeline *timeline; + + /** + * @ppgtt: unique address space (GTT) + * + * In full-ppgtt mode, each context has its own address space ensuring + * complete seperation of one client from all others. + * + * In other modes, this is a NULL pointer with the expectation that + * the caller uses the shared global GTT. + */ + struct i915_hw_ppgtt *ppgtt; + + /** + * @pid: process id of creator + * + * Note that who created the context may not be the principle user, + * as the context may be shared across a local socket. However, + * that should only affect the default context, all contexts created + * explicitly by the client are expected to be isolated. + */ + struct pid *pid; + + /** + * @name: arbitrary name + * + * A name is constructed for the context from the creator's process + * name, pid and user handle in order to uniquely identify the + * context in messages. + */ + const char *name; + + /** link: place with &drm_i915_private.context_list */ + struct list_head link; + struct llist_node free_link; + + /** + * @ref: reference count + * + * A reference to a context is held by both the client who created it + * and on each request submitted to the hardware using the request + * (to ensure the hardware has access to the state until it has + * finished all pending writes). See i915_gem_context_get() and + * i915_gem_context_put() for access. + */ + struct kref ref; + + /** + * @rcu: rcu_head for deferred freeing. + */ + struct rcu_head rcu; + + /** + * @user_flags: small set of booleans controlled by the user + */ + unsigned long user_flags; +#define UCONTEXT_NO_ZEROMAP 0 +#define UCONTEXT_NO_ERROR_CAPTURE 1 +#define UCONTEXT_BANNABLE 2 +#define UCONTEXT_RECOVERABLE 3 + + /** + * @flags: small set of booleans + */ + unsigned long flags; +#define CONTEXT_BANNED 0 +#define CONTEXT_CLOSED 1 +#define CONTEXT_FORCE_SINGLE_SUBMISSION 2 + + /** + * @hw_id: - unique identifier for the context + * + * The hardware needs to uniquely identify the context for a few + * functions like fault reporting, PASID, scheduling. The + * &drm_i915_private.context_hw_ida is used to assign a unqiue + * id for the lifetime of the context. + * + * @hw_id_pin_count: - number of times this context had been pinned + * for use (should be, at most, once per engine). + * + * @hw_id_link: - all contexts with an assigned id are tracked + * for possible repossession. 
+ */ + unsigned int hw_id; + atomic_t hw_id_pin_count; + struct list_head hw_id_link; + + struct list_head active_engines; + struct mutex mutex; + + struct i915_sched_attr sched; + + /** hw_contexts: per-engine logical HW state */ + struct rb_root hw_contexts; + spinlock_t hw_contexts_lock; + + /** ring_size: size for allocating the per-engine ring buffer */ + u32 ring_size; + /** desc_template: invariant fields for the HW context descriptor */ + u32 desc_template; + + /** guilty_count: How many times this context has caused a GPU hang. */ + atomic_t guilty_count; + /** + * @active_count: How many times this context was active during a GPU + * hang, but did not cause it. + */ + atomic_t active_count; + + /** + * @hang_timestamp: The last time(s) this context caused a GPU hang + */ + unsigned long hang_timestamp[2]; +#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */ + + /** remap_slice: Bitmask of cache lines that need remapping */ + u8 remap_slice; + + /** handles_vma: rbtree to look up our context specific obj/vma for + * the user handle. (user handles are per fd, but the binding is + * per vm, which may be one per context or shared with the global GTT) + */ + struct radix_tree_root handles_vma; + + /** handles_list: reverse list of all the rbtree entries in use for + * this context, which allows us to free all the allocations on + * context close. + */ + struct list_head handles_list; +}; + +#endif /* __I915_GEM_CONTEXT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 82e2ca17a441..5a101a9462d8 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -27,7 +27,6 @@ #include <linux/dma-buf.h> #include <linux/reservation.h> -#include <drm/drmP.h> #include "i915_drv.h" @@ -108,6 +107,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); } @@ -301,7 +301,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, get_dma_buf(dma_buf); - obj = i915_gem_object_alloc(to_i915(dev)); + obj = i915_gem_object_alloc(); if (obj == NULL) { ret = -ENOMEM; goto fail_detach; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 02b83a5ed96c..060f5903544a 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -26,7 +26,6 @@ * */ -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -39,31 +38,21 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl { static bool ggtt_is_idle(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - if (i915->gt.active_requests) - return false; - - for_each_engine(engine, i915, id) { - if (!intel_engine_has_kernel_context(engine)) - return false; - } - - return true; + return !i915->gt.active_requests; } static int ggtt_flush(struct drm_i915_private *i915) { int err; - /* Not everything in the GGTT is tracked via vma (otherwise we + /* + * Not everything in the GGTT is tracked via vma (otherwise we * could evict as required with minimal stalling) so we are forced * to idle the GPU and explicitly retire outstanding requests in * the hopes that we can then remove contexts and the like only * bound by their active reference. 
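/*
 * Editorial aside, not part of this patch: the per-engine __engine[] array of
 * the old header is replaced here by an rbtree of contexts (hw_contexts)
 * guarded by hw_contexts_lock, which intel_context_lookup() walks later in
 * this series. A minimal sketch of such a lookup, with a simplified and
 * assumed node layout (the real one lives in intel_context_types.h):
 */
#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct example_ce {
	struct rb_node node;	/* linked into ctx->hw_contexts */
	const void *engine;	/* key: the owning engine */
};

static struct example_ce *
example_context_lookup(struct rb_root *hw_contexts, spinlock_t *lock,
		       const void *engine)
{
	struct example_ce *ce = NULL;
	struct rb_node *p;

	spin_lock(lock);
	p = hw_contexts->rb_node;
	while (p) {
		struct example_ce *this = rb_entry(p, struct example_ce, node);

		if (this->engine == engine) {
			ce = this;
			break;
		}

		/* nodes are ordered by engine pointer value */
		if ((unsigned long)this->engine < (unsigned long)engine)
			p = p->rb_right;
		else
			p = p->rb_left;
	}
	spin_unlock(lock);

	return ce;
}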
*/ - err = i915_gem_switch_to_kernel_context(i915); + err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines); if (err) return err; @@ -127,31 +116,25 @@ i915_gem_evict_something(struct i915_address_space *vm, struct drm_i915_private *dev_priv = vm->i915; struct drm_mm_scan scan; struct list_head eviction_list; - struct list_head *phases[] = { - &vm->inactive_list, - &vm->active_list, - NULL, - }, **phase; struct i915_vma *vma, *next; struct drm_mm_node *node; enum drm_mm_insert_mode mode; + struct i915_vma *active; int ret; lockdep_assert_held(&vm->i915->drm.struct_mutex); trace_i915_gem_evict(vm, min_size, alignment, flags); /* - * The goal is to evict objects and amalgamate space in LRU order. - * The oldest idle objects reside on the inactive list, which is in - * retirement order. The next objects to retire are those in flight, - * on the active list, again in retirement order. + * The goal is to evict objects and amalgamate space in rough LRU order. + * Since both active and inactive objects reside on the same list, + * in a mix of creation and last scanned order, as we process the list + * we sort it into inactive/active, which keeps the active portion + * in a rough MRU order. * * The retirement sequence is thus: - * 1. Inactive objects (already retired) - * 2. Active objects (will stall on unbinding) - * - * On each list, the oldest objects lie at the HEAD with the freshest - * object on the TAIL. + * 1. Inactive objects (already retired, random order) + * 2. Active objects (will stall on unbinding, oldest scanned first) */ mode = DRM_MM_INSERT_BEST; if (flags & PIN_HIGH) @@ -170,17 +153,46 @@ i915_gem_evict_something(struct i915_address_space *vm, */ if (!(flags & PIN_NONBLOCK)) i915_retire_requests(dev_priv); - else - phases[1] = NULL; search_again: + active = NULL; INIT_LIST_HEAD(&eviction_list); - phase = phases; - do { - list_for_each_entry(vma, *phase, vm_link) - if (mark_free(&scan, vma, flags, &eviction_list)) - goto found; - } while (*++phase); + list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) { + /* + * We keep this list in a rough least-recently scanned order + * of active elements (inactive elements are cheap to reap). + * New entries are added to the end, and we move anything we + * scan to the end. The assumption is that the working set + * of applications is either steady state (and thanks to the + * userspace bo cache it almost always is) or volatile and + * frequently replaced after a frame, which are self-evicting! + * Given that assumption, the MRU order of the scan list is + * fairly static, and keeping it in least-recently scan order + * is suitable. + * + * To notice when we complete one full cycle, we record the + * first active element seen, before moving it to the tail. + */ + if (i915_vma_is_active(vma)) { + if (vma == active) { + if (flags & PIN_NONBLOCK) + break; + + active = ERR_PTR(-EAGAIN); + } + + if (active != ERR_PTR(-EAGAIN)) { + if (!active) + active = vma; + + list_move_tail(&vma->vm_link, &vm->bound_list); + continue; + } + } + + if (mark_free(&scan, vma, flags, &eviction_list)) + goto found; + } /* Nothing found, clean up and bail out! 
*/ list_for_each_entry_safe(vma, next, &eviction_list, evict_link) { @@ -389,11 +401,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, */ int i915_gem_evict_vm(struct i915_address_space *vm) { - struct list_head *phases[] = { - &vm->inactive_list, - &vm->active_list, - NULL - }, **phase; struct list_head eviction_list; struct i915_vma *vma, *next; int ret; @@ -413,16 +420,15 @@ int i915_gem_evict_vm(struct i915_address_space *vm) } INIT_LIST_HEAD(&eviction_list); - phase = phases; - do { - list_for_each_entry(vma, *phase, vm_link) { - if (i915_vma_is_pinned(vma)) - continue; + mutex_lock(&vm->mutex); + list_for_each_entry(vma, &vm->bound_list, vm_link) { + if (i915_vma_is_pinned(vma)) + continue; - __i915_vma_pin(vma); - list_add(&vma->evict_link, &eviction_list); - } - } while (*++phase); + __i915_vma_pin(vma); + list_add(&vma->evict_link, &eviction_list); + } + mutex_unlock(&vm->mutex); ret = 0; list_for_each_entry_safe(vma, next, &eviction_list, evict_link) { diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 7b3ae2333dbf..3d672c9edb94 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -26,12 +26,11 @@ * */ -#include <linux/dma_remapping.h> +#include <linux/intel-iommu.h> #include <linux/reservation.h> #include <linux/sync_file.h> #include <linux/uaccess.h> -#include <drm/drmP.h> #include <drm/drm_syncobj.h> #include <drm/i915_drm.h> @@ -754,6 +753,68 @@ static int eb_select_context(struct i915_execbuffer *eb) return 0; } +static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring) +{ + struct i915_request *rq; + + /* + * Completely unscientific finger-in-the-air estimates for suitable + * maximum user request size (to avoid blocking) and then backoff. + */ + if (intel_ring_update_space(ring) >= PAGE_SIZE) + return NULL; + + /* + * Find a request that after waiting upon, there will be at least half + * the ring available. The hysteresis allows us to compete for the + * shared ring and should mean that we sleep less often prior to + * claiming our resources, but not so long that the ring completely + * drains before we can submit our next request. + */ + list_for_each_entry(rq, &ring->request_list, ring_link) { + if (__intel_ring_space(rq->postfix, + ring->emit, ring->size) > ring->size / 2) + break; + } + if (&rq->ring_link == &ring->request_list) + return NULL; /* weird, we will check again later for real */ + + return i915_request_get(rq); +} + +static int eb_wait_for_ring(const struct i915_execbuffer *eb) +{ + const struct intel_context *ce; + struct i915_request *rq; + int ret = 0; + + /* + * Apply a light amount of backpressure to prevent excessive hogs + * from blocking waiting for space whilst holding struct_mutex and + * keeping all of their resources pinned. + */ + + ce = intel_context_lookup(eb->ctx, eb->engine); + if (!ce || !ce->ring) /* first use, assume empty! 
*/ + return 0; + + rq = __eb_wait_for_ring(ce->ring); + if (rq) { + mutex_unlock(&eb->i915->drm.struct_mutex); + + if (i915_request_wait(rq, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT) < 0) + ret = -EINTR; + + i915_request_put(rq); + + mutex_lock(&eb->i915->drm.struct_mutex); + } + + return ret; +} + static int eb_lookup_vmas(struct i915_execbuffer *eb) { struct radix_tree_root *handles_vma = &eb->ctx->handles_vma; @@ -788,12 +849,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) } vma = i915_vma_instance(obj, eb->vm, NULL); - if (unlikely(IS_ERR(vma))) { + if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; } - lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL); + lut = i915_lut_handle_alloc(); if (unlikely(!lut)) { err = -ENOMEM; goto err_obj; @@ -801,7 +862,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) err = radix_tree_insert(handles_vma, handle, vma); if (unlikely(err)) { - kmem_cache_free(eb->i915->luts, lut); + i915_lut_handle_free(lut); goto err_obj; } @@ -940,7 +1001,10 @@ static void reloc_gpu_flush(struct reloc_cache *cache) { GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32)); cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; + + __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size); i915_gem_object_unpin_map(cache->rq->batch->obj); + i915_gem_chipset_flush(cache->rq->i915); i915_request_add(cache->rq); @@ -1153,10 +1217,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, if (IS_ERR(cmd)) return PTR_ERR(cmd); - err = i915_gem_object_set_to_wc_domain(obj, false); - if (err) - goto err_unmap; - batch = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); @@ -1268,7 +1328,7 @@ relocate_entry(struct i915_vma *vma, else if (gen >= 4) len = 4; else - len = 6; + len = 3; batch = reloc_gpu(eb, vma, len); if (IS_ERR(batch)) @@ -1309,11 +1369,6 @@ relocate_entry(struct i915_vma *vma, *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *batch++ = addr; *batch++ = target_offset; - - /* And again for good measure (blb/pnv) */ - *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *batch++ = addr; - *batch++ = target_offset; } goto out; @@ -1385,7 +1440,7 @@ eb_relocate_entry(struct i915_execbuffer *eb, * batchbuffers. */ if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && - IS_GEN6(eb->i915)) { + IS_GEN(eb->i915, 6)) { err = i915_vma_bind(target, target->obj->cache_level, PIN_GLOBAL); if (WARN_ONCE(err, @@ -1452,7 +1507,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) * to read. However, if the array is not writable the user loses * the updated relocation values. */ - if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs)))) + if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs)))) return -EFAULT; do { @@ -1559,7 +1614,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) addr = u64_to_user_ptr(entry->relocs_ptr); size *= sizeof(struct drm_i915_gem_relocation_entry); - if (!access_ok(VERIFY_READ, addr, size)) + if (!access_ok(addr, size)) return -EFAULT; end = addr + size; @@ -1610,6 +1665,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) (char __user *)urelocs + copied, len)) { end_user: + user_access_end(); kvfree(relocs); err = -EFAULT; goto err; @@ -1628,7 +1684,9 @@ end_user: * happened we would make the mistake of assuming that the * relocations were valid. 
*/ - user_access_begin(); + if (!user_access_begin(urelocs, size)) + goto end_user; + for (copied = 0; copied < nreloc; copied++) unsafe_put_user(-1, &urelocs[copied].presumed_offset, @@ -1898,7 +1956,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) u32 *cs; int i; - if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) { + if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) { DRM_DEBUG("sol reset is gen7/rcs only\n"); return -EINVAL; } @@ -1979,6 +2037,18 @@ static int eb_submit(struct i915_execbuffer *eb) return err; } + /* + * After we completed waiting for other engines (using HW semaphores) + * then we can signal that this request/batch is ready to run. This + * allows us to determine if the batch is still waiting on the GPU + * or actually running by checking the breadcrumb. + */ + if (eb->engine->emit_init_breadcrumb) { + err = eb->engine->emit_init_breadcrumb(eb->request); + if (err) + return err; + } + err = eb->engine->emit_bb_start(eb->request, eb->batch->node.start + eb->batch_start_offset, @@ -2011,11 +2081,11 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, #define I915_USER_RINGS (4) static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = { - [I915_EXEC_DEFAULT] = RCS, - [I915_EXEC_RENDER] = RCS, - [I915_EXEC_BLT] = BCS, - [I915_EXEC_BSD] = VCS, - [I915_EXEC_VEBOX] = VECS + [I915_EXEC_DEFAULT] = RCS0, + [I915_EXEC_RENDER] = RCS0, + [I915_EXEC_BLT] = BCS0, + [I915_EXEC_BSD] = VCS0, + [I915_EXEC_VEBOX] = VECS0 }; static struct intel_engine_cs * @@ -2038,7 +2108,7 @@ eb_select_engine(struct drm_i915_private *dev_priv, return NULL; } - if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) { + if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) { unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; if (bsd_idx == I915_EXEC_BSD_DEFAULT) { @@ -2095,7 +2165,7 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args, return ERR_PTR(-EINVAL); user = u64_to_user_ptr(args->cliprects_ptr); - if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user))) + if (!access_ok(user, nfences * sizeof(*user))) return ERR_PTR(-EFAULT); fences = kvmalloc_array(nfences, sizeof(*fences), @@ -2162,7 +2232,7 @@ await_fence_array(struct i915_execbuffer *eb, if (!(flags & I915_EXEC_FENCE_WAIT)) continue; - drm_syncobj_search_fence(syncobj, 0, 0, &fence); + fence = drm_syncobj_fence_get(syncobj); if (!fence) return -EINVAL; @@ -2191,7 +2261,7 @@ signal_fence_array(struct i915_execbuffer *eb, if (!(flags & I915_EXEC_FENCE_SIGNAL)) continue; - drm_syncobj_replace_fence(syncobj, 0, fence); + drm_syncobj_replace_fence(syncobj, fence); } } @@ -2205,6 +2275,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, struct i915_execbuffer eb; struct dma_fence *in_fence = NULL; struct sync_file *out_fence = NULL; + intel_wakeref_t wakeref; int out_fence_fd = -1; int err; @@ -2240,10 +2311,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (args->flags & I915_EXEC_IS_PINNED) eb.batch_flags |= I915_DISPATCH_PINNED; - eb.engine = eb_select_engine(eb.i915, file, args); - if (!eb.engine) - return -EINVAL; - if (args->flags & I915_EXEC_FENCE_IN) { in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); if (!in_fence) @@ -2268,6 +2335,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (unlikely(err)) goto err_destroy; + eb.engine = eb_select_engine(eb.i915, file, args); + if (!eb.engine) { + err = -EINVAL; + goto err_engine; + } + /* * Take a local wakeref for preparing to dispatch the execbuf as * we expect to access the hardware fairly frequently in 
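/*
 * Editorial aside, not part of this patch: the uaccess hunks around here are
 * converting to the newer kernel API, where access_ok() no longer takes a
 * VERIFY_* argument and user_access_begin() takes the pointer and length
 * itself and can fail, so every exit path must pair it with
 * user_access_end(). The canonical shape of that pattern, as a sketch:
 */
#include <linux/types.h>
#include <linux/uaccess.h>

static int example_clear_offsets(u64 __user *offsets, unsigned int count)
{
	unsigned int i;

	if (!access_ok(offsets, count * sizeof(*offsets)))
		return -EFAULT;

	if (!user_access_begin(offsets, count * sizeof(*offsets)))
		return -EFAULT;

	for (i = 0; i < count; i++)
		unsafe_put_user(0, &offsets[i], efault); /* jumps to 'efault' on fault */

	user_access_end();
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}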
the @@ -2275,12 +2348,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, * wakeref that we hold until the GPU has been idle for at least * 100ms. */ - intel_runtime_pm_get(eb.i915); + wakeref = intel_runtime_pm_get(eb.i915); err = i915_mutex_lock_interruptible(dev); if (err) goto err_rpm; + err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */ + if (unlikely(err)) + goto err_unlock; + err = eb_relocate(&eb); if (err) { /* @@ -2425,9 +2502,11 @@ err_batch_unpin: err_vma: if (eb.exec) eb_release_vmas(&eb); +err_unlock: mutex_unlock(&dev->struct_mutex); err_rpm: - intel_runtime_pm_put(eb.i915); + intel_runtime_pm_put(eb.i915, wakeref); +err_engine: i915_gem_context_put(eb.ctx); err_destroy: eb_destroy(&eb); @@ -2610,7 +2689,16 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, unsigned int i; /* Copy the new buffer offsets back to the user's exec list. */ - user_access_begin(); + /* + * Note: count * sizeof(*user_exec_list) does not overflow, + * because we checked 'count' in check_buffer_count(). + * + * And this range already got effectively checked earlier + * when we did the "copy_from_user()" above. + */ + if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list))) + goto end_user; + for (i = 0; i < args->buffer_count; i++) { if (!(exec2_list[i].offset & UPDATE)) continue; diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index d548ac05ccd7..3084f52e3372 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -21,7 +21,6 @@ * IN THE SOFTWARE. */ -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -193,9 +192,9 @@ static void fence_write(struct drm_i915_fence_reg *fence, * and explicitly managed for internal users. */ - if (IS_GEN2(fence->i915)) + if (IS_GEN(fence->i915, 2)) i830_write_fence_reg(fence, vma); - else if (IS_GEN3(fence->i915)) + else if (IS_GEN(fence->i915, 3)) i915_write_fence_reg(fence, vma); else i965_write_fence_reg(fence, vma); @@ -210,6 +209,8 @@ static void fence_write(struct drm_i915_fence_reg *fence, static int fence_update(struct drm_i915_fence_reg *fence, struct i915_vma *vma) { + intel_wakeref_t wakeref; + struct i915_vma *old; int ret; if (vma) { @@ -223,54 +224,61 @@ static int fence_update(struct drm_i915_fence_reg *fence, i915_gem_object_get_tiling(vma->obj))) return -EINVAL; - ret = i915_gem_active_retire(&vma->last_fence, + ret = i915_active_request_retire(&vma->last_fence, &vma->obj->base.dev->struct_mutex); if (ret) return ret; } - if (fence->vma) { - struct i915_vma *old = fence->vma; - - ret = i915_gem_active_retire(&old->last_fence, + old = xchg(&fence->vma, NULL); + if (old) { + ret = i915_active_request_retire(&old->last_fence, &old->obj->base.dev->struct_mutex); - if (ret) + if (ret) { + fence->vma = old; return ret; + } i915_vma_flush_writes(old); - } - if (fence->vma && fence->vma != vma) { - /* Ensure that all userspace CPU access is completed before + /* + * Ensure that all userspace CPU access is completed before * stealing the fence. */ - GEM_BUG_ON(fence->vma->fence != fence); - i915_vma_revoke_mmap(fence->vma); - - fence->vma->fence = NULL; - fence->vma = NULL; + if (old != vma) { + GEM_BUG_ON(old->fence != fence); + i915_vma_revoke_mmap(old); + old->fence = NULL; + } list_move(&fence->link, &fence->i915->mm.fence_list); } - /* We only need to update the register itself if the device is awake. + /* + * We only need to update the register itself if the device is awake. 
* If the device is currently powered down, we will defer the write * to the runtime resume, see i915_gem_restore_fences(). + * + * This only works for removing the fence register, on acquisition + * the caller must hold the rpm wakeref. The fence register must + * be cleared before we can use any other fences to ensure that + * the new fences do not overlap the elided clears, confusing HW. */ - if (intel_runtime_pm_get_if_in_use(fence->i915)) { - fence_write(fence, vma); - intel_runtime_pm_put(fence->i915); + wakeref = intel_runtime_pm_get_if_in_use(fence->i915); + if (!wakeref) { + GEM_BUG_ON(vma); + return 0; } - if (vma) { - if (fence->vma != vma) { - vma->fence = fence; - fence->vma = vma; - } + WRITE_ONCE(fence->vma, vma); + fence_write(fence, vma); + if (vma) { + vma->fence = fence; list_move_tail(&fence->link, &fence->i915->mm.fence_list); } + intel_runtime_pm_put(fence->i915, wakeref); return 0; } @@ -435,32 +443,6 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence) } /** - * i915_gem_revoke_fences - revoke fence state - * @dev_priv: i915 device private - * - * Removes all GTT mmappings via the fence registers. This forces any user - * of the fence to reacquire that fence before continuing with their access. - * One use is during GPU reset where the fence register is lost and we need to - * revoke concurrent userspace access via GTT mmaps until the hardware has been - * reset and the fence registers have been restored. - */ -void i915_gem_revoke_fences(struct drm_i915_private *dev_priv) -{ - int i; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; - - GEM_BUG_ON(fence->vma && fence->vma->fence != fence); - - if (fence->vma) - i915_vma_revoke_mmap(fence->vma); - } -} - -/** * i915_gem_restore_fences - restore fence state * @dev_priv: i915 device private * @@ -472,9 +454,10 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv) { int i; + rcu_read_lock(); /* keep obj alive as we dereference */ for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; - struct i915_vma *vma = reg->vma; + struct i915_vma *vma = READ_ONCE(reg->vma); GEM_BUG_ON(vma && vma->fence != reg); @@ -482,18 +465,12 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv) * Commit delayed tiling changes if we have an object still * attached to the fence, otherwise just clear the fence. 
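/*
 * Editorial aside, not part of this patch: throughout this series (see the
 * execbuf and fence_update hunks) intel_runtime_pm_get() now returns an
 * intel_wakeref_t cookie which must be handed back to intel_runtime_pm_put(),
 * so each release can be matched to the acquisition that produced it. The
 * shape of the converted call sites, as a sketch:
 */
#include "i915_drv.h"
#include "intel_drv.h"

static int example_with_hw_awake(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	int err;

	wakeref = intel_runtime_pm_get(i915);	/* device guaranteed awake from here */

	err = 0;	/* ... touch registers / submit requests ... */

	intel_runtime_pm_put(i915, wakeref);	/* pass the cookie back */
	return err;
}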
*/ - if (vma && !i915_gem_object_is_tiled(vma->obj)) { - GEM_BUG_ON(!reg->dirty); - GEM_BUG_ON(i915_vma_has_userfault(vma)); - - list_move(®->link, &dev_priv->mm.fence_list); - vma->fence = NULL; + if (vma && !i915_gem_object_is_tiled(vma->obj)) vma = NULL; - } fence_write(reg, vma); - reg->vma = vma; } + rcu_read_unlock(); } /** @@ -554,8 +531,8 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv) void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) { - uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; - uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) { /* @@ -578,7 +555,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) swizzle_y = I915_BIT_6_SWIZZLE_NONE; } } else { - uint32_t dimm_c0, dimm_c1; + u32 dimm_c0, dimm_c1; dimm_c0 = I915_READ(MAD_DIMM_C0); dimm_c1 = I915_READ(MAD_DIMM_C1); dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; @@ -596,21 +573,51 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) swizzle_y = I915_BIT_6_SWIZZLE_NONE; } } - } else if (IS_GEN5(dev_priv)) { + } else if (IS_GEN(dev_priv, 5)) { /* On Ironlake whatever DRAM config, GPU always do * same swizzling setup. */ swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; - } else if (IS_GEN2(dev_priv)) { + } else if (IS_GEN(dev_priv, 2)) { /* As far as we know, the 865 doesn't have these bit 6 * swizzling issues. */ swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; - } else if (IS_MOBILE(dev_priv) || - IS_I915G(dev_priv) || IS_I945G(dev_priv)) { - uint32_t dcc; + } else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) { + /* The 965, G33, and newer, have a very flexible memory + * configuration. It will enable dual-channel mode + * (interleaving) on as much memory as it can, and the GPU + * will additionally sometimes enable different bit 6 + * swizzling for tiled objects from the CPU. + * + * Here's what I found on the G965: + * slot fill memory size swizzling + * 0A 0B 1A 1B 1-ch 2-ch + * 512 0 0 0 512 0 O + * 512 0 512 0 16 1008 X + * 512 0 0 512 16 1008 X + * 0 512 0 512 16 1008 X + * 1024 1024 1024 0 2048 1024 O + * + * We could probably detect this based on either the DRB + * matching, which was the case for the swizzling required in + * the table above, or from the 1-ch value being less than + * the minimum size of a rank. + * + * Reports indicate that the swizzling actually + * varies depending upon page placement inside the + * channels, i.e. we see swizzled pages where the + * banks of memory are paired and unswizzled on the + * uneven portion, so leave that as unknown. + */ + if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) { + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; + } + } else { + u32 dcc; /* On 9xx chipsets, channel interleave by the CPU is * determined by DCC. 
For single-channel, neither the CPU @@ -647,7 +654,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) } /* check for L-shaped memory aka modified enhanced addressing */ - if (IS_GEN4(dev_priv) && + if (IS_GEN(dev_priv, 4) && !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) { swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; @@ -659,37 +666,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } - } else { - /* The 965, G33, and newer, have a very flexible memory - * configuration. It will enable dual-channel mode - * (interleaving) on as much memory as it can, and the GPU - * will additionally sometimes enable different bit 6 - * swizzling for tiled objects from the CPU. - * - * Here's what I found on the G965: - * slot fill memory size swizzling - * 0A 0B 1A 1B 1-ch 2-ch - * 512 0 0 0 512 0 O - * 512 0 512 0 16 1008 X - * 512 0 0 512 16 1008 X - * 0 512 0 512 16 1008 X - * 1024 1024 1024 0 2048 1024 O - * - * We could probably detect this based on either the DRB - * matching, which was the case for the swizzling required in - * the table above, or from the 1-ch value being less than - * the minimum size of a rank. - * - * Reports indicate that the swizzling actually - * varies depending upon page placement inside the - * channels, i.e. we see swizzled pages where the - * banks of memory are paired and unswizzled on the - * uneven portion, so leave that as unknown. - */ - if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) { - swizzle_x = I915_BIT_6_SWIZZLE_9_10; - swizzle_y = I915_BIT_6_SWIZZLE_9; - } } if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN || @@ -789,8 +765,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, int i; if (obj->bit_17 == NULL) { - obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count), - sizeof(long), GFP_KERNEL); + obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL); if (obj->bit_17 == NULL) { DRM_ERROR("Failed to allocate memory for bit 17 " "record\n"); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h index 99a31ded4dfd..09dcaf14121b 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h @@ -50,4 +50,3 @@ struct drm_i915_fence_reg { }; #endif - diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index add1fe7aeb93..736c845eb77f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -33,11 +33,11 @@ #include <asm/set_memory.h> -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_vgpu.h" +#include "i915_reset.h" #include "i915_trace.h" #include "intel_drv.h" #include "intel_frontbuffer.h" @@ -474,8 +474,7 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page) spin_unlock(&vm->free_pages.lock); } -static void i915_address_space_init(struct i915_address_space *vm, - struct drm_i915_private *dev_priv) +static void i915_address_space_init(struct i915_address_space *vm, int subclass) { /* * The vm->mutex must be reclaim safe (for use in the shrinker). @@ -483,7 +482,8 @@ static void i915_address_space_init(struct i915_address_space *vm, * attempt holding the lock is immediately reported by lockdep. 
*/ mutex_init(&vm->mutex); - i915_gem_shrinker_taints_mutex(&vm->mutex); + lockdep_set_subclass(&vm->mutex, subclass); + i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); GEM_BUG_ON(!vm->total); drm_mm_init(&vm->mm, 0, vm->total); @@ -491,9 +491,8 @@ static void i915_address_space_init(struct i915_address_space *vm, stash_init(&vm->free_pages); - INIT_LIST_HEAD(&vm->active_list); - INIT_LIST_HEAD(&vm->inactive_list); INIT_LIST_HEAD(&vm->unbound_list); + INIT_LIST_HEAD(&vm->bound_list); } static void i915_address_space_fini(struct i915_address_space *vm) @@ -585,7 +584,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) * for all. */ size = I915_GTT_PAGE_SIZE_4K; - if (i915_vm_is_48bit(vm) && + if (i915_vm_is_4lvl(vm) && HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) { size = I915_GTT_PAGE_SIZE_64K; gfp |= __GFP_NOWARN; @@ -614,7 +613,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) vm->scratch_page.page = page; vm->scratch_page.daddr = addr; - vm->scratch_page.order = order; + vm->scratch_order = order; return 0; unmap_page: @@ -633,10 +632,11 @@ skip: static void cleanup_scratch_page(struct i915_address_space *vm) { struct i915_page_dma *p = &vm->scratch_page; + int order = vm->scratch_order; - dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT, + dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT, PCI_DMA_BIDIRECTIONAL); - __free_pages(p->page, p->order); + __free_pages(p->page, order); } static struct i915_page_table *alloc_pt(struct i915_address_space *vm) @@ -727,18 +727,13 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp) pdp->page_directory = NULL; } -static inline bool use_4lvl(const struct i915_address_space *vm) -{ - return i915_vm_is_48bit(vm); -} - static struct i915_page_directory_pointer * alloc_pdp(struct i915_address_space *vm) { struct i915_page_directory_pointer *pdp; int ret = -ENOMEM; - GEM_BUG_ON(!use_4lvl(vm)); + GEM_BUG_ON(!i915_vm_is_4lvl(vm)); pdp = kzalloc(sizeof(*pdp), GFP_KERNEL); if (!pdp) @@ -767,7 +762,7 @@ static void free_pdp(struct i915_address_space *vm, { __pdp_fini(pdp); - if (!use_4lvl(vm)) + if (!i915_vm_is_4lvl(vm)) return; cleanup_px(vm, pdp); @@ -792,14 +787,15 @@ static void gen8_initialize_pml4(struct i915_address_space *vm, memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4); } -/* PDE TLBs are a pain to invalidate on GEN8+. When we modify +/* + * PDE TLBs are a pain to invalidate on GEN8+. When we modify * the page table structures, we mark them dirty so that * context switching/execlist queuing code takes extra steps * to ensure that tlbs are flushed. */ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) { - ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask; + ppgtt->pd_dirty_engines = ALL_ENGINES; } /* Removes entries from a single page table, releasing it if it's empty. 
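/*
 * Editorial aside, not part of this patch: a sketch of what "reclaim safe"
 * means for vm->mutex above. During init the mutex is taken once inside a
 * fake fs_reclaim section so lockdep learns it may be acquired from the
 * shrinker; any later GFP_KERNEL allocation under it (or taking it under
 * fs_reclaim) is then reported immediately. This mirrors the intent of
 * i915_gem_shrinker_taints_mutex(); the helper name below is made up.
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>

static void example_taint_mutex_for_reclaim(struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);	/* pretend we are inside reclaim */
	mutex_lock(mutex);		/* record reclaim -> mutex ordering */
	mutex_unlock(mutex);
	fs_reclaim_release(GFP_KERNEL);
}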
@@ -810,8 +806,6 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, u64 start, u64 length) { unsigned int num_entries = gen8_pte_count(start, length); - unsigned int pte = gen8_pte_index(start); - unsigned int pte_end = pte + num_entries; gen8_pte_t *vaddr; GEM_BUG_ON(num_entries > pt->used_ptes); @@ -821,8 +815,7 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, return true; vaddr = kmap_atomic_px(pt); - while (pte < pte_end) - vaddr[pte++] = vm->scratch_pte; + memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries); kunmap_atomic(vaddr); return false; @@ -873,7 +866,7 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm, gen8_ppgtt_pdpe_t *vaddr; pdp->page_directory[pdpe] = pd; - if (!use_4lvl(vm)) + if (!i915_vm_is_4lvl(vm)) return; vaddr = kmap_atomic_px(pdp); @@ -938,7 +931,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, struct i915_page_directory_pointer *pdp; unsigned int pml4e; - GEM_BUG_ON(!use_4lvl(vm)); + GEM_BUG_ON(!i915_vm_is_4lvl(vm)); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { GEM_BUG_ON(pdp == vm->scratch_pdp); @@ -1220,7 +1213,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) GEM_BUG_ON(!clone->has_read_only); - vm->scratch_page.order = clone->scratch_page.order; + vm->scratch_order = clone->scratch_order; vm->scratch_pte = clone->scratch_pte; vm->scratch_pt = clone->scratch_pt; vm->scratch_pd = clone->scratch_pd; @@ -1249,7 +1242,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) goto free_pt; } - if (use_4lvl(vm)) { + if (i915_vm_is_4lvl(vm)) { vm->scratch_pdp = alloc_pdp(vm); if (IS_ERR(vm->scratch_pdp)) { ret = PTR_ERR(vm->scratch_pdp); @@ -1259,7 +1252,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) gen8_initialize_pt(vm, vm->scratch_pt); gen8_initialize_pd(vm, vm->scratch_pd); - if (use_4lvl(vm)) + if (i915_vm_is_4lvl(vm)) gen8_initialize_pdp(vm, vm->scratch_pdp); return 0; @@ -1281,7 +1274,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) enum vgt_g2v_type msg; int i; - if (use_4lvl(vm)) { + if (i915_vm_is_4lvl(vm)) { const u64 daddr = px_dma(&ppgtt->pml4); I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); @@ -1311,7 +1304,7 @@ static void gen8_free_scratch(struct i915_address_space *vm) if (!vm->scratch_page.daddr) return; - if (use_4lvl(vm)) + if (i915_vm_is_4lvl(vm)) free_pdp(vm, vm->scratch_pdp); free_pd(vm, vm->scratch_pd); free_pt(vm, vm->scratch_pt); @@ -1357,7 +1350,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) if (intel_vgpu_active(dev_priv)) gen8_ppgtt_notify_vgt(ppgtt, false); - if (use_4lvl(vm)) + if (i915_vm_is_4lvl(vm)) gen8_ppgtt_cleanup_4lvl(ppgtt); else gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp); @@ -1423,8 +1416,6 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, gen8_initialize_pd(vm, pd); gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe); GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm)); - - mark_tlbs_dirty(i915_vm_to_ppgtt(vm)); } ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); @@ -1490,84 +1481,6 @@ unwind: return -ENOMEM; } -static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt, - struct i915_page_directory_pointer *pdp, - u64 start, u64 length, - gen8_pte_t scratch_pte, - struct seq_file *m) -{ - struct i915_address_space *vm = &ppgtt->vm; - struct i915_page_directory *pd; - u32 pdpe; - - gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - struct i915_page_table *pt; - u64 pd_len = length; - u64 pd_start = start; - u32 
pde; - - if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd) - continue; - - seq_printf(m, "\tPDPE #%d\n", pdpe); - gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) { - u32 pte; - gen8_pte_t *pt_vaddr; - - if (pd->page_table[pde] == ppgtt->vm.scratch_pt) - continue; - - pt_vaddr = kmap_atomic_px(pt); - for (pte = 0; pte < GEN8_PTES; pte += 4) { - u64 va = (pdpe << GEN8_PDPE_SHIFT | - pde << GEN8_PDE_SHIFT | - pte << GEN8_PTE_SHIFT); - int i; - bool found = false; - - for (i = 0; i < 4; i++) - if (pt_vaddr[pte + i] != scratch_pte) - found = true; - if (!found) - continue; - - seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte); - for (i = 0; i < 4; i++) { - if (pt_vaddr[pte + i] != scratch_pte) - seq_printf(m, " %llx", pt_vaddr[pte + i]); - else - seq_puts(m, " SCRATCH "); - } - seq_puts(m, "\n"); - } - kunmap_atomic(pt_vaddr); - } - } -} - -static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) -{ - struct i915_address_space *vm = &ppgtt->vm; - const gen8_pte_t scratch_pte = vm->scratch_pte; - u64 start = 0, length = ppgtt->vm.total; - - if (use_4lvl(vm)) { - u64 pml4e; - struct i915_pml4 *pml4 = &ppgtt->pml4; - struct i915_page_directory_pointer *pdp; - - gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp) - continue; - - seq_printf(m, " PML4E #%llu\n", pml4e); - gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m); - } - } else { - gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m); - } -} - static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt) { struct i915_address_space *vm = &ppgtt->vm; @@ -1600,6 +1513,23 @@ unwind: return -ENOMEM; } +static void ppgtt_init(struct drm_i915_private *i915, + struct i915_hw_ppgtt *ppgtt) +{ + kref_init(&ppgtt->ref); + + ppgtt->vm.i915 = i915; + ppgtt->vm.dma = &i915->drm.pdev->dev; + ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); + + i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); + + ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; + ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; + ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; + ppgtt->vm.vma_ops.clear_pages = clear_pages; +} + /* * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers * with a net effect resembling a 2-level page table in normal x86 terms. Each @@ -1616,20 +1546,11 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); - kref_init(&ppgtt->ref); - - ppgtt->vm.i915 = i915; - ppgtt->vm.dma = &i915->drm.pdev->dev; - - ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ? - 1ULL << 48 : - 1ULL << 32; + ppgtt_init(i915, ppgtt); /* From bdw, there is support for read-only pages in the PPGTT. */ ppgtt->vm.has_read_only = true; - i915_address_space_init(&ppgtt->vm, i915); - /* There are only few exceptions for gen >=6. chv and bxt. * And we are not sure about the latter so play safe for now. 
*/ @@ -1640,7 +1561,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) if (err) goto err_free; - if (use_4lvl(&ppgtt->vm)) { + if (i915_vm_is_4lvl(&ppgtt->vm)) { err = setup_px(&ppgtt->vm, &ppgtt->pml4); if (err) goto err_scratch; @@ -1672,12 +1593,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) gen8_ppgtt_notify_vgt(ppgtt, true); ppgtt->vm.cleanup = gen8_ppgtt_cleanup; - ppgtt->debug_dump = gen8_dump_ppgtt; - - ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; - ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; - ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; - ppgtt->vm.vma_ops.clear_pages = clear_pages; return ppgtt; @@ -1688,60 +1603,6 @@ err_free: return ERR_PTR(err); } -static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) -{ - struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); - const gen6_pte_t scratch_pte = base->vm.scratch_pte; - struct i915_page_table *pt; - u32 pte, pde; - - gen6_for_all_pdes(pt, &base->pd, pde) { - gen6_pte_t *vaddr; - - if (pt == base->vm.scratch_pt) - continue; - - if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) { - u32 expected = - GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | - GEN6_PDE_VALID; - u32 pd_entry = readl(ppgtt->pd_addr + pde); - - if (pd_entry != expected) - seq_printf(m, - "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n", - pde, - pd_entry, - expected); - - seq_printf(m, "\tPDE: %x\n", pd_entry); - } - - vaddr = kmap_atomic_px(base->pd.page_table[pde]); - for (pte = 0; pte < GEN6_PTES; pte += 4) { - int i; - - for (i = 0; i < 4; i++) - if (vaddr[pte + i] != scratch_pte) - break; - if (i == 4) - continue; - - seq_printf(m, "\t\t(%03d, %04d) %08llx: ", - pde, pte, - (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); - for (i = 0; i < 4; i++) { - if (vaddr[pte + i] != scratch_pte) - seq_printf(m, " %08x", vaddr[pte + i]); - else - seq_puts(m, " SCRATCH"); - } - seq_puts(m, "\n"); - } - kunmap_atomic(vaddr); - } -} - /* Write pde (index) from the page directory @pd to the page table @pt */ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt, const unsigned int pde, @@ -1808,8 +1669,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, while (num_entries) { struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; - const unsigned int end = min(pte + num_entries, GEN6_PTES); - const unsigned int count = end - pte; + const unsigned int count = min(num_entries, GEN6_PTES - pte); gen6_pte_t *vaddr; GEM_BUG_ON(pt == vm->scratch_pt); @@ -1829,9 +1689,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, */ vaddr = kmap_atomic_px(pt); - do { - vaddr[pte++] = scratch_pte; - } while (pte < end); + memset32(vaddr + pte, scratch_pte, count); kunmap_atomic(vaddr); pte = 0; @@ -2049,25 +1907,27 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(size > ggtt->vm.total); - vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL); + vma = i915_vma_alloc(); if (!vma) return ERR_PTR(-ENOMEM); - init_request_active(&vma->last_fence, NULL); + i915_active_init(i915, &vma->active, NULL); + INIT_ACTIVE_REQUEST(&vma->last_fence); vma->vm = &ggtt->vm; vma->ops = &pd_vma_ops; vma->private = ppgtt; - vma->active = RB_ROOT; - vma->size = size; vma->fence_size = size; vma->flags = I915_VMA_GGTT; vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ INIT_LIST_HEAD(&vma->obj_link); + + mutex_lock(&vma->vm->mutex); list_add(&vma->vm_link, 
&vma->vm->unbound_list); + mutex_unlock(&vma->vm->mutex); return vma; } @@ -2075,6 +1935,9 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) { struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); + int err; + + GEM_BUG_ON(ppgtt->base.vm.closed); /* * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt @@ -2090,9 +1953,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. */ - return i915_vma_pin(ppgtt->vma, - 0, GEN6_PD_ALIGN, - PIN_GLOBAL | PIN_HIGH); + err = i915_vma_pin(ppgtt->vma, + 0, GEN6_PD_ALIGN, + PIN_GLOBAL | PIN_HIGH); + if (err) + goto unpin; + + return 0; + +unpin: + ppgtt->pin_count = 0; + return err; } void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) @@ -2106,6 +1977,17 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) i915_vma_unpin(ppgtt->vma); } +void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base) +{ + struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); + + if (!ppgtt->pin_count) + return; + + ppgtt->pin_count = 0; + i915_vma_unpin(ppgtt->vma); +} + static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) { struct i915_ggtt * const ggtt = &i915->ggtt; @@ -2116,25 +1998,12 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); - kref_init(&ppgtt->base.ref); - - ppgtt->base.vm.i915 = i915; - ppgtt->base.vm.dma = &i915->drm.pdev->dev; - - ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE; - - i915_address_space_init(&ppgtt->base.vm, i915); + ppgtt_init(i915, &ppgtt->base); ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; - ppgtt->base.debug_dump = gen6_dump_ppgtt; - - ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma; - ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma; - ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages; - ppgtt->base.vm.vma_ops.clear_pages = clear_pages; ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; @@ -2195,9 +2064,9 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) { gtt_write_workarounds(dev_priv); - if (IS_GEN6(dev_priv)) + if (IS_GEN(dev_priv, 6)) gen6_ppgtt_enable(dev_priv); - else if (IS_GEN7(dev_priv)) + else if (IS_GEN(dev_priv, 7)) gen7_ppgtt_enable(dev_priv); return 0; @@ -2213,8 +2082,7 @@ __hw_ppgtt_create(struct drm_i915_private *i915) } struct i915_hw_ppgtt * -i915_ppgtt_create(struct drm_i915_private *i915, - struct drm_i915_file_private *fpriv) +i915_ppgtt_create(struct drm_i915_private *i915) { struct i915_hw_ppgtt *ppgtt; @@ -2222,24 +2090,15 @@ i915_ppgtt_create(struct drm_i915_private *i915, if (IS_ERR(ppgtt)) return ppgtt; - ppgtt->vm.file = fpriv; - trace_i915_ppgtt_create(&ppgtt->vm); return ppgtt; } -void i915_ppgtt_close(struct i915_address_space *vm) -{ - GEM_BUG_ON(vm->closed); - vm->closed = true; -} - static void ppgtt_destroy_vma(struct i915_address_space *vm) { struct list_head *phases[] = { - &vm->active_list, - &vm->inactive_list, + &vm->bound_list, &vm->unbound_list, NULL, }, **phase; @@ -2262,8 +2121,7 @@ void i915_ppgtt_release(struct kref *kref) ppgtt_destroy_vma(&ppgtt->vm); - GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list)); - GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list)); + 
GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list)); GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list)); ppgtt->vm.cleanup(&ppgtt->vm); @@ -2279,7 +2137,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv) /* Query intel_iommu to see if we need the workaround. Presumably that * was loaded first. */ - return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active(); + return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active(); } static void gen6_check_faults(struct drm_i915_private *dev_priv) @@ -2372,7 +2230,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, DMA_ATTR_NO_WARN)) return 0; - /* If the DMA remap fails, one cause can be that we have + /* + * If the DMA remap fails, one cause can be that we have * too many objects pinned in a small remapping table, * such as swiotlb. Incrementally purge all other objects and * try again - if there are no more pages to remove from @@ -2382,8 +2241,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, } while (i915_gem_shrink(to_i915(obj->base.dev), obj->base.size >> PAGE_SHIFT, NULL, I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE)); + I915_SHRINK_UNBOUND)); return -ENOSPC; } @@ -2655,6 +2513,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, { struct drm_i915_private *i915 = vma->vm->i915; struct drm_i915_gem_object *obj = vma->obj; + intel_wakeref_t wakeref; u32 pte_flags; /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ @@ -2662,9 +2521,8 @@ static int ggtt_bind_vma(struct i915_vma *vma, if (i915_gem_object_is_readonly(obj)) pte_flags |= PTE_READ_ONLY; - intel_runtime_pm_get(i915); - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; @@ -2681,10 +2539,10 @@ static int ggtt_bind_vma(struct i915_vma *vma, static void ggtt_unbind_vma(struct i915_vma *vma) { struct drm_i915_private *i915 = vma->vm->i915; + intel_wakeref_t wakeref; - intel_runtime_pm_get(i915); - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) + vma->vm->clear_range(vma->vm, vma->node.start, vma->size); } static int aliasing_gtt_bind_vma(struct i915_vma *vma, @@ -2716,9 +2574,12 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, } if (flags & I915_VMA_GLOBAL_BIND) { - intel_runtime_pm_get(i915); - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - intel_runtime_pm_put(i915); + intel_wakeref_t wakeref; + + with_intel_runtime_pm(i915, wakeref) { + vma->vm->insert_entries(vma->vm, vma, + cache_level, pte_flags); + } } return 0; @@ -2729,9 +2590,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) struct drm_i915_private *i915 = vma->vm->i915; if (vma->flags & I915_VMA_GLOBAL_BIND) { - intel_runtime_pm_get(i915); - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); - intel_runtime_pm_put(i915); + struct i915_address_space *vm = vma->vm; + intel_wakeref_t wakeref; + + with_intel_runtime_pm(i915, wakeref) + vm->clear_range(vm, vma->node.start, vma->size); } if (vma->flags & I915_VMA_LOCAL_BIND) { @@ -2798,7 +2661,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) struct i915_hw_ppgtt *ppgtt; int err; - ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM)); + ppgtt = i915_ppgtt_create(i915); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -2923,8 +2786,7 @@ void 
i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); i915_gem_fini_aliasing_ppgtt(dev_priv); - GEM_BUG_ON(!list_empty(&ggtt->vm.active_list)); - list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) + list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) WARN_ON(i915_vma_unbind(vma)); if (drm_mm_node_allocated(&ggtt->error_capture)) @@ -3355,7 +3217,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.insert_entries = gen8_ggtt_insert_entries; /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ - if (intel_ggtt_update_needs_vtd_wa(dev_priv)) { + if (intel_ggtt_update_needs_vtd_wa(dev_priv) || + IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) { ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; if (ggtt->vm.clear_range != nop_clear_range) @@ -3556,7 +3419,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) * and beyond the end of the GTT if we do not provide a guard. */ mutex_lock(&dev_priv->drm.struct_mutex); - i915_address_space_init(&ggtt->vm, dev_priv); + i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); ggtt->vm.is_ggtt = true; @@ -3629,32 +3492,39 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) i915_check_and_clear_faults(dev_priv); + mutex_lock(&ggtt->vm.mutex); + /* First fill our portion of the GTT with scratch pages */ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */ /* clflush objects bound into the GGTT and rebind them. */ - GEM_BUG_ON(!list_empty(&ggtt->vm.active_list)); - list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) { + list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; if (!(vma->flags & I915_VMA_GLOBAL_BIND)) continue; + mutex_unlock(&ggtt->vm.mutex); + if (!i915_vma_unbind(vma)) - continue; + goto lock; WARN_ON(i915_vma_bind(vma, obj ? obj->cache_level : 0, PIN_UPDATE)); if (obj) WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); + +lock: + mutex_lock(&ggtt->vm.mutex); } ggtt->vm.closed = false; i915_ggtt_invalidate(dev_priv); + mutex_unlock(&ggtt->vm.mutex); + if (INTEL_GEN(dev_priv) >= 8) { struct intel_ppat *ppat = &dev_priv->ppat; @@ -3817,7 +3687,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) } ret = 0; - if (unlikely(IS_ERR(vma->pages))) { + if (IS_ERR(vma->pages)) { ret = PTR_ERR(vma->pages); vma->pages = NULL; DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 4874da09a3c4..f597f35b109b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -39,6 +39,7 @@ #include <linux/pagevec.h> #include "i915_request.h" +#include "i915_reset.h" #include "i915_selftest.h" #include "i915_timeline.h" @@ -212,7 +213,6 @@ struct i915_vma; struct i915_page_dma { struct page *page; - int order; union { dma_addr_t daddr; @@ -288,40 +288,23 @@ struct i915_address_space { bool closed; struct mutex mutex; /* protects vma and our lists */ +#define VM_CLASS_GGTT 0 +#define VM_CLASS_PPGTT 1 u64 scratch_pte; + int scratch_order; struct i915_page_dma scratch_page; struct i915_page_table *scratch_pt; struct i915_page_directory *scratch_pd; struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */ /** - * List of objects currently involved in rendering. 
- * - * Includes buffers having the contents of their GPU caches - * flushed, not necessarily primitives. last_read_req - * represents when the rendering involved will be completed. - * - * A reference is held on the buffer while on this list. + * List of vma currently bound. */ - struct list_head active_list; + struct list_head bound_list; /** - * LRU list of objects which are not in the ringbuffer and - * are ready to unbind, but are still in the GTT. - * - * last_read_req is NULL while an object is in this list. - * - * A reference is not held on the buffer while on this list, - * as merely being GTT-bound shouldn't prevent its being - * freed, and we'll pull it off the list in the free path. - */ - struct list_head inactive_list; - - /** - * List of vma that have been unbound. - * - * A reference is not held on the buffer while on this list. + * List of vma that are not unbound. */ struct list_head unbound_list; @@ -365,7 +348,7 @@ struct i915_address_space { #define i915_is_ggtt(vm) ((vm)->is_ggtt) static inline bool -i915_vm_is_48bit(const struct i915_address_space *vm) +i915_vm_is_4lvl(const struct i915_address_space *vm) { return (vm->total - 1) >> 32; } @@ -373,7 +356,7 @@ i915_vm_is_48bit(const struct i915_address_space *vm) static inline bool i915_vm_has_scratch_64K(struct i915_address_space *vm) { - return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K); + return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K); } /* The Graphics Translation Table is the way in which GEN hardware translates a @@ -407,14 +390,14 @@ struct i915_hw_ppgtt { struct i915_address_space vm; struct kref ref; - unsigned long pd_dirty_rings; + intel_engine_mask_t pd_dirty_engines; union { struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */ struct i915_page_directory_pointer pdp; /* GEN8+ */ struct i915_page_directory pd; /* GEN6-7 */ }; - void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); + u32 user_handle; }; struct gen6_hw_ppgtt { @@ -507,7 +490,7 @@ static inline u32 gen6_pde_index(u32 addr) static inline unsigned int i915_pdpes_per_pdp(const struct i915_address_space *vm) { - if (i915_vm_is_48bit(vm)) + if (i915_vm_is_4lvl(vm)) return GEN8_PML4ES_PER_PML4; return GEN8_3LVL_PDPES; @@ -622,15 +605,16 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv); void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv); + +struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); void i915_ppgtt_release(struct kref *kref); -struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv, - struct drm_i915_file_private *fpriv); -void i915_ppgtt_close(struct i915_address_space *vm); -static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) + +static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) { - if (ppgtt) - kref_get(&ppgtt->ref); + kref_get(&ppgtt->ref); + return ppgtt; } + static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) { if (ppgtt) @@ -639,6 +623,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) int gen6_ppgtt_pin(struct i915_hw_ppgtt *base); void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base); +void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base); void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv); @@ -661,19 +646,19 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, /* Flags used by pin/bind&friends. 
*/ #define PIN_NONBLOCK BIT_ULL(0) -#define PIN_MAPPABLE BIT_ULL(1) -#define PIN_ZONE_4G BIT_ULL(2) -#define PIN_NONFAULT BIT_ULL(3) -#define PIN_NOEVICT BIT_ULL(4) - -#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */ -#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */ -#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */ -#define PIN_UPDATE BIT_ULL(8) - -#define PIN_HIGH BIT_ULL(9) -#define PIN_OFFSET_BIAS BIT_ULL(10) -#define PIN_OFFSET_FIXED BIT_ULL(11) +#define PIN_NONFAULT BIT_ULL(1) +#define PIN_NOEVICT BIT_ULL(2) +#define PIN_MAPPABLE BIT_ULL(3) +#define PIN_ZONE_4G BIT_ULL(4) +#define PIN_HIGH BIT_ULL(5) +#define PIN_OFFSET_BIAS BIT_ULL(6) +#define PIN_OFFSET_FIXED BIT_ULL(7) + +#define PIN_MBZ BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */ +#define PIN_GLOBAL BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */ +#define PIN_USER BIT_ULL(10) /* I915_VMA_LOCAL_BIND */ +#define PIN_UPDATE BIT_ULL(11) + #define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) #endif diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c index 0d0144b2104c..ab627ed1269c 100644 --- a/drivers/gpu/drm/i915/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/i915_gem_internal.c @@ -22,7 +22,6 @@ * */ -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -194,7 +193,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(i915); + obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c index aab8cdd80e6d..ac6a5ab84586 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.c +++ b/drivers/gpu/drm/i915/i915_gem_object.c @@ -24,6 +24,22 @@ #include "i915_drv.h" #include "i915_gem_object.h" +#include "i915_globals.h" + +static struct i915_global_object { + struct i915_global base; + struct kmem_cache *slab_objects; +} global; + +struct drm_i915_gem_object *i915_gem_object_alloc(void) +{ + return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL); +} + +void i915_gem_object_free(struct drm_i915_gem_object *obj) +{ + return kmem_cache_free(global.slab_objects, obj); +} /** * Mark up the object's coherency levels for a given cache_level @@ -46,3 +62,29 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, obj->cache_dirty = !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); } + +static void i915_global_objects_shrink(void) +{ + kmem_cache_shrink(global.slab_objects); +} + +static void i915_global_objects_exit(void) +{ + kmem_cache_destroy(global.slab_objects); +} + +static struct i915_global_object global = { { + .shrink = i915_global_objects_shrink, + .exit = i915_global_objects_exit, +} }; + +int __init i915_global_objects_init(void) +{ + global.slab_objects = + KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); + if (!global.slab_objects) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index a6dd7c46de0d..ca93a40c0c87 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -29,7 +29,8 @@ #include <drm/drm_vma_manager.h> #include <drm/drm_gem.h> -#include <drm/drmP.h> +#include <drm/drm_file.h> +#include <drm/drm_device.h> #include <drm/i915_drm.h> @@ -56,6 +57,7 @@ struct drm_i915_gem_object_ops { #define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0) #define I915_GEM_OBJECT_IS_SHRINKABLE 
BIT(1) #define I915_GEM_OBJECT_IS_PROXY BIT(2) +#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3) /* Interface between the GEM object and its backing storage. * get_pages() is called once prior to the use of the associated set @@ -85,24 +87,33 @@ struct drm_i915_gem_object { const struct drm_i915_gem_object_ops *ops; - /** - * @vma_list: List of VMAs backed by this object - * - * The VMA on this list are ordered by type, all GGTT vma are placed - * at the head and all ppGTT vma are placed at the tail. The different - * types of GGTT vma are unordered between themselves, use the - * @vma_tree (which has a defined order between all VMA) to find an - * exact match. - */ - struct list_head vma_list; - /** - * @vma_tree: Ordered tree of VMAs backed by this object - * - * All VMA created for this object are placed in the @vma_tree for - * fast retrieval via a binary search in i915_vma_instance(). - * They are also added to @vma_list for easy iteration. - */ - struct rb_root vma_tree; + struct { + /** + * @vma.lock: protect the list/tree of vmas + */ + spinlock_t lock; + + /** + * @vma.list: List of VMAs backed by this object + * + * The VMA on this list are ordered by type, all GGTT vma are + * placed at the head and all ppGTT vma are placed at the tail. + * The different types of GGTT vma are unordered between + * themselves, use the @vma.tree (which has a defined order + * between all VMA) to quickly find an exact match. + */ + struct list_head list; + + /** + * @vma.tree: Ordered tree of VMAs backed by this object + * + * All VMA created for this object are placed in the @vma.tree + * for fast retrieval via a binary search in + * i915_vma_instance(). They are also added to @vma.list for + * easy iteration. + */ + struct rb_root tree; + } vma; /** * @lut_list: List of vma lookup entries in use for this object. @@ -164,7 +175,7 @@ struct drm_i915_gem_object { atomic_t frontbuffer_bits; unsigned int frontbuffer_ggtt_origin; /* write once */ - struct i915_gem_active frontbuffer_write; + struct i915_active_request frontbuffer_write; /** Current tiling stride for the object, if it's tiled. 
*/ unsigned int tiling_and_stride; @@ -293,6 +304,9 @@ to_intel_bo(struct drm_gem_object *gem) return container_of(gem, struct drm_i915_gem_object, base); } +struct drm_i915_gem_object *i915_gem_object_alloc(void); +void i915_gem_object_free(struct drm_i915_gem_object *obj); + /** * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle * @filp: DRM file private date @@ -387,6 +401,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) } static inline bool +i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj) +{ + return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL; +} + +static inline bool i915_gem_object_is_active(const struct drm_i915_gem_object *obj) { return obj->active_count; @@ -482,5 +502,8 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level); void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); -#endif +void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, + struct sg_table *pages, + bool needs_clflush); +#endif diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 90baf9086d0a..9440024c763f 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -42,7 +42,7 @@ struct intel_render_state { static const struct intel_renderstate_rodata * render_state_get_rodata(const struct intel_engine_cs *engine) { - if (engine->id != RCS) + if (engine->id != RCS0) return NULL; switch (INTEL_GEN(engine->i915)) { @@ -164,7 +164,7 @@ static int render_state_setup(struct intel_render_state *so, drm_clflush_virt_range(d, i * sizeof(u32)); kunmap_atomic(d); - ret = i915_gem_object_set_to_gtt_domain(so->obj, false); + ret = 0; out: i915_gem_obj_finish_shmem_access(so->obj); return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index ea90d3a0d511..6da795c7e62e 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -30,30 +30,27 @@ #include <linux/pci.h> #include <linux/dma-buf.h> #include <linux/vmalloc.h> -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_trace.h" -static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock) +static bool shrinker_lock(struct drm_i915_private *i915, + unsigned int flags, + bool *unlock) { - switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) { + struct mutex *m = &i915->drm.struct_mutex; + + switch (mutex_trylock_recursive(m)) { case MUTEX_TRYLOCK_RECURSIVE: *unlock = false; return true; case MUTEX_TRYLOCK_FAILED: *unlock = false; - preempt_disable(); - do { - cpu_relax(); - if (mutex_trylock(&i915->drm.struct_mutex)) { - *unlock = true; - break; - } - } while (!need_resched()); - preempt_enable(); + if (flags & I915_SHRINK_ACTIVE && + mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0) + *unlock = true; return *unlock; case MUTEX_TRYLOCK_SUCCESS: @@ -156,11 +153,12 @@ i915_gem_shrink(struct drm_i915_private *i915, { &i915->mm.bound_list, I915_SHRINK_BOUND }, { NULL, 0 }, }, *phase; + intel_wakeref_t wakeref = 0; unsigned long count = 0; unsigned long scanned = 0; bool unlock; - if (!shrinker_lock(i915, &unlock)) + if (!shrinker_lock(i915, flags, &unlock)) return 0; /* @@ -185,9 +183,11 @@ i915_gem_shrink(struct drm_i915_private *i915, * device just to recover a little memory. If absolutely necessary, * we will force the wake during oom-notifier. 
*/ - if ((flags & I915_SHRINK_BOUND) && - !intel_runtime_pm_get_if_in_use(i915)) - flags &= ~I915_SHRINK_BOUND; + if (flags & I915_SHRINK_BOUND) { + wakeref = intel_runtime_pm_get_if_in_use(i915); + if (!wakeref) + flags &= ~I915_SHRINK_BOUND; + } /* * As we may completely rewrite the (un)bound list whilst unbinding @@ -268,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915, } if (flags & I915_SHRINK_BOUND) - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); i915_retire_requests(i915); @@ -295,14 +295,15 @@ i915_gem_shrink(struct drm_i915_private *i915, */ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915) { - unsigned long freed; - - intel_runtime_pm_get(i915); - freed = i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE); - intel_runtime_pm_put(i915); + intel_wakeref_t wakeref; + unsigned long freed = 0; + + with_intel_runtime_pm(i915, wakeref) { + freed = i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE); + } return freed; } @@ -357,7 +358,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) sc->nr_scanned = 0; - if (!shrinker_lock(i915, &unlock)) + if (!shrinker_lock(i915, 0, &unlock)) return SHRINK_STOP; freed = i915_gem_shrink(i915, @@ -373,14 +374,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { - intel_runtime_pm_get(i915); - freed += i915_gem_shrink(i915, - sc->nr_to_scan - sc->nr_scanned, - &sc->nr_scanned, - I915_SHRINK_ACTIVE | - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND); - intel_runtime_pm_put(i915); + intel_wakeref_t wakeref; + + with_intel_runtime_pm(i915, wakeref) { + freed += i915_gem_shrink(i915, + sc->nr_to_scan - sc->nr_scanned, + &sc->nr_scanned, + I915_SHRINK_ACTIVE | + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND); + } } shrinker_unlock(i915, unlock); @@ -388,31 +391,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) return sc->nr_scanned ? 
freed : SHRINK_STOP; } -static bool -shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock, - int timeout_ms) -{ - unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms); - - do { - if (i915_gem_wait_for_idle(i915, - 0, MAX_SCHEDULE_TIMEOUT) == 0 && - shrinker_lock(i915, unlock)) - break; - - schedule_timeout_killable(1); - if (fatal_signal_pending(current)) - return false; - - if (time_after(jiffies, timeout)) { - pr_err("Unable to lock GPU to purge memory.\n"); - return false; - } - } while (1); - - return true; -} - static int i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -420,8 +398,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) container_of(nb, struct drm_i915_private, mm.oom_notifier); struct drm_i915_gem_object *obj; unsigned long unevictable, bound, unbound, freed_pages; + intel_wakeref_t wakeref; - freed_pages = i915_gem_shrink_all(i915); + freed_pages = 0; + with_intel_runtime_pm(i915, wakeref) + freed_pages += i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND); /* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not @@ -447,10 +430,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) pr_info("Purging GPU memory, %lu pages freed, " "%lu pages still pinned.\n", freed_pages, unevictable); - if (unbound || bound) - pr_err("%lu and %lu pages still available in the " - "bound and unbound GPU page lists.\n", - bound, unbound); *(unsigned long *)ptr += freed_pages; return NOTIFY_DONE; @@ -463,34 +442,39 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr container_of(nb, struct drm_i915_private, mm.vmap_notifier); struct i915_vma *vma, *next; unsigned long freed_pages = 0; + intel_wakeref_t wakeref; bool unlock; - int ret; - if (!shrinker_lock_uninterruptible(i915, &unlock, 5000)) + if (!shrinker_lock(i915, 0, &unlock)) return NOTIFY_DONE; /* Force everything onto the inactive lists */ - ret = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (ret) + if (i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT)) goto out; - intel_runtime_pm_get(i915); - freed_pages += i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE | - I915_SHRINK_VMAPS); - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) + freed_pages += i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_VMAPS); /* We also want to clear any cached iomaps as they wrap vmap */ + mutex_lock(&i915->ggtt.vm.mutex); list_for_each_entry_safe(vma, next, - &i915->ggtt.vm.inactive_list, vm_link) { + &i915->ggtt.vm.bound_list, vm_link) { unsigned long count = vma->node.size >> PAGE_SHIFT; - if (vma->iomap && i915_vma_unbind(vma) == 0) + + if (!vma->iomap || i915_vma_is_active(vma)) + continue; + + mutex_unlock(&i915->ggtt.vm.mutex); + if (i915_vma_unbind(vma) == 0) freed_pages += count; + mutex_lock(&i915->ggtt.vm.mutex); } + mutex_unlock(&i915->ggtt.vm.mutex); out: shrinker_unlock(i915, unlock); @@ -533,13 +517,40 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915) unregister_shrinker(&i915->mm.shrinker); } -void i915_gem_shrinker_taints_mutex(struct mutex *mutex) +void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, + struct mutex *mutex) { + bool unlock = false; + if 
(!IS_ENABLED(CONFIG_LOCKDEP)) return; + if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) { + mutex_acquire(&i915->drm.struct_mutex.dep_map, + I915_MM_NORMAL, 0, _RET_IP_); + unlock = true; + } + fs_reclaim_acquire(GFP_KERNEL); - mutex_lock(mutex); - mutex_unlock(mutex); + + /* + * As we invariably rely on the struct_mutex within the shrinker, + * but have a complicated recursion dance, taint all the mutexes used + * within the shrinker with the struct_mutex. For completeness, we + * taint with all subclass of struct_mutex, even though we should + * only need tainting by I915_MM_NORMAL to catch possible ABBA + * deadlocks from using struct_mutex inside @mutex. + */ + mutex_acquire(&i915->drm.struct_mutex.dep_map, + I915_MM_SHRINKER, 0, _RET_IP_); + + mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_); + mutex_release(&mutex->dep_map, 0, _RET_IP_); + + mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_); + fs_reclaim_release(GFP_KERNEL); + + if (unlock) + mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_); } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index f29a7ff7c362..0a8082cfc761 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -26,7 +26,6 @@ * */ -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -102,7 +101,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv, resource_size_t ggtt_start; ggtt_start = I915_READ(PGTBL_CTL); - if (IS_GEN4(dev_priv)) + if (IS_GEN(dev_priv, 4)) ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; else @@ -156,7 +155,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv, * GEN3 firmware likes to smash pci bridges into the stolen * range. Apparently this works. */ - if (r == NULL && !IS_GEN3(dev_priv)) { + if (r == NULL && !IS_GEN(dev_priv, 3)) { DRM_ERROR("conflict detected with stolen region: %pR\n", dsm); @@ -194,7 +193,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, * Whether ILK really reuses the ELK register for this is unclear. * Let's see if we catch anyone with this supposedly enabled on ILK. */ - WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val); + WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 
0x%08x\n", + reg_val); if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) return; @@ -565,7 +565,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *obj; unsigned int cache_level; - obj = i915_gem_object_alloc(dev_priv); + obj = i915_gem_object_alloc(); if (obj == NULL) return NULL; @@ -701,7 +701,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv vma->pages = obj->mm.pages; vma->flags |= I915_VMA_GLOBAL_BIND; __i915_vma_set_map_and_fenceable(vma); - list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list); + + mutex_lock(&ggtt->vm.mutex); + list_move_tail(&vma->vm_link, &ggtt->vm.bound_list); + mutex_unlock(&ggtt->vm.mutex); spin_lock(&dev_priv->mm.obj_lock); list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index d9dc9df523b5..a9b5329dae3b 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -27,7 +27,6 @@ #include <linux/string.h> #include <linux/bitops.h> -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -87,7 +86,7 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915, } /* Previous chips need a power-of-two fence region when tiling */ - if (IS_GEN3(i915)) + if (IS_GEN(i915, 3)) ggtt_size = 1024*1024; else ggtt_size = 512*1024; @@ -162,7 +161,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj, return false; } - if (IS_GEN2(i915) || + if (IS_GEN(i915, 2) || (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915))) tile_width = 128; else @@ -302,11 +301,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, /* Try to preallocate memory required to save swizzling on put-pages */ if (i915_gem_object_needs_bit17_swizzle(obj)) { if (!obj->bit_17) { - obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT), - sizeof(long), GFP_KERNEL); + obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT, + GFP_KERNEL); } } else { - kfree(obj->bit_17); + bitmap_free(obj->bit_17); obj->bit_17 = NULL; } diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 2c9b284036d1..215bf3fef10c 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -22,7 +22,6 @@ * */ -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_trace.h" @@ -50,94 +49,86 @@ struct i915_mmu_notifier { struct hlist_node node; struct mmu_notifier mn; struct rb_root_cached objects; - struct workqueue_struct *wq; + struct i915_mm_struct *mm; }; struct i915_mmu_object { struct i915_mmu_notifier *mn; struct drm_i915_gem_object *obj; struct interval_tree_node it; - struct list_head link; - struct work_struct work; - bool attached; }; -static void cancel_userptr(struct work_struct *work) +static void add_object(struct i915_mmu_object *mo) { - struct i915_mmu_object *mo = container_of(work, typeof(*mo), work); - struct drm_i915_gem_object *obj = mo->obj; - struct work_struct *active; - - /* Cancel any active worker and force us to re-evaluate gup */ - mutex_lock(&obj->mm.lock); - active = fetch_and_zero(&obj->userptr.work); - mutex_unlock(&obj->mm.lock); - if (active) - goto out; - - i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL); - - mutex_lock(&obj->base.dev->struct_mutex); - - /* We are inside a kthread context and can't be interrupted */ - if (i915_gem_object_unbind(obj) == 0) - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - 
WARN_ONCE(i915_gem_object_has_pages(obj), - "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n", - obj->bind_count, - atomic_read(&obj->mm.pages_pin_count), - obj->pin_global); - - mutex_unlock(&obj->base.dev->struct_mutex); - -out: - i915_gem_object_put(obj); + GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb)); + interval_tree_insert(&mo->it, &mo->mn->objects); } -static void add_object(struct i915_mmu_object *mo) +static void del_object(struct i915_mmu_object *mo) { - if (mo->attached) + if (RB_EMPTY_NODE(&mo->it.rb)) return; - interval_tree_insert(&mo->it, &mo->mn->objects); - mo->attached = true; + interval_tree_remove(&mo->it, &mo->mn->objects); + RB_CLEAR_NODE(&mo->it.rb); } -static void del_object(struct i915_mmu_object *mo) +static void +__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value) { - if (!mo->attached) + struct i915_mmu_object *mo = obj->userptr.mmu_object; + + /* + * During mm_invalidate_range we need to cancel any userptr that + * overlaps the range being invalidated. Doing so requires the + * struct_mutex, and that risks recursion. In order to cause + * recursion, the user must alias the userptr address space with + * a GTT mmapping (possible with a MAP_FIXED) - then when we have + * to invalidate that mmaping, mm_invalidate_range is called with + * the userptr address *and* the struct_mutex held. To prevent that + * we set a flag under the i915_mmu_notifier spinlock to indicate + * whether this object is valid. + */ + if (!mo) return; - interval_tree_remove(&mo->it, &mo->mn->objects); - mo->attached = false; + spin_lock(&mo->mn->lock); + if (value) + add_object(mo); + else + del_object(mo); + spin_unlock(&mo->mn->lock); } -static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end, - bool blockable) +static int +userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, + const struct mmu_notifier_range *range) { struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); - struct i915_mmu_object *mo; struct interval_tree_node *it; - LIST_HEAD(cancelled); + struct mutex *unlock = NULL; + unsigned long end; + int ret = 0; if (RB_EMPTY_ROOT(&mn->objects.rb_root)) return 0; /* interval ranges are inclusive, but invalidate range is exclusive */ - end--; + end = range->end - 1; spin_lock(&mn->lock); - it = interval_tree_iter_first(&mn->objects, start, end); + it = interval_tree_iter_first(&mn->objects, range->start, end); while (it) { - if (!blockable) { - spin_unlock(&mn->lock); - return -EAGAIN; + struct drm_i915_gem_object *obj; + + if (!range->blockable) { + ret = -EAGAIN; + break; } - /* The mmu_object is released late when destroying the + + /* + * The mmu_object is released late when destroying the * GEM object so it is entirely possible to gain a * reference on an object in the process of being freed * since our serialisation is via the spinlock and not @@ -146,29 +137,65 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, * use-after-free we only acquire a reference on the * object if it is not in the process of being destroyed. 
*/ - mo = container_of(it, struct i915_mmu_object, it); - if (kref_get_unless_zero(&mo->obj->base.refcount)) - queue_work(mn->wq, &mo->work); + obj = container_of(it, struct i915_mmu_object, it)->obj; + if (!kref_get_unless_zero(&obj->base.refcount)) { + it = interval_tree_iter_next(it, range->start, end); + continue; + } + spin_unlock(&mn->lock); + + if (!unlock) { + unlock = &mn->mm->i915->drm.struct_mutex; + + switch (mutex_trylock_recursive(unlock)) { + default: + case MUTEX_TRYLOCK_FAILED: + if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) { + i915_gem_object_put(obj); + return -EINTR; + } + /* fall through */ + case MUTEX_TRYLOCK_SUCCESS: + break; + + case MUTEX_TRYLOCK_RECURSIVE: + unlock = ERR_PTR(-EEXIST); + break; + } + } + + ret = i915_gem_object_unbind(obj); + if (ret == 0) + ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); + i915_gem_object_put(obj); + if (ret) + goto unlock; - list_add(&mo->link, &cancelled); - it = interval_tree_iter_next(it, start, end); + spin_lock(&mn->lock); + + /* + * As we do not (yet) protect the mmu from concurrent insertion + * over this range, there is no guarantee that this search will + * terminate given a pathologic workload. + */ + it = interval_tree_iter_first(&mn->objects, range->start, end); } - list_for_each_entry(mo, &cancelled, link) - del_object(mo); spin_unlock(&mn->lock); - if (!list_empty(&cancelled)) - flush_workqueue(mn->wq); +unlock: + if (!IS_ERR_OR_NULL(unlock)) + mutex_unlock(unlock); + + return ret; - return 0; } static const struct mmu_notifier_ops i915_gem_userptr_notifier = { - .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start, + .invalidate_range_start = userptr_mn_invalidate_range_start, }; static struct i915_mmu_notifier * -i915_mmu_notifier_create(struct mm_struct *mm) +i915_mmu_notifier_create(struct i915_mm_struct *mm) { struct i915_mmu_notifier *mn; @@ -179,13 +206,7 @@ i915_mmu_notifier_create(struct mm_struct *mm) spin_lock_init(&mn->lock); mn->mn.ops = &i915_gem_userptr_notifier; mn->objects = RB_ROOT_CACHED; - mn->wq = alloc_workqueue("i915-userptr-release", - WQ_UNBOUND | WQ_MEM_RECLAIM, - 0); - if (mn->wq == NULL) { - kfree(mn); - return ERR_PTR(-ENOMEM); - } + mn->mm = mm; return mn; } @@ -195,16 +216,14 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) { struct i915_mmu_object *mo; - mo = obj->userptr.mmu_object; - if (mo == NULL) + mo = fetch_and_zero(&obj->userptr.mmu_object); + if (!mo) return; spin_lock(&mo->mn->lock); del_object(mo); spin_unlock(&mo->mn->lock); kfree(mo); - - obj->userptr.mmu_object = NULL; } static struct i915_mmu_notifier * @@ -217,7 +236,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm) if (mn) return mn; - mn = i915_mmu_notifier_create(mm->mm); + mn = i915_mmu_notifier_create(mm); if (IS_ERR(mn)) err = PTR_ERR(mn); @@ -240,10 +259,8 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm) mutex_unlock(&mm->i915->mm_lock); up_write(&mm->mm->mmap_sem); - if (mn && !IS_ERR(mn)) { - destroy_workqueue(mn->wq); + if (mn && !IS_ERR(mn)) kfree(mn); - } return err ? 
ERR_PTR(err) : mm->mn; } @@ -266,14 +283,14 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, return PTR_ERR(mn); mo = kzalloc(sizeof(*mo), GFP_KERNEL); - if (mo == NULL) + if (!mo) return -ENOMEM; mo->mn = mn; mo->obj = obj; mo->it.start = obj->userptr.ptr; mo->it.last = obj->userptr.ptr + obj->base.size - 1; - INIT_WORK(&mo->work, cancel_userptr); + RB_CLEAR_NODE(&mo->it.rb); obj->userptr.mmu_object = mo; return 0; @@ -287,13 +304,17 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn, return; mmu_notifier_unregister(&mn->mn, mm); - destroy_workqueue(mn->wq); kfree(mn); } #else static void +__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value) +{ +} + +static void i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) { } @@ -461,42 +482,6 @@ alloc_table: return st; } -static int -__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, - bool value) -{ - int ret = 0; - - /* During mm_invalidate_range we need to cancel any userptr that - * overlaps the range being invalidated. Doing so requires the - * struct_mutex, and that risks recursion. In order to cause - * recursion, the user must alias the userptr address space with - * a GTT mmapping (possible with a MAP_FIXED) - then when we have - * to invalidate that mmaping, mm_invalidate_range is called with - * the userptr address *and* the struct_mutex held. To prevent that - * we set a flag under the i915_mmu_notifier spinlock to indicate - * whether this object is valid. - */ -#if defined(CONFIG_MMU_NOTIFIER) - if (obj->userptr.mmu_object == NULL) - return 0; - - spin_lock(&obj->userptr.mmu_object->mn->lock); - /* In order to serialise get_pages with an outstanding - * cancel_userptr, we must drop the struct_mutex and try again. - */ - if (!value) - del_object(obj->userptr.mmu_object); - else if (!work_pending(&obj->userptr.mmu_object->work)) - add_object(obj->userptr.mmu_object); - else - ret = -EAGAIN; - spin_unlock(&obj->userptr.mmu_object->mn->lock); -#endif - - return ret; -} - static void __i915_gem_userptr_get_pages_worker(struct work_struct *_work) { @@ -682,12 +667,13 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, struct sgt_iter sgt_iter; struct page *page; - BUG_ON(obj->userptr.work != NULL); + /* Cancel any inflight work and force them to restart their gup */ + obj->userptr.work = NULL; __i915_gem_userptr_set_active(obj, false); + if (!pages) + return; - if (obj->mm.madv != I915_MADV_WILLNEED) - obj->mm.dirty = false; - + __i915_gem_object_release_shmem(obj, pages, true); i915_gem_gtt_finish_pages(obj, pages); for_each_sgt_page(page, sgt_iter, pages) { @@ -721,7 +707,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE, + I915_GEM_OBJECT_IS_SHRINKABLE | + I915_GEM_OBJECT_ASYNC_CANCEL, .get_pages = i915_gem_userptr_get_pages, .put_pages = i915_gem_userptr_put_pages, .dmabuf_export = i915_gem_userptr_dmabuf_export, @@ -791,8 +778,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, if (offset_in_page(args->user_ptr | args->user_size)) return -EINVAL; - if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? 
VERIFY_READ : VERIFY_WRITE, - (char __user *)(unsigned long)args->user_ptr, args->user_size)) + if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size)) return -EFAULT; if (args->flags & I915_USERPTR_READ_ONLY) { @@ -807,7 +793,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -ENODEV; } - obj = i915_gem_object_alloc(dev_priv); + obj = i915_gem_object_alloc(); if (obj == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c new file mode 100644 index 000000000000..2f5c72e2a9d1 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_globals.c @@ -0,0 +1,135 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <linux/slab.h> +#include <linux/workqueue.h> + +#include "i915_active.h" +#include "i915_gem_context.h" +#include "i915_gem_object.h" +#include "i915_globals.h" +#include "i915_request.h" +#include "i915_scheduler.h" +#include "i915_vma.h" + +static LIST_HEAD(globals); + +void __init i915_global_register(struct i915_global *global) +{ + GEM_BUG_ON(!global->shrink); + GEM_BUG_ON(!global->exit); + + list_add_tail(&global->link, &globals); +} + +static void __i915_globals_cleanup(void) +{ + struct i915_global *global, *next; + + list_for_each_entry_safe_reverse(global, next, &globals, link) + global->exit(); +} + +static __initconst int (* const initfn[])(void) = { + i915_global_active_init, + i915_global_context_init, + i915_global_gem_context_init, + i915_global_objects_init, + i915_global_request_init, + i915_global_scheduler_init, + i915_global_vma_init, +}; + +int __init i915_globals_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(initfn); i++) { + int err; + + err = initfn[i](); + if (err) { + __i915_globals_cleanup(); + return err; + } + } + + return 0; +} + +static void i915_globals_shrink(void) +{ + struct i915_global *global; + + /* + * kmem_cache_shrink() discards empty slabs and reorders partially + * filled slabs to prioritise allocating from the mostly full slabs, + * with the aim of reducing fragmentation. + */ + list_for_each_entry(global, &globals, link) + global->shrink(); +} + +static atomic_t active; +static atomic_t epoch; +struct park_work { + struct rcu_work work; + int epoch; +}; + +static void __i915_globals_park(struct work_struct *work) +{ + struct park_work *wrk = container_of(work, typeof(*wrk), work.work); + + /* Confirm nothing woke up in the last grace period */ + if (wrk->epoch == atomic_read(&epoch)) + i915_globals_shrink(); + + kfree(wrk); +} + +void i915_globals_park(void) +{ + struct park_work *wrk; + + /* + * Defer shrinking the global slab caches (and other work) until + * after a RCU grace period has completed with no activity. This + * is to try and reduce the latency impact on the consumers caused + * by us shrinking the caches the same time as they are trying to + * allocate, with the assumption being that if we idle long enough + * for an RCU grace period to elapse since the last use, it is likely + * to be longer until we need the caches again. 
+ */ + if (!atomic_dec_and_test(&active)) + return; + + wrk = kmalloc(sizeof(*wrk), GFP_KERNEL); + if (!wrk) + return; + + wrk->epoch = atomic_inc_return(&epoch); + INIT_RCU_WORK(&wrk->work, __i915_globals_park); + queue_rcu_work(system_wq, &wrk->work); +} + +void i915_globals_unpark(void) +{ + atomic_inc(&epoch); + atomic_inc(&active); +} + +void __exit i915_globals_exit(void) +{ + /* Flush any residual park_work */ + rcu_barrier(); + flush_scheduled_work(); + + __i915_globals_cleanup(); + + /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ + rcu_barrier(); +} diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h new file mode 100644 index 000000000000..04c1ce107fc0 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_globals.h @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef _I915_GLOBALS_H_ +#define _I915_GLOBALS_H_ + +typedef void (*i915_global_func_t)(void); + +struct i915_global { + struct list_head link; + + i915_global_func_t shrink; + i915_global_func_t exit; +}; + +void i915_global_register(struct i915_global *global); + +int i915_globals_init(void); +void i915_globals_park(void); +void i915_globals_unpark(void); +void i915_globals_exit(void); + +/* constructors */ +int i915_global_active_init(void); +int i915_global_context_init(void); +int i915_global_gem_context_init(void); +int i915_global_objects_init(void); +int i915_global_request_init(void); +int i915_global_scheduler_init(void); +int i915_global_vma_init(void); + +#endif /* _I915_GLOBALS_H_ */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 07465123c166..c65d45bc63ee 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -380,19 +380,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, err_printf(m, "%s [%d]:\n", name, count); while (count--) { - err_printf(m, " %08x_%08x %8u %02x %02x %02x", + err_printf(m, " %08x_%08x %8u %02x %02x", upper_32_bits(err->gtt_offset), lower_32_bits(err->gtt_offset), err->size, err->read_domains, - err->write_domain, - err->wseqno); + err->write_domain); err_puts(m, tiling_flag(err->tiling)); err_puts(m, dirty_flag(err->dirty)); err_puts(m, purgeable_flag(err->purgeable)); err_puts(m, err->userptr ? " userptr" : ""); - err_puts(m, err->engine != -1 ? " " : ""); - err_puts(m, engine_name(m->i915, err->engine)); err_puts(m, i915_cache_level_str(m->i915, err->cache_level)); if (err->name) @@ -414,7 +411,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone.instdone); - if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3) + if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3) return; err_printf(m, " SC_INSTDONE: 0x%08x\n", @@ -434,11 +431,6 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, ee->instdone.row[slice][subslice]); } -static const char *bannable(const struct drm_i915_error_context *ctx) -{ - return ctx->bannable ? 
"" : " (unbannable)"; -} - static void error_print_request(struct drm_i915_error_state_buf *m, const char *prefix, const struct drm_i915_error_request *erq, @@ -447,9 +439,13 @@ static void error_print_request(struct drm_i915_error_state_buf *m, if (!erq->seqno) return; - err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n", - prefix, erq->pid, erq->ban_score, - erq->context, erq->seqno, erq->sched_attr.priority, + err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n", + prefix, erq->pid, erq->context, erq->seqno, + test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &erq->flags) ? "!" : "", + test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &erq->flags) ? "+" : "", + erq->sched_attr.priority, jiffies_to_msecs(erq->jiffies - epoch), erq->start, erq->head, erq->tail); } @@ -458,10 +454,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m, const char *header, const struct drm_i915_error_context *ctx) { - err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n", - header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id, - ctx->sched_attr.priority, ctx->ban_score, bannable(ctx), - ctx->guilty, ctx->active); + err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n", + header, ctx->comm, ctx->pid, ctx->hw_id, + ctx->sched_attr.priority, ctx->guilty, ctx->active); } static void error_print_engine(struct drm_i915_error_state_buf *m, @@ -507,13 +502,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, if (INTEL_GEN(m->i915) >= 6) { err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi); err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg); - err_printf(m, " SYNC_0: 0x%08x\n", - ee->semaphore_mboxes[0]); - err_printf(m, " SYNC_1: 0x%08x\n", - ee->semaphore_mboxes[1]); - if (HAS_VEBOX(m->i915)) - err_printf(m, " SYNC_2: 0x%08x\n", - ee->semaphore_mboxes[2]); } if (HAS_PPGTT(m->i915)) { err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); @@ -528,15 +516,9 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, ee->vm_info.pp_dir_base); } } - err_printf(m, " seqno: 0x%08x\n", ee->seqno); - err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno); - err_printf(m, " waiting: %s\n", yesno(ee->waiting)); err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); - err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled)); - err_printf(m, " hangcheck action: %s\n", - hangcheck_action_to_str(ee->hangcheck_action)); - err_printf(m, " hangcheck action timestamp: %dms (%lu%s)\n", + err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n", jiffies_to_msecs(ee->hangcheck_timestamp - epoch), ee->hangcheck_timestamp, ee->hangcheck_timestamp == epoch ? 
"; epoch" : ""); @@ -594,13 +576,14 @@ static void print_error_obj(struct drm_i915_error_state_buf *m, static void err_print_capabilities(struct drm_i915_error_state_buf *m, const struct intel_device_info *info, + const struct intel_runtime_info *runtime, const struct intel_driver_caps *caps) { struct drm_printer p = i915_error_printer(m); intel_device_info_dump_flags(info, &p); intel_driver_caps_print(caps, &p); - intel_device_info_dump_topology(&info->sseu, &p); + intel_device_info_dump_topology(&runtime->sseu, &p); } static void err_print_params(struct drm_i915_error_state_buf *m, @@ -664,7 +647,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, if (*error->error_msg) err_printf(m, "%s\n", error->error_msg); - err_printf(m, "Kernel: %s\n", init_utsname()->release); + err_printf(m, "Kernel: %s %s\n", + init_utsname()->release, + init_utsname()->machine); ts = ktime_to_timespec64(error->time); err_printf(m, "Time: %lld s %ld us\n", (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); @@ -681,19 +666,20 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, jiffies_to_msecs(error->capture - error->epoch)); for (i = 0; i < ARRAY_SIZE(error->engine); i++) { - if (error->engine[i].hangcheck_stalled && - error->engine[i].context.pid) { - err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n", - engine_name(m->i915, i), - error->engine[i].context.comm, - error->engine[i].context.pid, - error->engine[i].context.ban_score, - bannable(&error->engine[i].context)); - } + if (!error->engine[i].context.pid) + continue; + + err_printf(m, "Active process (on ring %s): %s [%d]\n", + engine_name(m->i915, i), + error->engine[i].context.comm, + error->engine[i].context.pid); } err_printf(m, "Reset count: %u\n", error->reset_count); err_printf(m, "Suspend count: %u\n", error->suspend_count); err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform)); + err_printf(m, "Subplatform: 0x%x\n", + intel_subplatform(&error->runtime_info, + error->device_info.platform)); err_print_pciid(m, m->i915); err_printf(m, "IOMMU enabled?: %d\n", error->iommu); @@ -719,8 +705,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); err_printf(m, "CCID: 0x%08x\n", error->ccid); - err_printf(m, "Missed interrupts: 0x%08lx\n", - m->i915->gpu_error.missed_irq_rings); for (i = 0; i < error->nfence; i++) err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); @@ -735,7 +719,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } - if (IS_GEN7(m->i915)) + if (IS_GEN(m->i915, 7)) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); for (i = 0; i < ARRAY_SIZE(error->engine); i++) { @@ -777,13 +761,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, if (obj) { err_puts(m, m->i915->engine[i]->name); if (ee->context.pid) - err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)", + err_printf(m, " (submitted by %s [%d])", ee->context.comm, - ee->context.pid, - ee->context.handle, - ee->context.hw_id, - ee->context.ban_score, - bannable(&ee->context)); + ee->context.pid); err_printf(m, " --- gtt_offset = 0x%08x %08x\n", upper_32_bits(obj->gtt_offset), lower_32_bits(obj->gtt_offset)); @@ -804,21 +784,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, error->epoch); } - if (IS_ERR(ee->waiters)) { - err_printf(m, "%s --- ? 
waiters [unable to acquire spinlock]\n", - m->i915->engine[i]->name); - } else if (ee->num_waiters) { - err_printf(m, "%s --- %d waiters\n", - m->i915->engine[i]->name, - ee->num_waiters); - for (j = 0; j < ee->num_waiters; j++) { - err_printf(m, " seqno 0x%08x for %s [%d]\n", - ee->waiters[j].seqno, - ee->waiters[j].comm, - ee->waiters[j].pid); - } - } - print_error_obj(m, m->i915->engine[i], "ringbuffer", ee->ringbuffer); @@ -844,7 +809,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, if (error->display) intel_display_print_error_state(m, error->display); - err_print_capabilities(m, &error->device_info, &error->driver_caps); + err_print_capabilities(m, &error->device_info, &error->runtime_info, + &error->driver_caps); err_print_params(m, &error->params); err_print_uc(m, &error->uc); } @@ -963,17 +929,10 @@ static void i915_error_object_free(struct drm_i915_error_object *obj) kfree(obj); } -static __always_inline void free_param(const char *type, void *x) -{ - if (!__builtin_strcmp(type, "char *")) - kfree(*(void **)x); -} static void cleanup_params(struct i915_gpu_state *error) { -#define FREE(T, x, ...) free_param(#T, &error->params.x); - I915_PARAMS_FOR_EACH(FREE); -#undef FREE + i915_params_free(&error->params); } static void cleanup_uc_state(struct i915_gpu_state *error) @@ -1006,8 +965,6 @@ void __i915_gpu_state_free(struct kref *error_ref) i915_error_object_free(ee->wa_ctx); kfree(ee->requests); - if (!IS_ERR_OR_NULL(ee->waiters)) - kfree(ee->waiters); } for (i = 0; i < ARRAY_SIZE(error->active_bo); i++) @@ -1037,7 +994,7 @@ i915_error_object_create(struct drm_i915_private *i915, dma_addr_t dma; int ret; - if (!vma) + if (!vma || !vma->pages) return NULL; num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; @@ -1082,27 +1039,6 @@ i915_error_object_create(struct drm_i915_private *i915, return dst; } -/* The error capture is special as tries to run underneath the normal - * locking rules - so we use the raw version of the i915_gem_active lookup. - */ -static inline uint32_t -__active_get_seqno(struct i915_gem_active *active) -{ - struct i915_request *request; - - request = __i915_gem_active_peek(active); - return request ? request->global_seqno : 0; -} - -static inline int -__active_get_engine_id(struct i915_gem_active *active) -{ - struct i915_request *request; - - request = __i915_gem_active_peek(active); - return request ? 
request->engine->id : -1; -} - static void capture_bo(struct drm_i915_error_buffer *err, struct i915_vma *vma) { @@ -1111,9 +1047,6 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->size = obj->base.size; err->name = obj->base.name; - err->wseqno = __active_get_seqno(&obj->frontbuffer_write); - err->engine = __active_get_engine_id(&obj->frontbuffer_write); - err->gtt_offset = vma->node.start; err->read_domains = obj->read_domains; err->write_domain = obj->write_domain; @@ -1127,7 +1060,9 @@ static void capture_bo(struct drm_i915_error_buffer *err, static u32 capture_error_bo(struct drm_i915_error_buffer *err, int count, struct list_head *head, - bool pinned_only) + unsigned int flags) +#define ACTIVE_ONLY BIT(0) +#define PINNED_ONLY BIT(1) { struct i915_vma *vma; int i = 0; @@ -1136,7 +1071,10 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err, if (!vma->obj) continue; - if (pinned_only && !i915_vma_is_pinned(vma)) + if (flags & ACTIVE_ONLY && !i915_vma_is_active(vma)) + continue; + + if (flags & PINNED_ONLY && !i915_vma_is_pinned(vma)) continue; capture_bo(err++, vma); @@ -1147,7 +1085,8 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err, return i; } -/* Generate a semi-unique error code. The code is not meant to have meaning, The +/* + * Generate a semi-unique error code. The code is not meant to have meaning, The * code's only purpose is to try to prevent false duplicated bug reports by * grossly estimating a GPU error state. * @@ -1156,29 +1095,23 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err, * * It's only a small step better than a random number in its current form. */ -static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, - struct i915_gpu_state *error, - int *engine_id) +static u32 i915_error_generate_code(struct i915_gpu_state *error, + intel_engine_mask_t engine_mask) { - uint32_t error_code = 0; - int i; - - /* IPEHR would be an ideal way to detect errors, as it's the gross + /* + * IPEHR would be an ideal way to detect errors, as it's the gross * measure of "the command that hung." However, has some very common * synchronization commands which almost always appear in the case * strictly a client bug. Use instdone to differentiate those some. 
*/ - for (i = 0; i < I915_NUM_ENGINES; i++) { - if (error->engine[i].hangcheck_stalled) { - if (engine_id) - *engine_id = i; + if (engine_mask) { + struct drm_i915_error_engine *ee = + &error->engine[ffs(engine_mask)]; - return error->engine[i].ipehr ^ - error->engine[i].instdone.instdone; - } + return ee->ipehr ^ ee->instdone.instdone; } - return error_code; + return 0; } static void gem_record_fences(struct i915_gpu_state *error) @@ -1199,71 +1132,6 @@ static void gem_record_fences(struct i915_gpu_state *error) error->nfence = i; } -static void gen6_record_semaphore_state(struct intel_engine_cs *engine, - struct drm_i915_error_engine *ee) -{ - struct drm_i915_private *dev_priv = engine->i915; - - ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base)); - ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base)); - if (HAS_VEBOX(dev_priv)) - ee->semaphore_mboxes[2] = - I915_READ(RING_SYNC_2(engine->mmio_base)); -} - -static void error_record_engine_waiters(struct intel_engine_cs *engine, - struct drm_i915_error_engine *ee) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct drm_i915_error_waiter *waiter; - struct rb_node *rb; - int count; - - ee->num_waiters = 0; - ee->waiters = NULL; - - if (RB_EMPTY_ROOT(&b->waiters)) - return; - - if (!spin_trylock_irq(&b->rb_lock)) { - ee->waiters = ERR_PTR(-EDEADLK); - return; - } - - count = 0; - for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb)) - count++; - spin_unlock_irq(&b->rb_lock); - - waiter = NULL; - if (count) - waiter = kmalloc_array(count, - sizeof(struct drm_i915_error_waiter), - GFP_ATOMIC); - if (!waiter) - return; - - if (!spin_trylock_irq(&b->rb_lock)) { - kfree(waiter); - ee->waiters = ERR_PTR(-EDEADLK); - return; - } - - ee->waiters = waiter; - for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { - struct intel_wait *w = rb_entry(rb, typeof(*w), node); - - strcpy(waiter->comm, w->tsk->comm); - waiter->pid = w->tsk->pid; - waiter->seqno = w->seqno; - waiter++; - - if (++ee->num_waiters == count) - break; - } - spin_unlock_irq(&b->rb_lock); -} - static void error_record_engine_registers(struct i915_gpu_state *error, struct intel_engine_cs *engine, struct drm_i915_error_engine *ee) @@ -1271,66 +1139,62 @@ static void error_record_engine_registers(struct i915_gpu_state *error, struct drm_i915_private *dev_priv = engine->i915; if (INTEL_GEN(dev_priv) >= 6) { - ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); - if (INTEL_GEN(dev_priv) >= 8) { + ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL); + if (INTEL_GEN(dev_priv) >= 8) ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG); - } else { - gen6_record_semaphore_state(engine, ee); + else ee->fault_reg = I915_READ(RING_FAULT_REG(engine)); - } } if (INTEL_GEN(dev_priv) >= 4) { - ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); - ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); - ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); - ee->instps = I915_READ(RING_INSTPS(engine->mmio_base)); - ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); + ee->faddr = ENGINE_READ(engine, RING_DMA_FADD); + ee->ipeir = ENGINE_READ(engine, RING_IPEIR); + ee->ipehr = ENGINE_READ(engine, RING_IPEHR); + ee->instps = ENGINE_READ(engine, RING_INSTPS); + ee->bbaddr = ENGINE_READ(engine, RING_BBADDR); if (INTEL_GEN(dev_priv) >= 8) { - ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; - ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; + ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) 
<< 32; + ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32; } - ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base)); + ee->bbstate = ENGINE_READ(engine, RING_BBSTATE); } else { - ee->faddr = I915_READ(DMA_FADD_I8XX); - ee->ipeir = I915_READ(IPEIR); - ee->ipehr = I915_READ(IPEHR); + ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX); + ee->ipeir = ENGINE_READ(engine, IPEIR); + ee->ipehr = ENGINE_READ(engine, IPEHR); } intel_engine_get_instdone(engine, &ee->instdone); - ee->waiting = intel_engine_has_waiter(engine); - ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); + ee->instpm = ENGINE_READ(engine, RING_INSTPM); ee->acthd = intel_engine_get_active_head(engine); - ee->seqno = intel_engine_get_seqno(engine); - ee->last_seqno = intel_engine_last_submit(engine); - ee->start = I915_READ_START(engine); - ee->head = I915_READ_HEAD(engine); - ee->tail = I915_READ_TAIL(engine); - ee->ctl = I915_READ_CTL(engine); + ee->start = ENGINE_READ(engine, RING_START); + ee->head = ENGINE_READ(engine, RING_HEAD); + ee->tail = ENGINE_READ(engine, RING_TAIL); + ee->ctl = ENGINE_READ(engine, RING_CTL); if (INTEL_GEN(dev_priv) > 2) - ee->mode = I915_READ_MODE(engine); + ee->mode = ENGINE_READ(engine, RING_MI_MODE); if (!HWS_NEEDS_PHYSICAL(dev_priv)) { i915_reg_t mmio; - if (IS_GEN7(dev_priv)) { + if (IS_GEN(dev_priv, 7)) { switch (engine->id) { default: - case RCS: + MISSING_CASE(engine->id); + case RCS0: mmio = RENDER_HWS_PGA_GEN7; break; - case BCS: + case BCS0: mmio = BLT_HWS_PGA_GEN7; break; - case VCS: + case VCS0: mmio = BSD_HWS_PGA_GEN7; break; - case VECS: + case VECS0: mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(engine->i915)) { + } else if (IS_GEN(engine->i915, 6)) { mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ @@ -1341,9 +1205,8 @@ static void error_record_engine_registers(struct i915_gpu_state *error, } ee->idle = intel_engine_is_idle(engine); - ee->hangcheck_timestamp = engine->hangcheck.action_timestamp; - ee->hangcheck_action = engine->hangcheck.action; - ee->hangcheck_stalled = engine->hangcheck.stalled; + if (!ee->idle) + ee->hangcheck_timestamp = engine->hangcheck.action_timestamp; ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, engine); @@ -1352,12 +1215,12 @@ static void error_record_engine_registers(struct i915_gpu_state *error, ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); - if (IS_GEN6(dev_priv)) + if (IS_GEN(dev_priv, 6)) ee->vm_info.pp_dir_base = - I915_READ(RING_PP_DIR_BASE_READ(engine)); - else if (IS_GEN7(dev_priv)) + ENGINE_READ(engine, RING_PP_DIR_BASE_READ); + else if (IS_GEN(dev_priv, 7)) ee->vm_info.pp_dir_base = - I915_READ(RING_PP_DIR_BASE(engine)); + ENGINE_READ(engine, RING_PP_DIR_BASE); else if (INTEL_GEN(dev_priv) >= 8) for (i = 0; i < 4; i++) { ee->vm_info.pdp[i] = @@ -1374,10 +1237,10 @@ static void record_request(struct i915_request *request, { struct i915_gem_context *ctx = request->gem_context; - erq->context = ctx->hw_id; + erq->flags = request->fence.flags; + erq->context = request->fence.context; + erq->seqno = request->fence.seqno; erq->sched_attr = request->sched.attr; - erq->ban_score = atomic_read(&ctx->ban_score); - erq->seqno = request->global_seqno; erq->jiffies = request->emitted_jiffies; erq->start = i915_ggtt_offset(request->ring->vma); erq->head = request->head; @@ -1468,11 +1331,8 @@ static void record_context(struct drm_i915_error_context *e, rcu_read_unlock(); } - e->handle = ctx->user_handle; e->hw_id = ctx->hw_id; e->sched_attr = ctx->sched; - 
e->ban_score = atomic_read(&ctx->ban_score); - e->bannable = i915_gem_context_is_bannable(ctx); e->guilty = atomic_read(&ctx->guilty_count); e->active = atomic_read(&ctx->active_count); } @@ -1549,10 +1409,9 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->engine_id = i; error_record_engine_registers(error, engine, ee); - error_record_engine_waiters(engine, ee); error_record_engine_execlists(engine, ee); - request = i915_gem_find_active_request(engine); + request = intel_engine_find_active_request(engine); if (request) { struct i915_gem_context *ctx = request->gem_context; struct intel_ring *ring; @@ -1613,14 +1472,17 @@ static void gem_capture_vm(struct i915_gpu_state *error, int count; count = 0; - list_for_each_entry(vma, &vm->active_list, vm_link) - count++; + list_for_each_entry(vma, &vm->bound_list, vm_link) + if (i915_vma_is_active(vma)) + count++; active_bo = NULL; if (count) active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC); if (active_bo) - count = capture_error_bo(active_bo, count, &vm->active_list, false); + count = capture_error_bo(active_bo, + count, &vm->bound_list, + ACTIVE_ONLY); else count = 0; @@ -1658,28 +1520,20 @@ static void capture_pinned_buffers(struct i915_gpu_state *error) struct i915_address_space *vm = &error->i915->ggtt.vm; struct drm_i915_error_buffer *bo; struct i915_vma *vma; - int count_inactive, count_active; - - count_inactive = 0; - list_for_each_entry(vma, &vm->inactive_list, vm_link) - count_inactive++; + int count; - count_active = 0; - list_for_each_entry(vma, &vm->active_list, vm_link) - count_active++; + count = 0; + list_for_each_entry(vma, &vm->bound_list, vm_link) + count++; bo = NULL; - if (count_inactive + count_active) - bo = kcalloc(count_inactive + count_active, - sizeof(*bo), GFP_ATOMIC); + if (count) + bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC); if (!bo) return; - count_inactive = capture_error_bo(bo, count_inactive, - &vm->active_list, true); - count_active = capture_error_bo(bo + count_inactive, count_active, - &vm->inactive_list, true); - error->pinned_bo_count = count_inactive + count_active; + error->pinned_bo_count = + capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY); error->pinned_bo = bo; } @@ -1725,7 +1579,7 @@ static void capture_reg_state(struct i915_gpu_state *error) error->forcewake = I915_READ_FW(FORCEWAKE_VLV); } - if (IS_GEN7(dev_priv)) + if (IS_GEN(dev_priv, 7)) error->err_int = I915_READ(GEN7_ERR_INT); if (INTEL_GEN(dev_priv) >= 8) { @@ -1733,7 +1587,7 @@ static void capture_reg_state(struct i915_gpu_state *error) error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); } - if (IS_GEN6(dev_priv)) { + if (IS_GEN(dev_priv, 6)) { error->forcewake = I915_READ_FW(FORCEWAKE); error->gab_ctl = I915_READ(GAB_CTL); error->gfx_mode = I915_READ(GFX_MODE); @@ -1750,10 +1604,10 @@ static void capture_reg_state(struct i915_gpu_state *error) } if (INTEL_GEN(dev_priv) >= 5) - error->ccid = I915_READ(CCID); + error->ccid = I915_READ(CCID(RENDER_RING_BASE)); /* 3: Feature specific registers */ - if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { + if (IS_GEN_RANGE(dev_priv, 6, 7)) { error->gam_ecochk = I915_READ(GAM_ECOCHK); error->gac_eco = I915_READ(GAC_ECO_BITS); } @@ -1777,7 +1631,7 @@ static void capture_reg_state(struct i915_gpu_state *error) error->ier = I915_READ(DEIER); error->gtier[0] = I915_READ(GTIER); error->ngtier = 1; - } else if (IS_GEN2(dev_priv)) { + } else if (IS_GEN(dev_priv, 2)) { error->ier = I915_READ16(IER); } else if (!IS_VALLEYVIEW(dev_priv)) { error->ier = I915_READ(IER); @@ 
-1786,31 +1640,36 @@ static void capture_reg_state(struct i915_gpu_state *error) error->pgtbl_er = I915_READ(PGTBL_ER); } -static void i915_error_capture_msg(struct drm_i915_private *dev_priv, - struct i915_gpu_state *error, - u32 engine_mask, - const char *error_msg) +static const char * +error_msg(struct i915_gpu_state *error, + intel_engine_mask_t engines, const char *msg) { - u32 ecode; - int engine_id = -1, len; + int len; + int i; - ecode = i915_error_generate_code(dev_priv, error, &engine_id); + for (i = 0; i < ARRAY_SIZE(error->engine); i++) + if (!error->engine[i].context.pid) + engines &= ~BIT(i); len = scnprintf(error->error_msg, sizeof(error->error_msg), - "GPU HANG: ecode %d:%d:0x%08x", - INTEL_GEN(dev_priv), engine_id, ecode); - - if (engine_id != -1 && error->engine[engine_id].context.pid) + "GPU HANG: ecode %d:%x:0x%08x", + INTEL_GEN(error->i915), engines, + i915_error_generate_code(error, engines)); + if (engines) { + /* Just show the first executing process, more is confusing */ + i = __ffs(engines); len += scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, ", in %s [%d]", - error->engine[engine_id].context.comm, - error->engine[engine_id].context.pid); + error->engine[i].context.comm, + error->engine[i].context.pid); + } + if (msg) + len += scnprintf(error->error_msg + len, + sizeof(error->error_msg) - len, + ", %s", msg); - scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, - ", reason: %s, action: %s", - error_msg, - engine_mask ? "reset" : "continue"); + return error->error_msg; } static void capture_gen_state(struct i915_gpu_state *error) @@ -1831,21 +1690,15 @@ static void capture_gen_state(struct i915_gpu_state *error) memcpy(&error->device_info, INTEL_INFO(i915), sizeof(error->device_info)); + memcpy(&error->runtime_info, + RUNTIME_INFO(i915), + sizeof(error->runtime_info)); error->driver_caps = i915->caps; } -static __always_inline void dup_param(const char *type, void *x) -{ - if (!__builtin_strcmp(type, "char *")) - *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC); -} - static void capture_params(struct i915_gpu_state *error) { - error->params = i915_modparams; -#define DUP(T, x, ...) 
dup_param(#T, &error->params.x); - I915_PARAMS_FOR_EACH(DUP); -#undef DUP + i915_params_copy(&error->params, &i915_modparams); } static unsigned long capture_find_epoch(const struct i915_gpu_state *error) @@ -1856,7 +1709,7 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error) for (i = 0; i < ARRAY_SIZE(error->engine); i++) { const struct drm_i915_error_engine *ee = &error->engine[i]; - if (ee->hangcheck_stalled && + if (ee->hangcheck_timestamp && time_before(ee->hangcheck_timestamp, epoch)) epoch = ee->hangcheck_timestamp; } @@ -1907,9 +1760,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915) { struct i915_gpu_state *error; + /* Check if GPU capture has been disabled */ + error = READ_ONCE(i915->gpu_error.first_error); + if (IS_ERR(error)) + return error; + error = kzalloc(sizeof(*error), GFP_ATOMIC); - if (!error) - return NULL; + if (!error) { + i915_disable_error_state(i915, -ENOMEM); + return ERR_PTR(-ENOMEM); + } kref_init(&error->ref); error->i915 = i915; @@ -1923,7 +1783,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915) * i915_capture_error_state - capture an error record for later analysis * @i915: i915 device * @engine_mask: the mask of engines triggering the hang - * @error_msg: a message to insert into the error capture header + * @msg: a message to insert into the error capture header * * Should be called when an error is detected (either a hang or an error * interrupt) to capture error state from the time of the error. Fills @@ -1931,8 +1791,8 @@ i915_capture_gpu_state(struct drm_i915_private *i915) * to pick up. */ void i915_capture_error_state(struct drm_i915_private *i915, - u32 engine_mask, - const char *error_msg) + intel_engine_mask_t engine_mask, + const char *msg) { static bool warned; struct i915_gpu_state *error; @@ -1945,14 +1805,10 @@ void i915_capture_error_state(struct drm_i915_private *i915, return; error = i915_capture_gpu_state(i915); - if (!error) { - DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); - i915_disable_error_state(i915, -ENOMEM); + if (IS_ERR(error)) return; - } - i915_error_capture_msg(i915, error, engine_mask, error_msg); - DRM_INFO("%s\n", error->error_msg); + dev_info(i915->drm.dev, "%s\n", error_msg(error, engine_mask, msg)); if (!error->simulated) { spin_lock_irqsave(&i915->gpu_error.lock, flags); @@ -1987,7 +1843,7 @@ i915_first_error_state(struct drm_i915_private *i915) spin_lock_irq(&i915->gpu_error.lock); error = i915->gpu_error.first_error; - if (error) + if (!IS_ERR_OR_NULL(error)) i915_gpu_state_get(error); spin_unlock_irq(&i915->gpu_error.lock); @@ -2000,10 +1856,11 @@ void i915_reset_error_state(struct drm_i915_private *i915) spin_lock_irq(&i915->gpu_error.lock); error = i915->gpu_error.first_error; - i915->gpu_error.first_error = NULL; + if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */ + i915->gpu_error.first_error = NULL; spin_unlock_irq(&i915->gpu_error.lock); - if (!IS_ERR(error)) + if (!IS_ERR_OR_NULL(error)) i915_gpu_state_put(error); } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index ff2652bbb0b0..5dc761e85d9d 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -45,6 +45,7 @@ struct i915_gpu_state { u32 reset_count; u32 suspend_count; struct intel_device_info device_info; + struct intel_runtime_info runtime_info; struct intel_driver_caps driver_caps; struct i915_params params; @@ -81,11 +82,7 @@ struct i915_gpu_state { int engine_id; /* Software tracked state 
*/ bool idle; - bool waiting; - int num_waiters; unsigned long hangcheck_timestamp; - bool hangcheck_stalled; - enum intel_engine_hangcheck_action hangcheck_action; struct i915_address_space *vm; int num_requests; u32 reset_count; @@ -97,8 +94,6 @@ struct i915_gpu_state { u32 cpu_ring_head; u32 cpu_ring_tail; - u32 last_seqno; - /* Register state */ u32 start; u32 tail; @@ -111,24 +106,19 @@ struct i915_gpu_state { u32 bbstate; u32 instpm; u32 instps; - u32 seqno; u64 bbaddr; u64 acthd; u32 fault_reg; u64 faddr; u32 rc_psmi; /* sleep state */ - u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; struct intel_instdone instdone; struct drm_i915_error_context { char comm[TASK_COMM_LEN]; pid_t pid; - u32 handle; u32 hw_id; - int ban_score; int active; int guilty; - bool bannable; struct i915_sched_attr sched_attr; } context; @@ -148,10 +138,10 @@ struct i915_gpu_state { struct drm_i915_error_object *default_state; struct drm_i915_error_request { + unsigned long flags; long jiffies; pid_t pid; u32 context; - int ban_score; u32 seqno; u32 start; u32 head; @@ -160,12 +150,6 @@ struct i915_gpu_state { } *requests, execlist[EXECLIST_MAX_PORTS]; unsigned int num_ports; - struct drm_i915_error_waiter { - char comm[TASK_COMM_LEN]; - pid_t pid; - u32 seqno; - } *waiters; - struct { u32 gfx_mode; union { @@ -178,7 +162,6 @@ struct i915_gpu_state { struct drm_i915_error_buffer { u32 size; u32 name; - u32 wseqno; u64 gtt_offset; u32 read_domains; u32 write_domain; @@ -187,7 +170,6 @@ struct i915_gpu_state { u32 dirty:1; u32 purgeable:1; u32 userptr:1; - s32 engine:4; u32 cache_level:3; } *active_bo[I915_NUM_ENGINES], *pinned_bo; u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; @@ -196,6 +178,8 @@ struct i915_gpu_state { struct scatterlist *sgl, *fit; }; +struct i915_gpu_restart; + struct i915_gpu_error { /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ @@ -210,50 +194,13 @@ struct i915_gpu_error { atomic_t pending_fb_pin; - unsigned long missed_irq_rings; - - /** - * State variable controlling the reset flow and count - * - * This is a counter which gets incremented when reset is triggered, - * - * Before the reset commences, the I915_RESET_BACKOFF bit is set - * meaning that any waiters holding onto the struct_mutex should - * relinquish the lock immediately in order for the reset to start. - * - * If reset is not completed successfully, the I915_WEDGE bit is - * set meaning that hardware is terminally sour and there is no - * recovery. All waiters on the reset_queue will be woken when - * that happens. - * - * This counter is used by the wait_seqno code to notice that reset - * event happened and it needs to restart the entire ioctl (since most - * likely the seqno it waited for won't ever signal anytime soon). - * - * This is important for lock-free wait paths, where no contended lock - * naturally enforces the correct ordering between the bail-out of the - * waiter and the gpu reset work code. - */ - unsigned long reset_count; - /** * flags: Control various stages of the GPU reset * - * #I915_RESET_BACKOFF - When we start a reset, we want to stop any - * other users acquiring the struct_mutex. To do this we set the - * #I915_RESET_BACKOFF bit in the error flags when we detect a reset - * and then check for that bit before acquiring the struct_mutex (in - * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a - * secondary role in preventing two concurrent global reset attempts. 
- * - * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the - * struct_mutex. We try to acquire the struct_mutex in the reset worker, - * but it may be held by some long running waiter (that we cannot - * interrupt without causing trouble). Once we are ready to do the GPU - * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If - * they already hold the struct_mutex and want to participate they can - * inspect the bit and do the reset directly, otherwise the worker - * waits for the struct_mutex. + * #I915_RESET_BACKOFF - When we start a global reset, we need to + * serialise with any other users attempting to do the same, and + * any global resources that may be clobber by the reset (such as + * FENCE registers). * * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to * acquire the struct_mutex to reset an engine, we need an explicit @@ -268,19 +215,17 @@ struct i915_gpu_error { */ unsigned long flags; #define I915_RESET_BACKOFF 0 -#define I915_RESET_HANDOFF 1 -#define I915_RESET_MODESET 2 +#define I915_RESET_MODESET 1 +#define I915_RESET_ENGINE 2 #define I915_WEDGED (BITS_PER_LONG - 1) -#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES) + + /** Number of times the device has been reset (global) */ + u32 reset_count; /** Number of times an engine has been reset */ u32 reset_engine_count[I915_NUM_ENGINES]; - /** Set of stalled engines with guilty requests, in the current reset */ - u32 stalled_mask; - - /** Reason for the current *global* reset */ - const char *reason; + struct mutex wedge_mutex; /* serialises wedging/unwedging */ /** * Waitqueue to signal when a hang is detected. Used to for waiters @@ -294,8 +239,9 @@ struct i915_gpu_error { */ wait_queue_head_t reset_queue; - /* For missed irq/seqno simulation. 
*/ - unsigned long test_irq_rings; + struct srcu_struct reset_backoff_srcu; + + struct i915_gpu_restart *restart; }; struct drm_i915_error_state_buf { @@ -317,7 +263,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); void i915_capture_error_state(struct drm_i915_private *dev_priv, - u32 engine_mask, + intel_engine_mask_t engine_mask, const char *error_msg); static inline struct i915_gpu_state * diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 0e5c580d117c..c1007245f46d 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -28,8 +28,8 @@ */ #include <linux/compat.h> -#include <drm/drmP.h> #include <drm/i915_drm.h> +#include <drm/drm_ioctl.h> #include "i915_drv.h" struct drm_i915_getparam32 { @@ -52,7 +52,7 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd, return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) || + if (!access_ok(request, sizeof(*request)) || __put_user(req32.param, &request->param) || __put_user((void __user *)(unsigned long)req32.value, &request->value)) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d447d7d508f4..aa107a78cb36 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -30,8 +30,10 @@ #include <linux/sysrq.h> #include <linux/slab.h> +#include <linux/cpuidle.h> #include <linux/circ_buf.h> -#include <drm/drmP.h> +#include <drm/drm_irq.h> +#include <drm/drm_drv.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_trace.h" @@ -224,10 +226,10 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); /* For display hotplug interrupt */ static inline void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, - uint32_t mask, - uint32_t bits) + u32 mask, + u32 bits) { - uint32_t val; + u32 val; lockdep_assert_held(&dev_priv->irq_lock); WARN_ON(bits & ~mask); @@ -251,8 +253,8 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, * version is also available. 
*/ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, - uint32_t mask, - uint32_t bits) + u32 mask, + u32 bits) { spin_lock_irq(&dev_priv->irq_lock); i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); @@ -267,7 +269,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915, const unsigned int bank, const unsigned int bit) { - void __iomem * const regs = i915->regs; + void __iomem * const regs = i915->uncore.regs; u32 dw; lockdep_assert_held(&i915->irq_lock); @@ -301,10 +303,10 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915, * @enabled_irq_mask: mask of interrupt bits to enable */ void ilk_update_display_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { - uint32_t new_val; + u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); @@ -331,8 +333,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv, * @enabled_irq_mask: mask of interrupt bits to enable */ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { lockdep_assert_held(&dev_priv->irq_lock); @@ -346,13 +348,13 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, I915_WRITE(GTIMR, dev_priv->gt_irq_mask); } -void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask) { ilk_update_gt_irq(dev_priv, mask, mask); POSTING_READ_FW(GTIMR); } -void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask) { ilk_update_gt_irq(dev_priv, mask, 0); } @@ -391,10 +393,10 @@ static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) * @enabled_irq_mask: mask of interrupt bits to enable */ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { - uint32_t new_val; + u32 new_val; WARN_ON(enabled_irq_mask & ~interrupt_mask); @@ -578,11 +580,11 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) * @enabled_irq_mask: mask of interrupt bits to enable */ static void bdw_update_port_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { - uint32_t new_val; - uint32_t old_val; + u32 new_val; + u32 old_val; lockdep_assert_held(&dev_priv->irq_lock); @@ -612,10 +614,10 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv, */ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, enum pipe pipe, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { - uint32_t new_val; + u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); @@ -642,10 +644,10 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, * @enabled_irq_mask: mask of interrupt bits to enable */ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { - uint32_t sdeimr = I915_READ(SDEIMR); + u32 sdeimr = I915_READ(SDEIMR); sdeimr &= ~interrupt_mask; sdeimr |= (~enabled_irq_mask & interrupt_mask); @@ -747,13 +749,21 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, POSTING_READ(reg); } +static bool 
i915_has_asle(struct drm_i915_private *dev_priv) +{ + if (!dev_priv->opregion.asle) + return false; + + return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); +} + /** * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion * @dev_priv: i915 device private */ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) { - if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) + if (!i915_has_asle(dev_priv)) return; spin_lock_irq(&dev_priv->irq_lock); @@ -822,11 +832,26 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + const struct drm_display_mode *mode = &vblank->hwmode; i915_reg_t high_frame, low_frame; u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; - const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode; unsigned long irqflags; + /* + * On i965gm TV output the frame counter only works up to + * the point when we enable the TV encoder. After that the + * frame counter ceases to work and reads zero. We need a + * vblank wait before enabling the TV encoder and so we + * have to enable vblank interrupts while the frame counter + * is still in a working state. However the core vblank code + * does not like us returning non-zero frame counter values + * when we've told it that we don't have a working frame + * counter. Thus we must stop non-zero values leaking out. + */ + if (!vblank->max_vblank_count) + return 0; + htotal = mode->crtc_htotal; hsync_start = mode->crtc_hsync_start; vbl_start = mode->crtc_vblank_start; @@ -950,7 +975,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) if (mode->flags & DRM_MODE_FLAG_INTERLACE) vtotal /= 2; - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; else position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; @@ -998,6 +1023,9 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; unsigned long irqflags; + bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 || + IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) || + mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER; if (WARN_ON(!mode->crtc_clock)) { DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " @@ -1030,7 +1058,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, if (stime) *stime = ktime_get(); - if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { + if (use_scanline_counter) { /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. 
*/ @@ -1090,7 +1118,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, else position += vtotal - vbl_end; - if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { + if (use_scanline_counter) { *vpos = position; *hpos = 0; } else { @@ -1152,76 +1180,6 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) return; } -static void notify_ring(struct intel_engine_cs *engine) -{ - const u32 seqno = intel_engine_get_seqno(engine); - struct i915_request *rq = NULL; - struct task_struct *tsk = NULL; - struct intel_wait *wait; - - if (unlikely(!engine->breadcrumbs.irq_armed)) - return; - - rcu_read_lock(); - - spin_lock(&engine->breadcrumbs.irq_lock); - wait = engine->breadcrumbs.irq_wait; - if (wait) { - /* - * We use a callback from the dma-fence to submit - * requests after waiting on our own requests. To - * ensure minimum delay in queuing the next request to - * hardware, signal the fence now rather than wait for - * the signaler to be woken up. We still wake up the - * waiter in order to handle the irq-seqno coherency - * issues (we may receive the interrupt before the - * seqno is written, see __i915_request_irq_complete()) - * and to handle coalescing of multiple seqno updates - * and many waiters. - */ - if (i915_seqno_passed(seqno, wait->seqno)) { - struct i915_request *waiter = wait->request; - - if (waiter && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &waiter->fence.flags) && - intel_wait_check_request(wait, waiter)) - rq = i915_request_get(waiter); - - tsk = wait->tsk; - } else { - if (engine->irq_seqno_barrier && - i915_seqno_passed(seqno, wait->seqno - 1)) { - set_bit(ENGINE_IRQ_BREADCRUMB, - &engine->irq_posted); - tsk = wait->tsk; - } - } - - engine->breadcrumbs.irq_count++; - } else { - if (engine->breadcrumbs.irq_armed) - __intel_engine_disarm_breadcrumbs(engine); - } - spin_unlock(&engine->breadcrumbs.irq_lock); - - if (rq) { - spin_lock(&rq->lock); - dma_fence_signal_locked(&rq->fence); - GEM_BUG_ON(!i915_request_completed(rq)); - spin_unlock(&rq->lock); - - i915_request_put(rq); - } - - if (tsk && tsk->state & TASK_NORMAL) - wake_up_process(tsk); - - rcu_read_unlock(); - - trace_intel_engine_notify(engine, wait); -} - static void vlv_c0_read(struct drm_i915_private *dev_priv, struct intel_rps_ei *ei) { @@ -1339,6 +1297,18 @@ static void gen6_pm_rps_work(struct work_struct *work) rps->last_adj = adj; + /* + * Limit deboosting and boosting to keep ourselves at the extremes + * when in the respective power modes (i.e. slowly decrease frequencies + * while in the HIGH_POWER zone and slowly increase frequencies while + * in the LOW_POWER zone). On idle, we will hit the timeout and drop + * to the next level quickly, and conversely if busy we expect to + * hit a waitboost and rapidly switch into max power. + */ + if ((adj < 0 && rps->power.mode == HIGH_POWER) || + (adj > 0 && rps->power.mode == LOW_POWER)) + rps->last_adj = 0; + /* sysfs frequency interfaces may have snuck in while servicing the * interrupt */ @@ -1376,8 +1346,8 @@ static void ivybridge_parity_work(struct work_struct *work) container_of(work, typeof(*dev_priv), l3_parity.error_work); u32 error_status, row, bank, subbank; char *parity_event[6]; - uint32_t misccpctl; - uint8_t slice = 0; + u32 misccpctl; + u8 slice = 0; /* We must turn off DOP level clock gating to access the L3 registers. 
* In order to prevent a get/put style interface, acquire struct mutex @@ -1466,20 +1436,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - notify_ring(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); if (gt_iir & ILK_BSD_USER_INTERRUPT) - notify_ring(dev_priv->engine[VCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); } static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - notify_ring(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); if (gt_iir & GT_BSD_USER_INTERRUPT) - notify_ring(dev_priv->engine[VCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); if (gt_iir & GT_BLT_USER_INTERRUPT) - notify_ring(dev_priv->engine[BCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]); if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT | @@ -1499,8 +1469,8 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) tasklet = true; if (iir & GT_RENDER_USER_INTERRUPT) { - notify_ring(engine); - tasklet |= USES_GUC_SUBMISSION(engine->i915); + intel_engine_breadcrumbs_irq(engine); + tasklet |= intel_engine_needs_breadcrumb_tasklet(engine); } if (tasklet) @@ -1510,12 +1480,12 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) static void gen8_gt_irq_ack(struct drm_i915_private *i915, u32 master_ctl, u32 gt_iir[4]) { - void __iomem * const regs = i915->regs; + void __iomem * const regs = i915->uncore.regs; #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ GEN8_GT_BCS_IRQ | \ + GEN8_GT_VCS0_IRQ | \ GEN8_GT_VCS1_IRQ | \ - GEN8_GT_VCS2_IRQ | \ GEN8_GT_VECS_IRQ | \ GEN8_GT_PM_IRQ | \ GEN8_GT_GUC_IRQ) @@ -1526,7 +1496,7 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915, raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); } - if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { + if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); if (likely(gt_iir[1])) raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); @@ -1549,21 +1519,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915, u32 master_ctl, u32 gt_iir[4]) { if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { - gen8_cs_irq_handler(i915->engine[RCS], + gen8_cs_irq_handler(i915->engine[RCS0], gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); - gen8_cs_irq_handler(i915->engine[BCS], + gen8_cs_irq_handler(i915->engine[BCS0], gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); } - if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { - gen8_cs_irq_handler(i915->engine[VCS], + if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { + gen8_cs_irq_handler(i915->engine[VCS0], + gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT); + gen8_cs_irq_handler(i915->engine[VCS1], gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); - gen8_cs_irq_handler(i915->engine[VCS2], - gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); } if (master_ctl & GEN8_GT_VECS_IRQ) { - gen8_cs_irq_handler(i915->engine[VECS], + gen8_cs_irq_handler(i915->engine[VECS0], gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); } @@ -1738,13 +1708,15 @@ static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) #if defined(CONFIG_DEBUG_FS) static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe, - uint32_t crc0, uint32_t crc1, - uint32_t crc2, uint32_t crc3, - uint32_t crc4) + u32 crc0, u32 crc1, + u32 crc2, u32 crc3, + u32 crc4) { struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_crtc *crtc = 
intel_get_crtc_for_pipe(dev_priv, pipe); - uint32_t crcs[5]; + u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; + + trace_intel_pipe_crc(crtc, crcs); spin_lock(&pipe_crc->lock); /* @@ -1763,11 +1735,6 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, } spin_unlock(&pipe_crc->lock); - crcs[0] = crc0; - crcs[1] = crc1; - crcs[2] = crc2; - crcs[3] = crc3; - crcs[4] = crc4; drm_crtc_add_crc_entry(&crtc->base, true, drm_crtc_accurate_vblank_count(&crtc->base), crcs); @@ -1776,9 +1743,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, static inline void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe, - uint32_t crc0, uint32_t crc1, - uint32_t crc2, uint32_t crc3, - uint32_t crc4) {} + u32 crc0, u32 crc1, + u32 crc2, u32 crc3, + u32 crc4) {} #endif @@ -1804,7 +1771,7 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { - uint32_t res1, res2; + u32 res1, res2; if (INTEL_GEN(dev_priv) >= 3) res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); @@ -1843,13 +1810,11 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) if (INTEL_GEN(dev_priv) >= 8) return; - if (HAS_VEBOX(dev_priv)) { - if (pm_iir & PM_VEBOX_USER_INTERRUPT) - notify_ring(dev_priv->engine[VECS]); + if (pm_iir & PM_VEBOX_USER_INTERRUPT) + intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]); - if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); - } + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); } static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) @@ -2547,7 +2512,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, I915_WRITE(SDEIIR, pch_iir); } - if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) + if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) ironlake_rps_change_irq_handler(dev_priv); } @@ -2718,6 +2683,25 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); } +static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) +{ + u32 mask = GEN8_AUX_CHANNEL_A; + + if (INTEL_GEN(dev_priv) >= 9) + mask |= GEN9_AUX_CHANNEL_B | + GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; + + if (IS_CNL_WITH_PORT_F(dev_priv)) + mask |= CNL_AUX_CHANNEL_F; + + if (INTEL_GEN(dev_priv) >= 11) + mask |= ICL_AUX_CHANNEL_E | + CNL_AUX_CHANNEL_F; + + return mask; +} + static irqreturn_t gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { @@ -2773,20 +2757,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(GEN8_DE_PORT_IIR, iir); ret = IRQ_HANDLED; - tmp_mask = GEN8_AUX_CHANNEL_A; - if (INTEL_GEN(dev_priv) >= 9) - tmp_mask |= GEN9_AUX_CHANNEL_B | - GEN9_AUX_CHANNEL_C | - GEN9_AUX_CHANNEL_D; - - if (INTEL_GEN(dev_priv) >= 11) - tmp_mask |= ICL_AUX_CHANNEL_E; - - if (IS_CNL_WITH_PORT_F(dev_priv) || - INTEL_GEN(dev_priv) >= 11) - tmp_mask |= CNL_AUX_CHANNEL_F; - - if (iir & tmp_mask) { + if (iir & gen8_de_port_aux_mask(dev_priv)) { dp_aux_irq_handler(dev_priv); found = true; } @@ -2867,11 +2838,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; - if (HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_irq_handler(dev_priv, iir); - else if 
(HAS_PCH_SPT(dev_priv) || - HAS_PCH_KBP(dev_priv) || - HAS_PCH_CNP(dev_priv)) + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) spt_irq_handler(dev_priv, iir); else cpt_irq_handler(dev_priv, iir); @@ -2908,7 +2877,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs) static irqreturn_t gen8_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = to_i915(arg); - void __iomem * const regs = dev_priv->regs; + void __iomem * const regs = dev_priv->uncore.regs; u32 master_ctl; u32 gt_iir[4]; @@ -2938,51 +2907,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -struct wedge_me { - struct delayed_work work; - struct drm_i915_private *i915; - const char *name; -}; - -static void wedge_me(struct work_struct *work) -{ - struct wedge_me *w = container_of(work, typeof(*w), work.work); - - dev_err(w->i915->drm.dev, - "%s timed out, cancelling all in-flight rendering.\n", - w->name); - i915_gem_set_wedged(w->i915); -} - -static void __init_wedge(struct wedge_me *w, - struct drm_i915_private *i915, - long timeout, - const char *name) -{ - w->i915 = i915; - w->name = name; - - INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); - schedule_delayed_work(&w->work, timeout); -} - -static void __fini_wedge(struct wedge_me *w) -{ - cancel_delayed_work_sync(&w->work); - destroy_delayed_work_on_stack(&w->work); - w->i915 = NULL; -} - -#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ - for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ - (W)->i915; \ - __fini_wedge((W))) - static u32 gen11_gt_engine_identity(struct drm_i915_private * const i915, const unsigned int bank, const unsigned int bit) { - void __iomem * const regs = i915->regs; + void __iomem * const regs = i915->uncore.regs; u32 timeout_ts; u32 ident; @@ -3066,7 +2995,7 @@ static void gen11_gt_bank_handler(struct drm_i915_private * const i915, const unsigned int bank) { - void __iomem * const regs = i915->regs; + void __iomem * const regs = i915->uncore.regs; unsigned long intr_dw; unsigned int bit; @@ -3109,7 +3038,7 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915, static u32 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) { - void __iomem * const regs = dev_priv->regs; + void __iomem * const regs = dev_priv->uncore.regs; u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) @@ -3150,7 +3079,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs) static irqreturn_t gen11_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = to_i915(arg); - void __iomem * const regs = i915->regs; + void __iomem * const regs = i915->uncore.regs; u32 master_ctl; u32 gu_misc_iir; @@ -3188,203 +3117,6 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static void i915_reset_device(struct drm_i915_private *dev_priv, - u32 engine_mask, - const char *reason) -{ - struct i915_gpu_error *error = &dev_priv->gpu_error; - struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; - char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; - char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; - char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; - struct wedge_me w; - - kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); - - DRM_DEBUG_DRIVER("resetting chip\n"); - kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); - - /* Use a watchdog to ensure that our reset completes */ - i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { - intel_prepare_reset(dev_priv); - - error->reason = reason; - error->stalled_mask = 
engine_mask; - - /* Signal that locked waiters should reset the GPU */ - smp_mb__before_atomic(); - set_bit(I915_RESET_HANDOFF, &error->flags); - wake_up_all(&error->wait_queue); - - /* Wait for anyone holding the lock to wakeup, without - * blocking indefinitely on struct_mutex. - */ - do { - if (mutex_trylock(&dev_priv->drm.struct_mutex)) { - i915_reset(dev_priv, engine_mask, reason); - mutex_unlock(&dev_priv->drm.struct_mutex); - } - } while (wait_on_bit_timeout(&error->flags, - I915_RESET_HANDOFF, - TASK_UNINTERRUPTIBLE, - 1)); - - error->stalled_mask = 0; - error->reason = NULL; - - intel_finish_reset(dev_priv); - } - - if (!test_bit(I915_WEDGED, &error->flags)) - kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); -} - -void i915_clear_error_registers(struct drm_i915_private *dev_priv) -{ - u32 eir; - - if (!IS_GEN2(dev_priv)) - I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); - - if (INTEL_GEN(dev_priv) < 4) - I915_WRITE(IPEIR, I915_READ(IPEIR)); - else - I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); - - I915_WRITE(EIR, I915_READ(EIR)); - eir = I915_READ(EIR); - if (eir) { - /* - * some errors might have become stuck, - * mask them. - */ - DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); - I915_WRITE(EMR, I915_READ(EMR) | eir); - I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); - } - - if (INTEL_GEN(dev_priv) >= 8) { - I915_WRITE(GEN8_RING_FAULT_REG, - I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); - POSTING_READ(GEN8_RING_FAULT_REG); - } else if (INTEL_GEN(dev_priv) >= 6) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, dev_priv, id) { - I915_WRITE(RING_FAULT_REG(engine), - I915_READ(RING_FAULT_REG(engine)) & - ~RING_FAULT_VALID); - } - POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); - } -} - -/** - * i915_handle_error - handle a gpu error - * @dev_priv: i915 device private - * @engine_mask: mask representing engines that are hung - * @flags: control flags - * @fmt: Error message format string - * - * Do some basic checking of register state at error time and - * dump it to the syslog. Also call i915_capture_error_state() to make - * sure we get a record and make it available in debugfs. Fire a uevent - * so userspace knows something bad happened (should trigger collection - * of a ring dump etc.). - */ -void i915_handle_error(struct drm_i915_private *dev_priv, - u32 engine_mask, - unsigned long flags, - const char *fmt, ...) -{ - struct intel_engine_cs *engine; - unsigned int tmp; - char error_msg[80]; - char *msg = NULL; - - if (fmt) { - va_list args; - - va_start(args, fmt); - vscnprintf(error_msg, sizeof(error_msg), fmt, args); - va_end(args); - - msg = error_msg; - } - - /* - * In most cases it's guaranteed that we get here with an RPM - * reference held, for example because there is a pending GPU - * request that won't finish until the reset is done. This - * isn't the case at least when we get here by doing a - * simulated reset via debugfs, so get an RPM reference. - */ - intel_runtime_pm_get(dev_priv); - - engine_mask &= INTEL_INFO(dev_priv)->ring_mask; - - if (flags & I915_ERROR_CAPTURE) { - i915_capture_error_state(dev_priv, engine_mask, msg); - i915_clear_error_registers(dev_priv); - } - - /* - * Try engine reset when available. We fall back to full reset if - * single reset fails. 
- */ - if (intel_has_reset_engine(dev_priv) && - !i915_terminally_wedged(&dev_priv->gpu_error)) { - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { - BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); - if (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &dev_priv->gpu_error.flags)) - continue; - - if (i915_reset_engine(engine, msg) == 0) - engine_mask &= ~intel_engine_flag(engine); - - clear_bit(I915_RESET_ENGINE + engine->id, - &dev_priv->gpu_error.flags); - wake_up_bit(&dev_priv->gpu_error.flags, - I915_RESET_ENGINE + engine->id); - } - } - - if (!engine_mask) - goto out; - - /* Full reset needs the mutex, stop any other user trying to do so. */ - if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { - wait_event(dev_priv->gpu_error.reset_queue, - !test_bit(I915_RESET_BACKOFF, - &dev_priv->gpu_error.flags)); - goto out; - } - - /* Prevent any other reset-engine attempt. */ - for_each_engine(engine, dev_priv, tmp) { - while (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &dev_priv->gpu_error.flags)) - wait_on_bit(&dev_priv->gpu_error.flags, - I915_RESET_ENGINE + engine->id, - TASK_UNINTERRUPTIBLE); - } - - i915_reset_device(dev_priv, engine_mask, msg); - - for_each_engine(engine, dev_priv, tmp) { - clear_bit(I915_RESET_ENGINE + engine->id, - &dev_priv->gpu_error.flags); - } - - clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); - wake_up_all(&dev_priv->gpu_error.reset_queue); - -out: - intel_runtime_pm_put(dev_priv); -} - /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ @@ -3400,6 +3132,16 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) return 0; } +static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + if (dev_priv->i945gm_vblank.enabled++ == 0) + schedule_work(&dev_priv->i945gm_vblank.work); + + return i8xx_enable_vblank(dev, pipe); +} + static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -3417,7 +3159,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; - uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? + u32 bit = INTEL_GEN(dev_priv) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3464,6 +3206,16 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } +static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + i8xx_disable_vblank(dev, pipe); + + if (--dev_priv->i945gm_vblank.enabled == 0) + schedule_work(&dev_priv->i945gm_vblank.work); +} + static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -3479,7 +3231,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; - uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? + u32 bit = INTEL_GEN(dev_priv) >= 7 ? 
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3497,6 +3249,60 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } +static void i945gm_vblank_work_func(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, i945gm_vblank.work); + + /* + * Vblank interrupts fail to wake up the device from C3, + * hence we want to prevent C3 usage while vblank interrupts + * are enabled. + */ + pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos, + READ_ONCE(dev_priv->i945gm_vblank.enabled) ? + dev_priv->i945gm_vblank.c3_disable_latency : + PM_QOS_DEFAULT_VALUE); +} + +static int cstate_disable_latency(const char *name) +{ + const struct cpuidle_driver *drv; + int i; + + drv = cpuidle_get_driver(); + if (!drv) + return 0; + + for (i = 0; i < drv->state_count; i++) { + const struct cpuidle_state *state = &drv->states[i]; + + if (!strcmp(state->name, name)) + return state->exit_latency ? + state->exit_latency - 1 : 0; + } + + return 0; +} + +static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv) +{ + INIT_WORK(&dev_priv->i945gm_vblank.work, + i945gm_vblank_work_func); + + dev_priv->i945gm_vblank.c3_disable_latency = + cstate_disable_latency("C3"); + pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos, + PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); +} + +static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv) +{ + cancel_work_sync(&dev_priv->i945gm_vblank.work); + pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos); +} + static void ibx_irq_reset(struct drm_i915_private *dev_priv) { if (HAS_PCH_NOP(dev_priv)) @@ -3586,11 +3392,8 @@ static void ironlake_irq_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - if (IS_GEN5(dev_priv)) - I915_WRITE(HWSTAM, 0xffffffff); - GEN3_IRQ_RESET(DE); - if (IS_GEN7(dev_priv)) + if (IS_GEN(dev_priv, 7)) I915_WRITE(GEN7_ERR_INT, 0xffffffff); if (IS_HASWELL(dev_priv)) { @@ -3631,7 +3434,7 @@ static void gen8_irq_reset(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); int pipe; - gen8_master_intr_disable(dev_priv->regs); + gen8_master_intr_disable(dev_priv->uncore.regs); gen8_gt_irq_reset(dev_priv); @@ -3673,7 +3476,7 @@ static void gen11_irq_reset(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - gen11_master_intr_disable(dev_priv->regs); + gen11_master_intr_disable(dev_priv->uncore.regs); gen11_gt_irq_reset(dev_priv); @@ -3693,14 +3496,14 @@ static void gen11_irq_reset(struct drm_device *dev) GEN3_IRQ_RESET(GEN11_GU_MISC_); GEN3_IRQ_RESET(GEN8_PCU_); - if (HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) GEN3_IRQ_RESET(SDE); } void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask) { - uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; + u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; enum pipe pipe; spin_lock_irq(&dev_priv->irq_lock); @@ -3874,7 +3677,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) gen11_hpd_detection_setup(dev_priv); - if (HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_hpd_irq_setup(dev_priv); } @@ -4045,7 +3848,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) } gt_irqs |= GT_RENDER_USER_INTERRUPT; - if (IS_GEN5(dev_priv)) { + if (IS_GEN(dev_priv, 5)) { gt_irqs |= ILK_BSD_USER_INTERRUPT; } else { 
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; @@ -4058,7 +3861,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) * RPS interrupts will get enabled/disabled on demand when RPS * itself is enabled/disabled. */ - if (HAS_VEBOX(dev_priv)) { + if (HAS_ENGINE(dev_priv, VECS0)) { pm_irqs |= PM_VEBOX_USER_INTERRUPT; dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; } @@ -4169,22 +3972,22 @@ static int valleyview_irq_postinstall(struct drm_device *dev) static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) { /* These are interrupts we'll toggle with the ring mask register */ - uint32_t gt_interrupts[] = { - GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | - GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, - GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | - GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, + u32 gt_interrupts[] = { + (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT), + + (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT), + 0, - GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT - }; - if (HAS_L3_DPF(dev_priv)) - gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT) + }; dev_priv->pm_ier = 0x0; dev_priv->pm_imr = ~dev_priv->pm_ier; @@ -4200,8 +4003,8 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) { - uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; - uint32_t de_pipe_enables; + u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; + u32 de_pipe_enables; u32 de_port_masked = GEN8_AUX_CHANNEL_A; u32 de_port_enables; u32 de_misc_masked = GEN8_DE_EDP_PSR; @@ -4278,7 +4081,7 @@ static int gen8_irq_postinstall(struct drm_device *dev) if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_postinstall(dev); - gen8_master_intr_enable(dev_priv->regs); + gen8_master_intr_enable(dev_priv->uncore.regs); return 0; } @@ -4330,7 +4133,7 @@ static int gen11_irq_postinstall(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 gu_misc_masked = GEN11_GU_MISC_GSE; - if (HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_irq_postinstall(dev); gen11_gt_irq_postinstall(dev_priv); @@ -4340,7 +4143,8 @@ static int gen11_irq_postinstall(struct drm_device *dev) I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); - gen11_master_intr_enable(dev_priv->regs); + gen11_master_intr_enable(dev_priv->uncore.regs); + POSTING_READ(GEN11_GFX_MSTR_IRQ); return 0; } @@ -4368,8 +4172,6 @@ static void i8xx_irq_reset(struct drm_device *dev) i9xx_pipestat_irq_reset(dev_priv); - I915_WRITE16(HWSTAM, 0xffff); - GEN2_IRQ_RESET(); } @@ -4513,7 +4315,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) I915_WRITE16(IIR, iir); if (iir & I915_USER_INTERRUPT) - notify_ring(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); if (iir & 
I915_MASTER_ERROR_INTERRUPT) i8xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4537,8 +4339,6 @@ static void i915_irq_reset(struct drm_device *dev) i9xx_pipestat_irq_reset(dev_priv); - I915_WRITE(HWSTAM, 0xffffffff); - GEN3_IRQ_RESET(); } @@ -4623,7 +4423,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) I915_WRITE(IIR, iir); if (iir & I915_USER_INTERRUPT) - notify_ring(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4648,8 +4448,6 @@ static void i965_irq_reset(struct drm_device *dev) i9xx_pipestat_irq_reset(dev_priv); - I915_WRITE(HWSTAM, 0xffffffff); - GEN3_IRQ_RESET(); } @@ -4770,10 +4568,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) I915_WRITE(IIR, iir); if (iir & I915_USER_INTERRUPT) - notify_ring(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); if (iir & I915_BSD_USER_INTERRUPT) - notify_ring(dev_priv->engine[VCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4802,6 +4600,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv) struct intel_rps *rps = &dev_priv->gt_pm.rps; int i; + if (IS_I945GM(dev_priv)) + i945gm_vblank_work_init(dev_priv); + intel_hpd_init_work(dev_priv); INIT_WORK(&rps->work, gen6_pm_rps_work); @@ -4836,24 +4637,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) >= 8) rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; - if (IS_GEN2(dev_priv)) { - /* Gen2 doesn't have a hardware frame counter */ - dev->max_vblank_count = 0; - } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { - dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) dev->driver->get_vblank_counter = g4x_get_vblank_counter; - } else { + else if (INTEL_GEN(dev_priv) >= 3) dev->driver->get_vblank_counter = i915_get_vblank_counter; - dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - } - /* - * Opt out of the vblank disable timer on everything except gen2. - * Gen2 doesn't have a hardware frame counter and so depends on - * vblank interrupts to produce sane vblank seuquence numbers. - */ - if (!IS_GEN2(dev_priv)) - dev->vblank_disable_immediate = true; + dev->vblank_disable_immediate = true; /* Most platforms treat the display irq block as an always-on * power domain. 
vlv/chv can disable it at runtime and need @@ -4910,8 +4699,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->disable_vblank = gen8_disable_vblank; if (IS_GEN9_LP(dev_priv)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; - else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || - HAS_PCH_CNP(dev_priv)) + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; else dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; @@ -4924,14 +4712,21 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->disable_vblank = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else { - if (IS_GEN2(dev_priv)) { + if (IS_GEN(dev_priv, 2)) { dev->driver->irq_preinstall = i8xx_irq_reset; dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_reset; dev->driver->enable_vblank = i8xx_enable_vblank; dev->driver->disable_vblank = i8xx_disable_vblank; - } else if (IS_GEN3(dev_priv)) { + } else if (IS_I945GM(dev_priv)) { + dev->driver->irq_preinstall = i915_irq_reset; + dev->driver->irq_postinstall = i915_irq_postinstall; + dev->driver->irq_uninstall = i915_irq_reset; + dev->driver->irq_handler = i915_irq_handler; + dev->driver->enable_vblank = i945gm_enable_vblank; + dev->driver->disable_vblank = i945gm_disable_vblank; + } else if (IS_GEN(dev_priv, 3)) { dev->driver->irq_preinstall = i915_irq_reset; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_reset; @@ -4961,6 +4756,9 @@ void intel_irq_fini(struct drm_i915_private *i915) { int i; + if (IS_I945GM(i915)) + i945gm_vblank_work_fini(i915); + for (i = 0; i < MAX_L3_SLICES; ++i) kfree(i915->l3_parity.remap_info[i]); } diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 2e0356561839..b5be0abbba35 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -77,7 +77,7 @@ i915_param_named(error_capture, bool, 0600, "triaging and debugging hangs."); #endif -i915_param_named_unsafe(enable_hangcheck, bool, 0644, +i915_param_named_unsafe(enable_hangcheck, bool, 0600, "Periodically check GPU activity for detecting hangs. " "WARNING: Disabling this can cause system wide hangs. " "(default: true)"); @@ -97,8 +97,10 @@ i915_param_named_unsafe(disable_power_well, int, 0400, i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)"); -i915_param_named(fastboot, bool, 0600, - "Try to skip unnecessary mode sets at boot time (default: false)"); +i915_param_named(fastboot, int, 0600, + "Try to skip unnecessary mode sets at boot time " + "(0=disabled, 1=enabled) " + "Default: -1 (use per-chip default)"); i915_param_named_unsafe(prefault_disable, bool, 0600, "Disable page prefaulting for pread/pwrite/reloc (default:false). " @@ -203,3 +205,33 @@ void i915_params_dump(const struct i915_params *params, struct drm_printer *p) I915_PARAMS_FOR_EACH(PRINT); #undef PRINT } + +static __always_inline void dup_param(const char *type, void *x) +{ + if (!__builtin_strcmp(type, "char *")) + *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC); +} + +void i915_params_copy(struct i915_params *dest, const struct i915_params *src) +{ + *dest = *src; +#define DUP(T, x, ...) 
dup_param(#T, &dest->x); + I915_PARAMS_FOR_EACH(DUP); +#undef DUP +} + +static __always_inline void free_param(const char *type, void *x) +{ + if (!__builtin_strcmp(type, "char *")) { + kfree(*(void **)x); + *(void **)x = NULL; + } +} + +/* free the allocated members, *not* the passed in params itself */ +void i915_params_free(struct i915_params *params) +{ +#define FREE(T, x, ...) free_param(#T, ¶ms->x); + I915_PARAMS_FOR_EACH(FREE); +#undef FREE +} diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 7e56c516c815..3f14e9881a0d 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -33,6 +33,15 @@ struct drm_printer; #define ENABLE_GUC_SUBMISSION BIT(0) #define ENABLE_GUC_LOAD_HUC BIT(1) +/* + * Invoke param, a function-like macro, for each i915 param, with arguments: + * + * param(type, name, value) + * + * type: parameter type, one of {bool, int, unsigned int, char *} + * name: name of the parameter + * value: initial/default value of the parameter + */ #define I915_PARAMS_FOR_EACH(param) \ param(char *, vbt_firmware, NULL) \ param(int, modeset, -1) \ @@ -54,10 +63,10 @@ struct drm_printer; param(int, edp_vswing, 0) \ param(int, reset, 2) \ param(unsigned int, inject_load_failure, 0) \ + param(int, fastboot, -1) \ /* leave bools at the end to not create holes */ \ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ param(bool, enable_hangcheck, true) \ - param(bool, fastboot, false) \ param(bool, prefault_disable, false) \ param(bool, load_detect_test, false) \ param(bool, force_reset_modeset_test, false) \ @@ -78,6 +87,8 @@ struct i915_params { extern struct i915_params i915_modparams __read_mostly; void i915_params_dump(const struct i915_params *params, struct drm_printer *p); +void i915_params_copy(struct i915_params *dest, const struct i915_params *src); +void i915_params_free(struct i915_params *params); #endif diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 6350db5503cd..6ffb85ddac53 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -26,13 +26,46 @@ #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> +#include <drm/drm_drv.h> + #include "i915_drv.h" +#include "i915_globals.h" #include "i915_selftest.h" -#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x) +#define PLATFORM(x) .platform = (x) #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1) -#define GEN_DEFAULT_PIPEOFFSETS \ +#define I845_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + } + +#define I9XX_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + } + +#define IVB_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + } + +#define HSW_PIPE_OFFSETS \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ @@ -46,7 +79,7 @@ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ } -#define GEN_CHV_PIPEOFFSETS \ +#define CHV_PIPE_OFFSETS \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = 
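The i915_params_copy()/i915_params_free() helpers added in the i915_params.c hunk above walk the I915_PARAMS_FOR_EACH() X-macro and dispatch on the stringified member type, so only the char * parameters are deep-copied or freed; __builtin_strcmp() against a string literal folds at compile time, so the branch vanishes for every non-pointer member. A minimal, self-contained userspace sketch of the same pattern follows — the parameter list and helper names are illustrative stand-ins, with strdup()/free() in place of kstrdup()/kfree():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* X-macro list of parameters: param(type, name, default) -- illustrative only */
#define PARAMS_FOR_EACH(param) \
        param(char *, firmware_path, NULL) \
        param(int, modeset, -1) \
        param(bool, enable_hangcheck, true)

struct params {
#define MEMBER(T, name, ...) T name;
        PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};

static void dup_param(const char *type, void *x)
{
        /* Only pointer-typed members need a deep copy; NULL stays NULL. */
        if (!strcmp(type, "char *") && *(char **)x)
                *(char **)x = strdup(*(char **)x);
}

static void free_param(const char *type, void *x)
{
        if (!strcmp(type, "char *")) {
                free(*(char **)x);
                *(char **)x = NULL;
        }
}

static void params_copy(struct params *dst, const struct params *src)
{
        *dst = *src;                    /* shallow copy first ... */
#define DUP(T, name, ...) dup_param(#T, &dst->name);
        PARAMS_FOR_EACH(DUP)            /* ... then duplicate the pointer members */
#undef DUP
}

static void params_free(struct params *p)
{
#define FREE(T, name, ...) free_param(#T, &p->name);
        PARAMS_FOR_EACH(FREE)
#undef FREE
}

int main(void)
{
        struct params a = { .firmware_path = strdup("vbt.bin"), .modeset = 1 };
        struct params b;

        params_copy(&b, &a);
        printf("%s %d\n", b.firmware_path, b.modeset);

        params_free(&a);
        params_free(&b);
        return 0;
}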
PIPE_B_OFFSET, \ @@ -58,76 +91,126 @@ [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \ } -#define CURSOR_OFFSETS \ - .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } +#define I845_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + } + +#define I9XX_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = CURSOR_B_OFFSET, \ + } + +#define CHV_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = CURSOR_B_OFFSET, \ + [PIPE_C] = CHV_CURSOR_C_OFFSET, \ + } #define IVB_CURSOR_OFFSETS \ - .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = IVB_CURSOR_B_OFFSET, \ + [PIPE_C] = IVB_CURSOR_C_OFFSET, \ + } -#define BDW_COLORS \ - .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } +#define I9XX_COLORS \ + .color = { .gamma_lut_size = 256 } +#define I965_COLORS \ + .color = { .gamma_lut_size = 129, \ + .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ + } +#define ILK_COLORS \ + .color = { .gamma_lut_size = 1024 } +#define IVB_COLORS \ + .color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 } #define CHV_COLORS \ - .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } + .color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ + .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ + } #define GLK_COLORS \ - .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } + .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ + DRM_COLOR_LUT_EQUAL_CHANNELS, \ + } /* Keep in gen based order, and chronological order within a gen */ #define GEN_DEFAULT_PAGE_SIZES \ .page_sizes = I915_GTT_PAGE_SIZE_4K -#define GEN2_FEATURES \ +#define I830_FEATURES \ + GEN(2), \ + .is_mobile = 1, \ + .num_pipes = 2, \ + .display.has_overlay = 1, \ + .display.cursor_needs_physical = 1, \ + .display.overlay_needs_physical = 1, \ + .display.has_gmch = 1, \ + .gpu_reset_clobbers_display = true, \ + .hws_needs_physical = 1, \ + .unfenced_needs_alignment = 1, \ + .engine_mask = BIT(RCS0), \ + .has_snoop = true, \ + .has_coherent_ggtt = false, \ + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + I9XX_COLORS, \ + GEN_DEFAULT_PAGE_SIZES + +#define I845_FEATURES \ GEN(2), \ .num_pipes = 1, \ .display.has_overlay = 1, \ .display.overlay_needs_physical = 1, \ - .display.has_gmch_display = 1, \ + .display.has_gmch = 1, \ + .gpu_reset_clobbers_display = true, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .ring_mask = RENDER_RING, \ + .engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - CURSOR_OFFSETS + I845_PIPE_OFFSETS, \ + I845_CURSOR_OFFSETS, \ + I9XX_COLORS, \ + GEN_DEFAULT_PAGE_SIZES static const struct intel_device_info intel_i830_info = { - GEN2_FEATURES, + I830_FEATURES, PLATFORM(INTEL_I830), - .is_mobile = 1, - .display.cursor_needs_physical = 1, - .num_pipes = 2, /* legal, last one wins */ }; static const struct intel_device_info intel_i845g_info = { - GEN2_FEATURES, + I845_FEATURES, PLATFORM(INTEL_I845G), }; static const struct intel_device_info intel_i85x_info = { - GEN2_FEATURES, + I830_FEATURES, PLATFORM(INTEL_I85X), - .is_mobile = 1, - .num_pipes = 2, /* legal, last one wins */ - .display.cursor_needs_physical = 1, .display.has_fbc = 1, }; static const struct intel_device_info 
intel_i865g_info = { - GEN2_FEATURES, + I845_FEATURES, PLATFORM(INTEL_I865G), }; #define GEN3_FEATURES \ GEN(3), \ .num_pipes = 2, \ - .display.has_gmch_display = 1, \ - .ring_mask = RENDER_RING, \ + .display.has_gmch = 1, \ + .gpu_reset_clobbers_display = true, \ + .engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - CURSOR_OFFSETS + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + I9XX_COLORS, \ + GEN_DEFAULT_PAGE_SIZES static const struct intel_device_info intel_i915g_info = { GEN3_FEATURES, @@ -185,7 +268,14 @@ static const struct intel_device_info intel_g33_info = { .display.has_overlay = 1, }; -static const struct intel_device_info intel_pineview_info = { +static const struct intel_device_info intel_pineview_g_info = { + GEN3_FEATURES, + PLATFORM(INTEL_PINEVIEW), + .display.has_hotplug = 1, + .display.has_overlay = 1, +}; + +static const struct intel_device_info intel_pineview_m_info = { GEN3_FEATURES, PLATFORM(INTEL_PINEVIEW), .is_mobile = 1, @@ -197,13 +287,15 @@ static const struct intel_device_info intel_pineview_info = { GEN(4), \ .num_pipes = 2, \ .display.has_hotplug = 1, \ - .display.has_gmch_display = 1, \ - .ring_mask = RENDER_RING, \ + .display.has_gmch = 1, \ + .gpu_reset_clobbers_display = true, \ + .engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - CURSOR_OFFSETS + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + I965_COLORS, \ + GEN_DEFAULT_PAGE_SIZES static const struct intel_device_info intel_i965g_info = { GEN4_FEATURES, @@ -227,7 +319,8 @@ static const struct intel_device_info intel_i965gm_info = { static const struct intel_device_info intel_g45_info = { GEN4_FEATURES, PLATFORM(INTEL_G45), - .ring_mask = RENDER_RING | BSD_RING, + .engine_mask = BIT(RCS0) | BIT(VCS0), + .gpu_reset_clobbers_display = false, }; static const struct intel_device_info intel_gm45_info = { @@ -236,21 +329,23 @@ static const struct intel_device_info intel_gm45_info = { .is_mobile = 1, .display.has_fbc = 1, .display.supports_tv = 1, - .ring_mask = RENDER_RING | BSD_RING, + .engine_mask = BIT(RCS0) | BIT(VCS0), + .gpu_reset_clobbers_display = false, }; #define GEN5_FEATURES \ GEN(5), \ .num_pipes = 2, \ .display.has_hotplug = 1, \ - .ring_mask = RENDER_RING | BSD_RING, \ + .engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ /* ilk does support rc6, but we do not implement [power] contexts */ \ .has_rc6 = 0, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - CURSOR_OFFSETS + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + ILK_COLORS, \ + GEN_DEFAULT_PAGE_SIZES static const struct intel_device_info intel_ironlake_d_info = { GEN5_FEATURES, @@ -269,15 +364,17 @@ static const struct intel_device_info intel_ironlake_m_info = { .num_pipes = 2, \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ - .ppgtt = INTEL_PPGTT_ALIASING, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - CURSOR_OFFSETS + .ppgtt_type = INTEL_PPGTT_ALIASING, \ + .ppgtt_size = 31, \ + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + ILK_COLORS, \ + GEN_DEFAULT_PAGE_SIZES #define SNB_D_PLATFORM \ GEN6_FEATURES, \ @@ -314,15 +411,17 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { .num_pipes = 3, \ 
.display.has_hotplug = 1, \ .display.has_fbc = 1, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ - .ppgtt = INTEL_PPGTT_FULL, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - IVB_CURSOR_OFFSETS + .ppgtt_type = INTEL_PPGTT_FULL, \ + .ppgtt_size = 31, \ + IVB_PIPE_OFFSETS, \ + IVB_CURSOR_OFFSETS, \ + IVB_COLORS, \ + GEN_DEFAULT_PAGE_SIZES #define IVB_D_PLATFORM \ GEN7_FEATURES, \ @@ -370,26 +469,29 @@ static const struct intel_device_info intel_valleyview_info = { .num_pipes = 2, .has_runtime_pm = 1, .has_rc6 = 1, - .display.has_gmch_display = 1, + .display.has_gmch = 1, .display.has_hotplug = 1, - .ppgtt = INTEL_PPGTT_FULL, + .ppgtt_type = INTEL_PPGTT_FULL, + .ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), .display_mmio_offset = VLV_DISPLAY_BASE, + I9XX_PIPE_OFFSETS, + I9XX_CURSOR_OFFSETS, + I965_COLORS, GEN_DEFAULT_PAGE_SIZES, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS }; #define G75_FEATURES \ GEN7_FEATURES, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .display.has_ddi = 1, \ .has_fpga_dbg = 1, \ .display.has_psr = 1, \ .display.has_dp_mst = 1, \ .has_rc6p = 0 /* RC6p removed-by HSW */, \ + HSW_PIPE_OFFSETS, \ .has_runtime_pm = 1 #define HSW_PLATFORM \ @@ -415,11 +517,11 @@ static const struct intel_device_info intel_haswell_gt3_info = { #define GEN8_FEATURES \ G75_FEATURES, \ GEN(8), \ - BDW_COLORS, \ .page_sizes = I915_GTT_PAGE_SIZE_4K | \ I915_GTT_PAGE_SIZE_2M, \ .has_logical_ring_contexts = 1, \ - .ppgtt = INTEL_PPGTT_FULL_4LVL, \ + .ppgtt_type = INTEL_PPGTT_FULL, \ + .ppgtt_size = 48, \ .has_64bit_reloc = 1, \ .has_reset_engine = 1 @@ -448,7 +550,8 @@ static const struct intel_device_info intel_broadwell_rsvd_info = { static const struct intel_device_info intel_broadwell_gt3_info = { BDW_PLATFORM, .gt = 3, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .engine_mask = + BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; static const struct intel_device_info intel_cherryview_info = { @@ -457,21 +560,22 @@ static const struct intel_device_info intel_cherryview_info = { .num_pipes = 3, .display.has_hotplug = 1, .is_lp = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), .has_64bit_reloc = 1, .has_runtime_pm = 1, .has_rc6 = 1, .has_logical_ring_contexts = 1, - .display.has_gmch_display = 1, - .ppgtt = INTEL_PPGTT_FULL, + .display.has_gmch = 1, + .ppgtt_type = INTEL_PPGTT_FULL, + .ppgtt_size = 32, .has_reset_engine = 1, .has_snoop = true, .has_coherent_ggtt = false, .display_mmio_offset = VLV_DISPLAY_BASE, - GEN_DEFAULT_PAGE_SIZES, - GEN_CHV_PIPEOFFSETS, - CURSOR_OFFSETS, + CHV_PIPE_OFFSETS, + CHV_CURSOR_OFFSETS, CHV_COLORS, + GEN_DEFAULT_PAGE_SIZES, }; #define GEN9_DEFAULT_PAGE_SIZES \ @@ -507,7 +611,8 @@ static const struct intel_device_info intel_skylake_gt2_info = { #define SKL_GT3_PLUS_PLATFORM \ SKL_PLATFORM, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING + .engine_mask = \ + BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) static const struct intel_device_info intel_skylake_gt3_info = { @@ -524,7 +629,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { 
GEN(9), \ .is_lp = 1, \ .display.has_hotplug = 1, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .num_pipes = 3, \ .has_64bit_reloc = 1, \ .display.has_ddi = 1, \ @@ -532,22 +637,22 @@ static const struct intel_device_info intel_skylake_gt4_info = { .display.has_fbc = 1, \ .display.has_psr = 1, \ .has_runtime_pm = 1, \ - .has_pooled_eu = 0, \ .display.has_csr = 1, \ .has_rc6 = 1, \ .display.has_dp_mst = 1, \ .has_logical_ring_contexts = 1, \ .has_logical_ring_preemption = 1, \ .has_guc = 1, \ - .ppgtt = INTEL_PPGTT_FULL_4LVL, \ + .ppgtt_type = INTEL_PPGTT_FULL, \ + .ppgtt_size = 48, \ .has_reset_engine = 1, \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .display.has_ipc = 1, \ - GEN9_DEFAULT_PAGE_SIZES, \ - GEN_DEFAULT_PIPEOFFSETS, \ + HSW_PIPE_OFFSETS, \ IVB_CURSOR_OFFSETS, \ - BDW_COLORS + IVB_COLORS, \ + GEN9_DEFAULT_PAGE_SIZES static const struct intel_device_info intel_broxton_info = { GEN9_LP_FEATURES, @@ -579,7 +684,8 @@ static const struct intel_device_info intel_kabylake_gt2_info = { static const struct intel_device_info intel_kabylake_gt3_info = { KBL_PLATFORM, .gt = 3, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .engine_mask = + BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; #define CFL_PLATFORM \ @@ -599,7 +705,8 @@ static const struct intel_device_info intel_coffeelake_gt2_info = { static const struct intel_device_info intel_coffeelake_gt3_info = { CFL_PLATFORM, .gt = 3, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .engine_mask = + BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; #define GEN10_FEATURES \ @@ -635,13 +742,22 @@ static const struct intel_device_info intel_cannonlake_info = { }, \ GEN(11), \ .ddb_size = 2048, \ - .has_logical_ring_elsq = 1 + .has_logical_ring_elsq = 1, \ + .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 } static const struct intel_device_info intel_icelake_11_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), + .engine_mask = + BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), +}; + +static const struct intel_device_info intel_elkhartlake_info = { + GEN11_FEATURES, + PLATFORM(INTEL_ELKHARTLAKE), .is_alpha_support = 1, - .ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING, + .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0), + .ppgtt_size = 36, }; #undef GEN @@ -667,7 +783,8 @@ static const struct pci_device_id pciidlist[] = { INTEL_I965GM_IDS(&intel_i965gm_info), INTEL_GM45_IDS(&intel_gm45_info), INTEL_G45_IDS(&intel_g45_info), - INTEL_PINEVIEW_IDS(&intel_pineview_info), + INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info), + INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info), INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info), @@ -701,6 +818,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info), INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_CFL_H_GT1_IDS(&intel_coffeelake_gt1_info), INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), @@ -708,8 +826,11 @@ static const struct pci_device_id pciidlist[] = { INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info), 
INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), + INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info), + INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_CNL_IDS(&intel_cannonlake_info), INTEL_ICL_11_IDS(&intel_icelake_11_info), + INTEL_EHL_IDS(&intel_elkhartlake_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -787,6 +908,10 @@ static int __init i915_init(void) bool use_kms = true; int err; + err = i915_globals_init(); + if (err) + return err; + err = i915_mock_selftests(); if (err) return err > 0 ? 0 : err; @@ -818,6 +943,7 @@ static void __exit i915_exit(void) return; pci_unregister_driver(&i915_pci_driver); + i915_globals_exit(); } module_init(i915_init); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 4529edfdcfc8..39a4804091d7 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1202,7 +1202,7 @@ static int i915_oa_read(struct i915_perf_stream *stream, static struct intel_context *oa_pin_context(struct drm_i915_private *i915, struct i915_gem_context *ctx) { - struct intel_engine_cs *engine = i915->engine[RCS]; + struct intel_engine_cs *engine = i915->engine[RCS0]; struct intel_context *ce; int ret; @@ -1364,8 +1364,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) free_oa_buffer(dev_priv); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put(dev_priv, stream->wakeref); if (stream->ctx) oa_put_render_ctx_id(stream); @@ -1509,9 +1509,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv) goto unlock; } - ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC); - if (ret) - goto err_unref; + i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC); /* PreHSW required 512K alignment, HSW requires 16M */ vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); @@ -1629,13 +1627,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv) * It's fine to put out-of-date values into these per-context registers * in the case that the OA unit has been disabled. */ -static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, - u32 *reg_state, - const struct i915_oa_config *oa_config) +static void +gen8_update_reg_state_unlocked(struct intel_context *ce, + u32 *reg_state, + const struct i915_oa_config *oa_config) { - struct drm_i915_private *dev_priv = ctx->i915; - u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; - u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; + struct drm_i915_private *i915 = ce->gem_context->i915; + u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset; + u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset; /* The MMIO offsets for Flex EU registers aren't contiguous */ i915_reg_t flex_regs[] = { EU_PERF_CNTL0, @@ -1649,8 +1648,8 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, int i; CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL, - (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | - (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | + (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (i915->perf.oa.periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | GEN8_OA_COUNTER_RESUME); for (i = 0; i < ARRAY_SIZE(flex_regs); i++) { @@ -1677,6 +1676,10 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, CTX_REG(reg_state, state_offset, flex_regs[i], value); } + + CTX_REG(reg_state, + CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, + gen8_make_rpcs(i915, &ce->sseu)); } /* @@ -1706,7 +1709,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, const struct i915_oa_config *oa_config) { - struct intel_engine_cs *engine = dev_priv->engine[RCS]; + struct intel_engine_cs *engine = dev_priv->engine[RCS0]; unsigned int map_type = i915_coherent_map_type(dev_priv); struct i915_gem_context *ctx; struct i915_request *rq; @@ -1735,11 +1738,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, /* Update all contexts now that we've stalled the submission. */ list_for_each_entry(ctx, &dev_priv->contexts.list, link) { - struct intel_context *ce = to_intel_context(ctx, engine); + struct intel_context *ce = intel_context_lookup(ctx, engine); u32 *regs; /* OA settings will be set upon first use */ - if (!ce->state) + if (!ce || !ce->state) continue; regs = i915_gem_object_pin_map(ce->state->obj, map_type); @@ -1749,7 +1752,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, ce->state->obj->mm.dirty = true; regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs); - gen8_update_reg_state_unlocked(ctx, regs, oa_config); + gen8_update_reg_state_unlocked(ce, regs, oa_config); i915_gem_object_unpin_map(ce->state->obj); } @@ -1796,7 +1799,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) * be read back from automatically triggered reports, as part of the * RPT_ID field. */ - if (IS_GEN(dev_priv, 9, 11)) { + if (IS_GEN_RANGE(dev_priv, 9, 11)) { I915_WRITE(GEN8_OA_DEBUG, _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); @@ -1917,10 +1920,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) static void gen7_oa_disable(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = &stream->dev_priv->uncore; - I915_WRITE(GEN7_OACONTROL, 0); - if (intel_wait_for_register(dev_priv, + intel_uncore_write(uncore, GEN7_OACONTROL, 0); + if (intel_wait_for_register(uncore, GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 50)) DRM_ERROR("wait for OA to be disabled timed out\n"); @@ -1928,10 +1931,10 @@ static void gen7_oa_disable(struct i915_perf_stream *stream) static void gen8_oa_disable(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = &stream->dev_priv->uncore; - I915_WRITE(GEN8_OACONTROL, 0); - if (intel_wait_for_register(dev_priv, + intel_uncore_write(uncore, GEN8_OACONTROL, 0); + if (intel_wait_for_register(uncore, GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 50)) DRM_ERROR("wait for OA to be disabled timed out\n"); @@ -2087,8 +2090,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, * In our case we are expecting that taking pm + FORCEWAKE * references will effectively disable RC6. 
*/ - intel_runtime_pm_get(dev_priv); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + stream->wakeref = intel_runtime_pm_get(dev_priv); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); ret = alloc_oa_buffer(dev_priv); if (ret) @@ -2098,21 +2101,21 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, if (ret) goto err_lock; + stream->ops = &i915_oa_stream_ops; + dev_priv->perf.oa.exclusive_stream = stream; + ret = dev_priv->perf.oa.ops.enable_metric_set(stream); if (ret) { DRM_DEBUG("Unable to enable metric set\n"); goto err_enable; } - stream->ops = &i915_oa_stream_ops; - - dev_priv->perf.oa.exclusive_stream = stream; - mutex_unlock(&dev_priv->drm.struct_mutex); return 0; err_enable: + dev_priv->perf.oa.exclusive_stream = NULL; dev_priv->perf.oa.ops.disable_metric_set(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); @@ -2122,8 +2125,8 @@ err_lock: err_oa_buf_alloc: put_oa_config(dev_priv, stream->oa_config); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put(dev_priv, stream->wakeref); err_config: if (stream->ctx) @@ -2133,17 +2136,17 @@ err_config: } void i915_oa_init_reg_state(struct intel_engine_cs *engine, - struct i915_gem_context *ctx, - u32 *reg_state) + struct intel_context *ce, + u32 *regs) { struct i915_perf_stream *stream; - if (engine->id != RCS) + if (engine->class != RENDER_CLASS) return; stream = engine->i915->perf.oa.exclusive_stream; if (stream) - gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config); + gen8_update_reg_state_unlocked(ce, regs, stream->oa_config); } /** @@ -2646,7 +2649,7 @@ err: static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) { return div64_u64(1000000000ULL * (2ULL << exponent), - 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz); + 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz); } /** @@ -2876,12 +2879,24 @@ void i915_perf_register(struct drm_i915_private *dev_priv) sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr); - if (IS_HASWELL(dev_priv)) { - i915_perf_load_test_config_hsw(dev_priv); - } else if (IS_BROADWELL(dev_priv)) { - i915_perf_load_test_config_bdw(dev_priv); - } else if (IS_CHERRYVIEW(dev_priv)) { - i915_perf_load_test_config_chv(dev_priv); + if (INTEL_GEN(dev_priv) >= 11) { + i915_perf_load_test_config_icl(dev_priv); + } else if (IS_CANNONLAKE(dev_priv)) { + i915_perf_load_test_config_cnl(dev_priv); + } else if (IS_COFFEELAKE(dev_priv)) { + if (IS_CFL_GT2(dev_priv)) + i915_perf_load_test_config_cflgt2(dev_priv); + if (IS_CFL_GT3(dev_priv)) + i915_perf_load_test_config_cflgt3(dev_priv); + } else if (IS_GEMINILAKE(dev_priv)) { + i915_perf_load_test_config_glk(dev_priv); + } else if (IS_KABYLAKE(dev_priv)) { + if (IS_KBL_GT2(dev_priv)) + i915_perf_load_test_config_kblgt2(dev_priv); + else if (IS_KBL_GT3(dev_priv)) + i915_perf_load_test_config_kblgt3(dev_priv); + } else if (IS_BROXTON(dev_priv)) { + i915_perf_load_test_config_bxt(dev_priv); } else if (IS_SKYLAKE(dev_priv)) { if (IS_SKL_GT2(dev_priv)) i915_perf_load_test_config_sklgt2(dev_priv); @@ -2889,25 +2904,13 @@ void i915_perf_register(struct drm_i915_private *dev_priv) i915_perf_load_test_config_sklgt3(dev_priv); else if (IS_SKL_GT4(dev_priv)) i915_perf_load_test_config_sklgt4(dev_priv); - } else if (IS_BROXTON(dev_priv)) { - i915_perf_load_test_config_bxt(dev_priv); - } else if (IS_KABYLAKE(dev_priv)) { - if (IS_KBL_GT2(dev_priv)) - 
i915_perf_load_test_config_kblgt2(dev_priv); - else if (IS_KBL_GT3(dev_priv)) - i915_perf_load_test_config_kblgt3(dev_priv); - } else if (IS_GEMINILAKE(dev_priv)) { - i915_perf_load_test_config_glk(dev_priv); - } else if (IS_COFFEELAKE(dev_priv)) { - if (IS_CFL_GT2(dev_priv)) - i915_perf_load_test_config_cflgt2(dev_priv); - if (IS_CFL_GT3(dev_priv)) - i915_perf_load_test_config_cflgt3(dev_priv); - } else if (IS_CANNONLAKE(dev_priv)) { - i915_perf_load_test_config_cnl(dev_priv); - } else if (IS_ICELAKE(dev_priv)) { - i915_perf_load_test_config_icl(dev_priv); - } + } else if (IS_CHERRYVIEW(dev_priv)) { + i915_perf_load_test_config_chv(dev_priv); + } else if (IS_BROADWELL(dev_priv)) { + i915_perf_load_test_config_bdw(dev_priv); + } else if (IS_HASWELL(dev_priv)) { + i915_perf_load_test_config_hsw(dev_priv); +} if (dev_priv->perf.oa.test_config.id == 0) goto sysfs_error; @@ -3021,7 +3024,7 @@ static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) (addr >= 0x182300 && addr <= 0x1823A4); } -static uint32_t mask_reg_value(u32 reg, u32 val) +static u32 mask_reg_value(u32 reg, u32 val) { /* HALF_SLICE_CHICKEN2 is programmed with a the * WaDisableSTUnitPowerOptimization workaround. Make sure the value @@ -3052,7 +3055,7 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv, if (!n_regs) return NULL; - if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2)) + if (!access_ok(regs, n_regs * sizeof(u32) * 2)) return ERR_PTR(-EFAULT); /* No is_valid function means we're not allowing any register to be programmed. */ @@ -3415,7 +3418,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.ops.read = gen8_oa_read; dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read; - if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) { + if (IS_GEN_RANGE(dev_priv, 8, 9)) { dev_priv->perf.oa.ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; dev_priv->perf.oa.ops.is_valid_mux_reg = @@ -3431,7 +3434,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set; - if (IS_GEN8(dev_priv)) { + if (IS_GEN(dev_priv, 8)) { dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120; dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce; @@ -3442,7 +3445,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); } - } else if (IS_GEN(dev_priv, 10, 11)) { + } else if (IS_GEN_RANGE(dev_priv, 10, 11)) { dev_priv->perf.oa.ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; dev_priv->perf.oa.ops.is_valid_mux_reg = @@ -3471,7 +3474,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv) spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock); oa_sample_rate_hard_limit = 1000 * - (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2); + (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2); dev_priv->perf.sysctl_header = register_sysctl_table(dev_root); mutex_init(&dev_priv->perf.metrics_lock); diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d6c8f8fdfda5..46a52da3db29 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -5,6 +5,7 @@ */ #include <linux/irq.h> +#include <linux/pm_runtime.h> #include "i915_pmu.h" #include "intel_ringbuffer.h" #include "i915_drv.h" @@ -101,7 +102,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) * * Use RCS as proxy for all engines. 
*/ - else if (intel_engine_supports_stats(i915->engine[RCS])) + else if (intel_engine_supports_stats(i915->engine[RCS0])) enable &= ~BIT(I915_SAMPLE_BUSY); /* @@ -148,14 +149,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915) spin_unlock_irq(&i915->pmu.lock); } -static bool grab_forcewake(struct drm_i915_private *i915, bool fw) -{ - if (!fw) - intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); - - return true; -} - static void add_sample(struct i915_pmu_sample *sample, u32 val) { @@ -167,50 +160,51 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) { struct intel_engine_cs *engine; enum intel_engine_id id; - bool fw = false; + intel_wakeref_t wakeref; + unsigned long flags; if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0) return; - if (!dev_priv->gt.awake) - return; - - if (!intel_runtime_pm_get_if_in_use(dev_priv)) + wakeref = 0; + if (READ_ONCE(dev_priv->gt.awake)) + wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + if (!wakeref) return; + spin_lock_irqsave(&dev_priv->uncore.lock, flags); for_each_engine(engine, dev_priv, id) { - u32 current_seqno = intel_engine_get_seqno(engine); - u32 last_seqno = intel_engine_last_submit(engine); + struct intel_engine_pmu *pmu = &engine->pmu; + bool busy; u32 val; - val = !i915_seqno_passed(current_seqno, last_seqno); - - if (val) - add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY], - period_ns); - - if (val && (engine->pmu.enable & - (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) { - fw = grab_forcewake(dev_priv, fw); - - val = I915_READ_FW(RING_CTL(engine->mmio_base)); - } else { - val = 0; - } + val = I915_READ_FW(RING_CTL(engine->mmio_base)); + if (val == 0) /* powerwell off => engine idle */ + continue; if (val & RING_WAIT) - add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT], - period_ns); - + add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); if (val & RING_WAIT_SEMAPHORE) - add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA], - period_ns); - } + add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); - if (fw) - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + /* + * While waiting on a semaphore or event, MI_MODE reports the + * ring as idle. However, previously using the seqno, and with + * execlists sampling, we account for the ring waiting as the + * engine being busy. Therefore, we record the sample as being + * busy if either waiting or !idle. 
+ */ + busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT); + if (!busy) { + val = I915_READ_FW(RING_MI_MODE(engine->mmio_base)); + busy = !(val & MODE_IDLE); + } + if (busy) + add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); + } + spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); } static void @@ -227,11 +221,12 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) u32 val; val = dev_priv->gt_pm.rps.cur_freq; - if (dev_priv->gt.awake && - intel_runtime_pm_get_if_in_use(dev_priv)) { - val = intel_get_cagf(dev_priv, - I915_READ_NOTRACE(GEN6_RPSTAT1)); - intel_runtime_pm_put(dev_priv); + if (dev_priv->gt.awake) { + intel_wakeref_t wakeref; + + with_intel_runtime_pm_if_in_use(dev_priv, wakeref) + val = intel_get_cagf(dev_priv, + I915_READ_NOTRACE(GEN6_RPSTAT1)); } add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], @@ -443,12 +438,14 @@ static u64 __get_rc6(struct drm_i915_private *i915) static u64 get_rc6(struct drm_i915_private *i915) { #if IS_ENABLED(CONFIG_PM) + intel_wakeref_t wakeref; unsigned long flags; u64 val; - if (intel_runtime_pm_get_if_in_use(i915)) { + wakeref = intel_runtime_pm_get_if_in_use(i915); + if (wakeref) { val = __get_rc6(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); /* * If we are coming back from being runtime suspended we must @@ -478,7 +475,6 @@ static u64 get_rc6(struct drm_i915_private *i915) * counter value. */ spin_lock_irqsave(&i915->pmu.lock, flags); - spin_lock(&kdev->power.lock); /* * After the above branch intel_runtime_pm_get_if_in_use failed @@ -491,16 +487,13 @@ static u64 get_rc6(struct drm_i915_private *i915) * suspended and if not we cannot do better than report the last * known RC6 value. */ - if (kdev->power.runtime_status == RPM_SUSPENDED) { - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - i915->pmu.suspended_jiffies_last = - kdev->power.suspended_jiffies; + if (pm_runtime_status_suspended(kdev)) { + val = pm_runtime_suspended_time(kdev); - val = kdev->power.suspended_jiffies - - i915->pmu.suspended_jiffies_last; - val += jiffies - kdev->power.accounting_timestamp; + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) + i915->pmu.suspended_time_last = val; - val = jiffies_to_nsecs(val); + val -= i915->pmu.suspended_time_last; val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; @@ -510,7 +503,6 @@ static u64 get_rc6(struct drm_i915_private *i915) val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; } - spin_unlock(&kdev->power.lock); spin_unlock_irqrestore(&i915->pmu.lock, flags); } @@ -594,7 +586,8 @@ static void i915_pmu_enable(struct perf_event *event) * Update the bitmask of enabled events and increment * the event reference counter. 
*/ - GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); + BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS); + GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); i915->pmu.enable |= BIT_ULL(bit); i915->pmu.enable_count[bit]++; @@ -615,11 +608,16 @@ static void i915_pmu_enable(struct perf_event *event) engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); - GEM_BUG_ON(!engine); - engine->pmu.enable |= BIT(sample); - GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); + BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != + I915_ENGINE_SAMPLE_COUNT); + BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != + I915_ENGINE_SAMPLE_COUNT); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); + + engine->pmu.enable |= BIT(sample); engine->pmu.enable_count[sample]++; } @@ -649,9 +647,11 @@ static void i915_pmu_disable(struct perf_event *event) engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); - GEM_BUG_ON(!engine); - GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); + + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); + /* * Decrement the reference count and clear the enabled * bitmask when the last listener on an event goes away. @@ -660,7 +660,7 @@ static void i915_pmu_disable(struct perf_event *event) engine->pmu.enable &= ~BIT(sample); } - GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); + GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); /* * Decrement the reference count and clear the enabled diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 7f164ca3db12..4fc4f2478301 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -31,6 +31,8 @@ enum { ((1 << I915_PMU_SAMPLE_BITS) + \ (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) +#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1) + struct i915_pmu_sample { u64 cur; }; @@ -95,9 +97,9 @@ struct i915_pmu { */ struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; /** - * @suspended_jiffies_last: Cached suspend time from PM core. + * @suspended_time_last: Cached suspend time from PM core. */ - unsigned long suspended_jiffies_last; + u64 suspended_time_last; /** * @i915_attr: Memory block holding device attributes. 
*/ diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h new file mode 100644 index 000000000000..cc44ebd3b553 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_priolist_types.h @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#ifndef _I915_PRIOLIST_TYPES_H_ +#define _I915_PRIOLIST_TYPES_H_ + +#include <linux/list.h> +#include <linux/rbtree.h> + +#include <uapi/drm/i915_drm.h> + +enum { + I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, + I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, + I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1, + + I915_PRIORITY_INVALID = INT_MIN +}; + +#define I915_USER_PRIORITY_SHIFT 3 +#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT) + +#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT) +#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1) + +#define I915_PRIORITY_WAIT ((u8)BIT(0)) +#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1)) +#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(2)) + +#define __NO_PREEMPTION (I915_PRIORITY_WAIT) + +struct i915_priolist { + struct list_head requests[I915_PRIORITY_COUNT]; + struct rb_node node; + unsigned long used; + int priority; +}; + +#endif /* _I915_PRIOLIST_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index eeaa3d506d95..969e514916ab 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -52,7 +52,7 @@ enum vgt_g2v_type { /* * VGT capabilities type */ -#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2) +#define VGT_CAPS_FULL_PPGTT BIT(2) #define VGT_CAPS_HWSP_EMULATION BIT(3) #define VGT_CAPS_HUGE_GTT BIT(4) diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 6fc4b8eeab42..782183b78f49 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -10,12 +10,34 @@ #include "i915_query.h" #include <uapi/drm/i915_drm.h> +static int copy_query_item(void *query_hdr, size_t query_sz, + u32 total_length, + struct drm_i915_query_item *query_item) +{ + if (query_item->length == 0) + return total_length; + + if (query_item->length < total_length) + return -EINVAL; + + if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr), + query_sz)) + return -EFAULT; + + if (!access_ok(u64_to_user_ptr(query_item->data_ptr), + total_length)) + return -EFAULT; + + return 0; +} + static int query_topology_info(struct drm_i915_private *dev_priv, struct drm_i915_query_item *query_item) { - const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu; + const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct drm_i915_query_topology_info topo; u32 slice_length, subslice_length, eu_length, total_length; + int ret; if (query_item->flags != 0) return -EINVAL; @@ -33,23 +55,14 @@ static int query_topology_info(struct drm_i915_private *dev_priv, total_length = sizeof(topo) + slice_length + subslice_length + eu_length; - if (query_item->length == 0) - return total_length; - - if (query_item->length < total_length) - return -EINVAL; - - if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr), - sizeof(topo))) - return -EFAULT; + ret = copy_query_item(&topo, sizeof(topo), total_length, + query_item); + if (ret != 0) + return ret; if (topo.flags != 0) return -EINVAL; - if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr), - total_length)) - return -EFAULT; - memset(&topo, 0, sizeof(topo)); topo.max_slices = sseu->max_slices; 
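The copy_query_item() helper added above captures the query ioctl's calling convention in one place: a zero length reports the required size, a shorter-than-needed length fails with -EINVAL, and otherwise the item header is copied in and the whole output buffer is validated with access_ok() before being filled. A hedged userspace-side sketch of that two-pass usage (error handling trimmed, headers assumed available from the uapi/libdrm install):

	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Userspace sketch of the two-pass contract; not part of the patch. */
	static void *query_topology(int fd, int *len)
	{
		struct drm_i915_query_item item = {
			.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
			.length = 0,			/* pass 1: ask for the size */
		};
		struct drm_i915_query q = {
			.num_items = 1,
			.items_ptr = (uintptr_t)&item,
		};
		void *buf;

		if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
			return NULL;

		buf = calloc(1, item.length);		/* kernel reported the size */
		if (!buf)
			return NULL;
		item.data_ptr = (uintptr_t)buf;

		if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q)) {	/* pass 2: fill it */
			free(buf);
			return NULL;
		}

		*len = item.length;
		return buf;
	}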
topo.max_subslices = sseu->max_subslices; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0a7d60509ca7..00e03560c4e7 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -25,6 +25,9 @@ #ifndef _I915_REG_H_ #define _I915_REG_H_ +#include <linux/bitfield.h> +#include <linux/bits.h> + /** * DOC: The i915 register macro definition style guide * @@ -59,15 +62,13 @@ * significant to least significant bit. Indent the register content macros * using two extra spaces between ``#define`` and the macro name. * - * For bit fields, define a ``_MASK`` and a ``_SHIFT`` macro. Define bit field - * contents so that they are already shifted in place, and can be directly - * OR'd. For convenience, function-like macros may be used to define bit fields, - * but do note that the macros may be needed to read as well as write the - * register contents. + * Define bit fields using ``REG_GENMASK(h, l)``. Define bit field contents + * using ``REG_FIELD_PREP(mask, value)``. This will define the values already + * shifted in place, so they can be directly OR'd together. For convenience, + * function-like macros may be used to define bit fields, but do note that the + * macros may be needed to read as well as write the register contents. * - * Define bits using ``(1 << N)`` instead of ``BIT(N)``. We may change this in - * the future, but this is the prevailing style. Do **not** add ``_BIT`` suffix - * to the name. + * Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name. * * Group the register and its contents together without blank lines, separate * from other registers and their contents with one blank line. @@ -105,26 +106,87 @@ * #define _FOO_A 0xf000 * #define _FOO_B 0xf001 * #define FOO(pipe) _MMIO_PIPE(pipe, _FOO_A, _FOO_B) - * #define FOO_ENABLE (1 << 31) - * #define FOO_MODE_MASK (0xf << 16) - * #define FOO_MODE_SHIFT 16 - * #define FOO_MODE_BAR (0 << 16) - * #define FOO_MODE_BAZ (1 << 16) - * #define FOO_MODE_QUX_SNB (2 << 16) + * #define FOO_ENABLE REG_BIT(31) + * #define FOO_MODE_MASK REG_GENMASK(19, 16) + * #define FOO_MODE_BAR REG_FIELD_PREP(FOO_MODE_MASK, 0) + * #define FOO_MODE_BAZ REG_FIELD_PREP(FOO_MODE_MASK, 1) + * #define FOO_MODE_QUX_SNB REG_FIELD_PREP(FOO_MODE_MASK, 2) * * #define BAR _MMIO(0xb000) * #define GEN8_BAR _MMIO(0xb888) */ +/** + * REG_BIT() - Prepare a u32 bit value + * @__n: 0-based bit number + * + * Local wrapper for BIT() to force u32, with compile time checks. + * + * @return: Value with bit @__n set. + */ +#define REG_BIT(__n) \ + ((u32)(BIT(__n) + \ + BUILD_BUG_ON_ZERO(__builtin_constant_p(__n) && \ + ((__n) < 0 || (__n) > 31)))) + +/** + * REG_GENMASK() - Prepare a continuous u32 bitmask + * @__high: 0-based high bit + * @__low: 0-based low bit + * + * Local wrapper for GENMASK() to force u32, with compile time checks. + * + * @return: Continuous bitmask from @__high to @__low, inclusive. + */ +#define REG_GENMASK(__high, __low) \ + ((u32)(GENMASK(__high, __low) + \ + BUILD_BUG_ON_ZERO(__builtin_constant_p(__high) && \ + __builtin_constant_p(__low) && \ + ((__low) < 0 || (__high) > 31 || (__low) > (__high))))) + +/* + * Local integer constant expression version of is_power_of_2(). 
+ */ +#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0)) + +/** + * REG_FIELD_PREP() - Prepare a u32 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to put in the field + + * Local copy of FIELD_PREP() to generate an integer constant expression, force + * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK(). + * + * @return: @__val masked and shifted into the field defined by @__mask. + */ +#define REG_FIELD_PREP(__mask, __val) \ + ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \ + BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \ + BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \ + BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \ + BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0)))) + +/** + * REG_FIELD_GET() - Extract a u32 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to extract the bitfield value from + * + * Local wrapper for FIELD_GET() to force u32 and for consistency with + * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK(). + * + * @return: Masked and shifted value of the field defined by @__mask in @__val. + */ +#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val)) + typedef struct { - uint32_t reg; + u32 reg; } i915_reg_t; #define _MMIO(r) ((const i915_reg_t){ .reg = (r) }) #define INVALID_MMIO_REG _MMIO(0) -static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg) +static inline u32 i915_mmio_reg_offset(i915_reg_t reg) { return reg.reg; } @@ -139,6 +201,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG); } +#define VLV_DISPLAY_BASE 0x180000 +#define VLV_MIPI_BASE VLV_DISPLAY_BASE +#define BXT_MIPI_BASE 0x60000 + +#define DISPLAY_MMIO_BASE(dev_priv) (INTEL_INFO(dev_priv)->display_mmio_offset) + /* * Given the first two numbers __a and __b of arbitrarily many evenly spaced * numbers, pick the 0-based __index'th value. @@ -179,15 +247,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) * Device info offset array based helpers for groups of registers with unevenly * spaced base offsets. 
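Together, REG_BIT(), REG_GENMASK(), REG_FIELD_PREP() and REG_FIELD_GET() replace the hand-rolled _MASK/_SHIFT pairs with compile-time-checked u32 helpers. A small round-trip sketch in the spirit of the style-guide example above (the FOO_* register and values are made up):

	/* FOO_* mirrors the style-guide example; the field layout is invented. */
	#define FOO_ENABLE	REG_BIT(31)
	#define FOO_MODE_MASK	REG_GENMASK(19, 16)
	#define FOO_MODE_BAZ	REG_FIELD_PREP(FOO_MODE_MASK, 1)

	static u32 foo_pack(void)
	{
		return FOO_ENABLE | FOO_MODE_BAZ;	/* fields arrive pre-shifted */
	}

	static u32 foo_mode(u32 val)
	{
		return REG_FIELD_GET(FOO_MODE_MASK, val);	/* 1 for FOO_MODE_BAZ */
	}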
*/ -#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \ - dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ - dev_priv->info.display_mmio_offset) -#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \ - dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ - dev_priv->info.display_mmio_offset) -#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \ - dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \ - dev_priv->info.display_mmio_offset) +#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \ + INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \ + DISPLAY_MMIO_BASE(dev_priv)) +#define _MMIO_TRANS2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \ + INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \ + DISPLAY_MMIO_BASE(dev_priv)) +#define _CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \ + INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \ + DISPLAY_MMIO_BASE(dev_priv)) #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value)) #define _MASKED_FIELD(mask, value) ({ \ @@ -204,14 +272,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) /* Engine ID */ -#define RCS_HW 0 -#define VCS_HW 1 -#define BCS_HW 2 -#define VECS_HW 3 -#define VCS2_HW 4 -#define VCS3_HW 6 -#define VCS4_HW 7 -#define VECS2_HW 12 +#define RCS0_HW 0 +#define VCS0_HW 1 +#define BCS0_HW 2 +#define VECS0_HW 3 +#define VCS1_HW 4 +#define VCS2_HW 6 +#define VCS3_HW 7 +#define VECS1_HW 12 /* Engine class */ @@ -347,10 +415,28 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN11_GRDOM_MEDIA4 (1 << 8) #define GEN11_GRDOM_VECS (1 << 13) #define GEN11_GRDOM_VECS2 (1 << 14) - -#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228) -#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518) -#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220) +#define GEN11_GRDOM_SFC0 (1 << 17) +#define GEN11_GRDOM_SFC1 (1 << 18) + +#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1)) +#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance)) + +#define GEN11_VCS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x88C) +#define GEN11_VCS_SFC_FORCED_LOCK_BIT (1 << 0) +#define GEN11_VCS_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x890) +#define GEN11_VCS_SFC_USAGE_BIT (1 << 0) +#define GEN11_VCS_SFC_LOCK_ACK_BIT (1 << 1) + +#define GEN11_VECS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x201C) +#define GEN11_VECS_SFC_FORCED_LOCK_BIT (1 << 0) +#define GEN11_VECS_SFC_LOCK_ACK(engine) _MMIO((engine)->mmio_base + 0x2018) +#define GEN11_VECS_SFC_LOCK_ACK_BIT (1 << 0) +#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014) +#define GEN11_VECS_SFC_USAGE_BIT (1 << 0) + +#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228) +#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518) +#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220) #define PP_DIR_DCLV_2G 0xffffffff #define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4) @@ -1020,7 +1106,32 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) /* See configdb bunit SB addr map */ #define BUNIT_REG_BISOC 0x11 -#define PUNIT_REG_DSPFREQ 0x36 +/* PUNIT_REG_*SSPM0 */ +#define _SSPM0_SSC(val) ((val) << 0) +#define SSPM0_SSC_MASK _SSPM0_SSC(0x3) +#define SSPM0_SSC_PWR_ON _SSPM0_SSC(0x0) +#define SSPM0_SSC_CLK_GATE _SSPM0_SSC(0x1) +#define SSPM0_SSC_RESET _SSPM0_SSC(0x2) +#define 
SSPM0_SSC_PWR_GATE _SSPM0_SSC(0x3) +#define _SSPM0_SSS(val) ((val) << 24) +#define SSPM0_SSS_MASK _SSPM0_SSS(0x3) +#define SSPM0_SSS_PWR_ON _SSPM0_SSS(0x0) +#define SSPM0_SSS_CLK_GATE _SSPM0_SSS(0x1) +#define SSPM0_SSS_RESET _SSPM0_SSS(0x2) +#define SSPM0_SSS_PWR_GATE _SSPM0_SSS(0x3) + +/* PUNIT_REG_*SSPM1 */ +#define SSPM1_FREQSTAT_SHIFT 24 +#define SSPM1_FREQSTAT_MASK (0x1f << SSPM1_FREQSTAT_SHIFT) +#define SSPM1_FREQGUAR_SHIFT 8 +#define SSPM1_FREQGUAR_MASK (0x1f << SSPM1_FREQGUAR_SHIFT) +#define SSPM1_FREQ_SHIFT 0 +#define SSPM1_FREQ_MASK (0x1f << SSPM1_FREQ_SHIFT) + +#define PUNIT_REG_VEDSSPM0 0x32 +#define PUNIT_REG_VEDSSPM1 0x33 + +#define PUNIT_REG_DSPSSPM 0x36 #define DSPFREQSTAT_SHIFT_CHV 24 #define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV) #define DSPFREQGUAR_SHIFT_CHV 8 @@ -1045,6 +1156,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe)) #define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe)) +#define PUNIT_REG_ISPSSPM0 0x39 +#define PUNIT_REG_ISPSSPM1 0x3a + /* * i915_power_well_id: * @@ -1790,7 +1904,7 @@ enum i915_power_well_id { #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 -#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ +#define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \ _CNL_PORT_TX_AE_GRP_OFFSET, \ _CNL_PORT_TX_B_GRP_OFFSET, \ _CNL_PORT_TX_B_GRP_OFFSET, \ @@ -1798,7 +1912,7 @@ enum i915_power_well_id { _CNL_PORT_TX_AE_GRP_OFFSET, \ _CNL_PORT_TX_F_GRP_OFFSET) + \ 4 * (dw)) -#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ +#define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \ _CNL_PORT_TX_AE_LN0_OFFSET, \ _CNL_PORT_TX_B_LN0_OFFSET, \ _CNL_PORT_TX_B_LN0_OFFSET, \ @@ -1834,15 +1948,15 @@ enum i915_power_well_id { #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 -#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) -#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) -#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ +#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port))) +#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port))) +#define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ _CNL_PORT_TX_DW4_LN0_AE))) #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) #define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port)) #define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port)) -#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port)) +#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port)) #define LOADGEN_SELECT (1 << 31) #define POST_CURSOR_1(x) ((x) << 12) #define POST_CURSOR_1_MASK (0x3F << 12) @@ -1864,12 +1978,16 @@ enum i915_power_well_id { #define RTERM_SELECT(x) ((x) << 3) #define RTERM_SELECT_MASK (0x7 << 3) -#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) -#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) +#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port))) +#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port))) +#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) +#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) +#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) +#define ICL_PORT_TX_DW7_LN(ln, 
port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) -#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \ +#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \ _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) #define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C @@ -1880,8 +1998,8 @@ enum i915_power_well_id { #define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C #define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C #define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C -#define MG_TX1_LINK_PARAMS(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ +#define MG_TX1_LINK_PARAMS(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ MG_TX_LINK_PARAMS_TX1LN1_PORT1) @@ -1893,8 +2011,8 @@ enum i915_power_well_id { #define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC #define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC #define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC -#define MG_TX2_LINK_PARAMS(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ +#define MG_TX2_LINK_PARAMS(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ MG_TX_LINK_PARAMS_TX2LN1_PORT1) #define CRI_USE_FS32 (1 << 5) @@ -1907,8 +2025,8 @@ enum i915_power_well_id { #define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C #define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C #define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C -#define MG_TX1_PISO_READLOAD(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ +#define MG_TX1_PISO_READLOAD(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ MG_TX_PISO_READLOAD_TX1LN1_PORT1) @@ -1920,8 +2038,8 @@ enum i915_power_well_id { #define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC #define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC #define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC -#define MG_TX2_PISO_READLOAD(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ +#define MG_TX2_PISO_READLOAD(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ MG_TX_PISO_READLOAD_TX2LN1_PORT1) #define CRI_CALCINIT (1 << 1) @@ -1934,8 +2052,8 @@ enum i915_power_well_id { #define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 #define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 #define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 -#define MG_TX1_SWINGCTRL(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ +#define MG_TX1_SWINGCTRL(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ MG_TX_SWINGCTRL_TX1LN0_PORT2, \ MG_TX_SWINGCTRL_TX1LN1_PORT1) @@ -1947,8 +2065,8 @@ enum i915_power_well_id { #define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 #define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 #define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 -#define MG_TX2_SWINGCTRL(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ +#define MG_TX2_SWINGCTRL(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ MG_TX_SWINGCTRL_TX2LN0_PORT2, \ MG_TX_SWINGCTRL_TX2LN1_PORT1) #define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) @@ -1962,8 +2080,8 @@ enum i915_power_well_id { #define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544 #define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144 #define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544 -#define MG_TX1_DRVCTRL(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ +#define MG_TX1_DRVCTRL(ln, port) \ + MG_PHY_PORT_LN(ln, 
port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ MG_TX_DRVCTRL_TX1LN0_TXPORT2, \ MG_TX_DRVCTRL_TX1LN1_TXPORT1) @@ -1975,8 +2093,8 @@ enum i915_power_well_id { #define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 #define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 #define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 -#define MG_TX2_DRVCTRL(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \ +#define MG_TX2_DRVCTRL(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \ MG_TX_DRVCTRL_TX2LN0_PORT2, \ MG_TX_DRVCTRL_TX2LN1_PORT1) #define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) @@ -1995,8 +2113,8 @@ enum i915_power_well_id { #define MG_CLKHUB_LN1_PORT3 0x16A79C #define MG_CLKHUB_LN0_PORT4 0x16B39C #define MG_CLKHUB_LN1_PORT4 0x16B79C -#define MG_CLKHUB(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \ +#define MG_CLKHUB(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \ MG_CLKHUB_LN0_PORT2, \ MG_CLKHUB_LN1_PORT1) #define CFG_LOW_RATE_LKREN_EN (1 << 11) @@ -2009,8 +2127,8 @@ enum i915_power_well_id { #define MG_TX_DCC_TX1LN1_PORT3 0x16A510 #define MG_TX_DCC_TX1LN0_PORT4 0x16B110 #define MG_TX_DCC_TX1LN1_PORT4 0x16B510 -#define MG_TX1_DCC(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \ +#define MG_TX1_DCC(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \ MG_TX_DCC_TX1LN0_PORT2, \ MG_TX_DCC_TX1LN1_PORT1) #define MG_TX_DCC_TX2LN0_PORT1 0x168090 @@ -2021,8 +2139,8 @@ enum i915_power_well_id { #define MG_TX_DCC_TX2LN1_PORT3 0x16A490 #define MG_TX_DCC_TX2LN0_PORT4 0x16B090 #define MG_TX_DCC_TX2LN1_PORT4 0x16B490 -#define MG_TX2_DCC(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \ +#define MG_TX2_DCC(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \ MG_TX_DCC_TX2LN0_PORT2, \ MG_TX_DCC_TX2LN1_PORT1) #define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25) @@ -2037,8 +2155,8 @@ enum i915_power_well_id { #define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0 #define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0 #define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0 -#define MG_DP_MODE(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \ +#define MG_DP_MODE(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \ MG_DP_MODE_LN0_ACU_PORT2, \ MG_DP_MODE_LN1_ACU_PORT1) #define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) @@ -2450,12 +2568,12 @@ enum i915_power_well_id { #define HWS_START_ADDRESS_SHIFT 4 #define PWRCTXA _MMIO(0x2088) /* 965GM+ only */ #define PWRCTX_EN (1 << 0) -#define IPEIR _MMIO(0x2088) -#define IPEHR _MMIO(0x208c) +#define IPEIR(base) _MMIO((base) + 0x88) +#define IPEHR(base) _MMIO((base) + 0x8c) #define GEN2_INSTDONE _MMIO(0x2090) #define NOPID _MMIO(0x2094) #define HWSTAM _MMIO(0x2098) -#define DMA_FADD_I8XX _MMIO(0x20d0) +#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0) #define RING_BBSTATE(base) _MMIO((base) + 0x110) #define RING_BB_PPGTT (1 << 5) #define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */ @@ -2592,10 +2710,6 @@ enum i915_power_well_id { #define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3) -#define VLV_DISPLAY_BASE 0x180000 -#define VLV_MIPI_BASE VLV_DISPLAY_BASE -#define BXT_MIPI_BASE 0x60000 - #define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030) #define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034) #define SCPD0 _MMIO(0x209c) /* 915+ only */ @@ -2633,7 +2747,7 @@ enum i915_power_well_id { #define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */ #define INSTPM_TLB_INVALIDATE (1 << 9) #define INSTPM_SYNC_FLUSH (1 << 5) -#define ACTHD _MMIO(0x20c8) +#define ACTHD(base) _MMIO((base) + 0xc8) #define MEM_MODE _MMIO(0x20cc) #define 
MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */ #define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */ @@ -2777,6 +2891,9 @@ enum i915_power_well_id { #define GEN6_RCS_PWR_FSM _MMIO(0x22ac) #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4) +#define GEN10_CACHE_MODE_SS _MMIO(0xe420) +#define FLOAT_BLEND_OPTIMIZATION_ENABLE (1 << 4) + /* Fuse readout registers for GT */ #define HSW_PAVP_FUSE1 _MMIO(0x911C) #define HSW_F1_EU_DIS_SHIFT 16 @@ -2836,7 +2953,7 @@ enum i915_power_well_id { #define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140) #define GEN11_GT_VDBOX_DISABLE_MASK 0xff #define GEN11_GT_VEBOX_DISABLE_SHIFT 16 -#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT) +#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT) #define GEN11_EU_DISABLE _MMIO(0x9134) #define GEN11_EU_DIS_MASK 0xFF @@ -3152,9 +3269,9 @@ enum i915_power_well_id { /* * Clock control & power management */ -#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) -#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) -#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030) +#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014) +#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018) +#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030) #define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C) #define VGA0 _MMIO(0x6000) @@ -3251,9 +3368,9 @@ enum i915_power_well_id { #define SDVO_MULTIPLIER_SHIFT_HIRES 4 #define SDVO_MULTIPLIER_SHIFT_VGA 0 -#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) -#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) -#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c) +#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c) +#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020) +#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c) #define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD) /* @@ -3325,7 +3442,7 @@ enum i915_power_well_id { #define DSTATE_PLL_D3_OFF (1 << 3) #define DSTATE_GFX_CLOCK_GATING (1 << 1) #define DSTATE_DOT_CLOCK_GATING (1 << 0) -#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200) +#define DSPCLK_GATE_D _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200) # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ @@ -3465,7 +3582,7 @@ enum i915_power_well_id { #define _PALETTE_A 0xa000 #define _PALETTE_B 0xa800 #define _CHV_PALETTE_C 0xc000 -#define PALETTE(pipe, i) _MMIO(dev_priv->info.display_mmio_offset + \ +#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ _PICK((pipe), _PALETTE_A, \ _PALETTE_B, _CHV_PALETTE_C) + \ (i) * 4) @@ -3830,7 +3947,7 @@ enum i915_power_well_id { /* * Logical Context regs */ -#define CCID _MMIO(0x2180) +#define CCID(base) _MMIO((base) + 0x180) #define CCID_EN BIT(0) #define CCID_EXTENDED_STATE_RESTORE BIT(2) #define CCID_EXTENDED_STATE_SAVE BIT(3) @@ -3962,6 +4079,15 @@ enum { /* Pipe A CRC regs */ #define _PIPE_CRC_CTL_A 0x60050 #define PIPE_CRC_ENABLE (1 << 31) +/* skl+ source selection */ +#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28) +#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28) +#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28) +#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28) +#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28) +#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28) +#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28) +#define 
PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28) /* ivb+ source selection */ #define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) #define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) @@ -4141,6 +4267,7 @@ enum { #define EDP_PSR_TP2_TP3_TIME_100us (1 << 8) #define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8) #define EDP_PSR_TP2_TP3_TIME_0us (3 << 8) +#define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */ #define EDP_PSR_TP1_TIME_500us (0 << 4) #define EDP_PSR_TP1_TIME_100us (1 << 4) #define EDP_PSR_TP1_TIME_2500us (2 << 4) @@ -4248,6 +4375,15 @@ enum { #define EDP_PSR2_STATUS_STATE_MASK (0xf << 28) #define EDP_PSR2_STATUS_STATE_SHIFT 28 +#define _PSR2_SU_STATUS_0 0x6F914 +#define _PSR2_SU_STATUS_1 0x6F918 +#define _PSR2_SU_STATUS_2 0x6F91C +#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1)) +#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3)) +#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10) +#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame)) +#define PSR2_SU_STATUS_FRAMES 8 + /* VGA port control */ #define ADPA _MMIO(0x61100) #define PCH_ADPA _MMIO(0xe1100) @@ -4298,7 +4434,7 @@ enum { /* Hotplug control (945+ only) */ -#define PORT_HOTPLUG_EN _MMIO(dev_priv->info.display_mmio_offset + 0x61110) +#define PORT_HOTPLUG_EN _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110) #define PORTB_HOTPLUG_INT_EN (1 << 29) #define PORTC_HOTPLUG_INT_EN (1 << 28) #define PORTD_HOTPLUG_INT_EN (1 << 27) @@ -4328,7 +4464,7 @@ enum { #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) -#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114) +#define PORT_HOTPLUG_STAT _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114) /* * HDMI/DP bits are g4x+ * @@ -4410,7 +4546,7 @@ enum { #define PORT_DFT_I9XX _MMIO(0x61150) #define DC_BALANCE_RESET (1 << 25) -#define PORT_DFT2_G4X _MMIO(dev_priv->info.display_mmio_offset + 0x61154) +#define PORT_DFT2_G4X _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154) #define DC_BALANCE_RESET_VLV (1 << 31) #define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0)) #define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */ @@ -4576,13 +4712,14 @@ enum { #define VIDEO_DIP_ENABLE (1 << 31) #define VIDEO_DIP_PORT(port) ((port) << 29) #define VIDEO_DIP_PORT_MASK (3 << 29) -#define VIDEO_DIP_ENABLE_GCP (1 << 25) +#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */ #define VIDEO_DIP_ENABLE_AVI (1 << 21) #define VIDEO_DIP_ENABLE_VENDOR (2 << 21) -#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) +#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */ #define VIDEO_DIP_ENABLE_SPD (8 << 21) #define VIDEO_DIP_SELECT_AVI (0 << 19) #define VIDEO_DIP_SELECT_VENDOR (1 << 19) +#define VIDEO_DIP_SELECT_GAMUT (2 << 19) #define VIDEO_DIP_SELECT_SPD (3 << 19) #define VIDEO_DIP_SELECT_MASK (3 << 19) #define VIDEO_DIP_FREQ_ONCE (0 << 16) @@ -4617,18 +4754,17 @@ enum { #define _PP_STATUS 0x61200 #define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS) -#define PP_ON (1 << 31) +#define PP_ON REG_BIT(31) #define _PP_CONTROL_1 0xc7204 #define _PP_CONTROL_2 0xc7304 #define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? 
_PP_CONTROL_1 : \ _PP_CONTROL_2) -#define POWER_CYCLE_DELAY_MASK (0x1f << 4) -#define POWER_CYCLE_DELAY_SHIFT 4 -#define VDD_OVERRIDE_FORCE (1 << 3) -#define BACKLIGHT_ENABLE (1 << 2) -#define PWR_DOWN_ON_RESET (1 << 1) -#define PWR_STATE_TARGET (1 << 0) +#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4) +#define VDD_OVERRIDE_FORCE REG_BIT(3) +#define BACKLIGHT_ENABLE REG_BIT(2) +#define PWR_DOWN_ON_RESET REG_BIT(1) +#define PWR_STATE_TARGET REG_BIT(0) /* * Indicates that all dependencies of the panel are on: * @@ -4636,66 +4772,56 @@ enum { * - pipe enabled * - LVDS/DVOB/DVOC on */ -#define PP_READY (1 << 30) -#define PP_SEQUENCE_NONE (0 << 28) -#define PP_SEQUENCE_POWER_UP (1 << 28) -#define PP_SEQUENCE_POWER_DOWN (2 << 28) -#define PP_SEQUENCE_MASK (3 << 28) -#define PP_SEQUENCE_SHIFT 28 -#define PP_CYCLE_DELAY_ACTIVE (1 << 27) -#define PP_SEQUENCE_STATE_MASK 0x0000000f -#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) -#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) -#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) -#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0) -#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0) -#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0) -#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) -#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) -#define PP_SEQUENCE_STATE_RESET (0xf << 0) +#define PP_READY REG_BIT(30) +#define PP_SEQUENCE_MASK REG_GENMASK(29, 28) +#define PP_SEQUENCE_NONE REG_FIELD_PREP(PP_SEQUENCE_MASK, 0) +#define PP_SEQUENCE_POWER_UP REG_FIELD_PREP(PP_SEQUENCE_MASK, 1) +#define PP_SEQUENCE_POWER_DOWN REG_FIELD_PREP(PP_SEQUENCE_MASK, 2) +#define PP_CYCLE_DELAY_ACTIVE REG_BIT(27) +#define PP_SEQUENCE_STATE_MASK REG_GENMASK(3, 0) +#define PP_SEQUENCE_STATE_OFF_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x0) +#define PP_SEQUENCE_STATE_OFF_S0_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x1) +#define PP_SEQUENCE_STATE_OFF_S0_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x2) +#define PP_SEQUENCE_STATE_OFF_S0_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x3) +#define PP_SEQUENCE_STATE_ON_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x8) +#define PP_SEQUENCE_STATE_ON_S1_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x9) +#define PP_SEQUENCE_STATE_ON_S1_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xa) +#define PP_SEQUENCE_STATE_ON_S1_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xb) +#define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf) #define _PP_CONTROL 0x61204 #define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL) -#define PANEL_UNLOCK_REGS (0xabcd << 16) -#define PANEL_UNLOCK_MASK (0xffff << 16) -#define BXT_POWER_CYCLE_DELAY_MASK 0x1f0 -#define BXT_POWER_CYCLE_DELAY_SHIFT 4 -#define EDP_FORCE_VDD (1 << 3) -#define EDP_BLC_ENABLE (1 << 2) -#define PANEL_POWER_RESET (1 << 1) -#define PANEL_POWER_OFF (0 << 0) -#define PANEL_POWER_ON (1 << 0) +#define PANEL_UNLOCK_MASK REG_GENMASK(31, 16) +#define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd) +#define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4) +#define EDP_FORCE_VDD REG_BIT(3) +#define EDP_BLC_ENABLE REG_BIT(2) +#define PANEL_POWER_RESET REG_BIT(1) +#define PANEL_POWER_ON REG_BIT(0) #define _PP_ON_DELAYS 0x61208 #define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS) -#define PANEL_PORT_SELECT_SHIFT 30 -#define PANEL_PORT_SELECT_MASK (3 << 30) -#define PANEL_PORT_SELECT_LVDS (0 << 30) -#define PANEL_PORT_SELECT_DPA (1 << 30) -#define PANEL_PORT_SELECT_DPC (2 << 30) -#define PANEL_PORT_SELECT_DPD (3 << 30) -#define PANEL_PORT_SELECT_VLV(port) ((port) << 30) -#define PANEL_POWER_UP_DELAY_MASK 0x1fff0000 
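The panel-power conversions here are mechanical: each explicit _MASK/_SHIFT pair becomes a single REG_GENMASK() spanning the same bits (the removed 0x1fff0000 mask corresponds to bits 28:16), and a read that used to mask and shift by hand can go through REG_FIELD_GET(). A decode sketch, assuming dev_priv and pps_idx are in scope:

	/* Sketch only: previously (val & MASK) >> SHIFT, now one helper call. */
	u32 pp_on = I915_READ(PP_ON_DELAYS(pps_idx));
	u32 power_up_delay = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);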
-#define PANEL_POWER_UP_DELAY_SHIFT 16 -#define PANEL_LIGHT_ON_DELAY_MASK 0x1fff -#define PANEL_LIGHT_ON_DELAY_SHIFT 0 +#define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30) +#define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0) +#define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1) +#define PANEL_PORT_SELECT_DPC REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 2) +#define PANEL_PORT_SELECT_DPD REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 3) +#define PANEL_PORT_SELECT_VLV(port) REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, port) +#define PANEL_POWER_UP_DELAY_MASK REG_GENMASK(28, 16) +#define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0) #define _PP_OFF_DELAYS 0x6120C #define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS) -#define PANEL_POWER_DOWN_DELAY_MASK 0x1fff0000 -#define PANEL_POWER_DOWN_DELAY_SHIFT 16 -#define PANEL_LIGHT_OFF_DELAY_MASK 0x1fff -#define PANEL_LIGHT_OFF_DELAY_SHIFT 0 +#define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16) +#define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0) #define _PP_DIVISOR 0x61210 #define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR) -#define PP_REFERENCE_DIVIDER_MASK 0xffffff00 -#define PP_REFERENCE_DIVIDER_SHIFT 8 -#define PANEL_POWER_CYCLE_DELAY_MASK 0x1f -#define PANEL_POWER_CYCLE_DELAY_SHIFT 0 +#define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8) +#define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0) /* Panel fitting */ -#define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230) +#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230) #define PFIT_ENABLE (1 << 31) #define PFIT_PIPE_MASK (3 << 29) #define PFIT_PIPE_SHIFT 29 @@ -4713,7 +4839,7 @@ enum { #define PFIT_SCALING_PROGRAMMED (1 << 26) #define PFIT_SCALING_PILLAR (2 << 26) #define PFIT_SCALING_LETTER (3 << 26) -#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234) +#define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234) /* Pre-965 */ #define PFIT_VERT_SCALE_SHIFT 20 #define PFIT_VERT_SCALE_MASK 0xfff00000 @@ -4725,25 +4851,25 @@ enum { #define PFIT_HORIZ_SCALE_SHIFT_965 0 #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff -#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238) +#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238) -#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250) -#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350) +#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250) +#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350) #define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ _VLV_BLC_PWM_CTL2_B) -#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254) -#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354) +#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254) +#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354) #define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ _VLV_BLC_PWM_CTL_B) -#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260) -#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360) +#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260) +#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360) #define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ _VLV_BLC_HIST_CTL_B) /* Backlight control */ -#define BLC_PWM_CTL2 _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */ 
+#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */ #define BLM_PWM_ENABLE (1 << 31) #define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ #define BLM_PIPE_SELECT (1 << 29) @@ -4766,7 +4892,7 @@ enum { #define BLM_PHASE_IN_COUNT_MASK (0xff << 8) #define BLM_PHASE_IN_INCR_SHIFT (0) #define BLM_PHASE_IN_INCR_MASK (0xff << 0) -#define BLC_PWM_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61254) +#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254) /* * This is the most significant 15 bits of the number of backlight cycles in a * complete cycle of the modulated backlight control. @@ -4788,7 +4914,7 @@ enum { #define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) #define BLM_POLARITY_PNV (1 << 0) /* pnv only */ -#define BLC_HIST_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61260) +#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260) #define BLM_HISTOGRAM_ENABLE (1 << 31) /* New registers for PCH-split platforms. Safe where new bits show up, the @@ -4863,6 +4989,7 @@ enum { # define TV_OVERSAMPLE_NONE (2 << 18) /* Selects 8x oversampling */ # define TV_OVERSAMPLE_8X (3 << 18) +# define TV_OVERSAMPLE_MASK (3 << 18) /* Selects progressive mode rather than interlaced */ # define TV_PROGRESSIVE (1 << 17) /* Sets the colorburst to PAL mode. Required for non-M PAL modes. */ @@ -5412,47 +5539,47 @@ enum { * is 20 bytes in each direction, hence the 5 fixed * data registers */ -#define _DPA_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64010) -#define _DPA_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64014) -#define _DPA_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64018) -#define _DPA_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6401c) -#define _DPA_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64020) -#define _DPA_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64024) - -#define _DPB_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64110) -#define _DPB_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64114) -#define _DPB_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64118) -#define _DPB_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6411c) -#define _DPB_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64120) -#define _DPB_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64124) - -#define _DPC_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64210) -#define _DPC_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64214) -#define _DPC_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64218) -#define _DPC_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6421c) -#define _DPC_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64220) -#define _DPC_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64224) - -#define _DPD_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64310) -#define _DPD_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64314) -#define _DPD_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64318) -#define _DPD_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6431c) -#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320) -#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324) - -#define _DPE_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64410) -#define _DPE_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64414) -#define _DPE_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64418) -#define _DPE_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6441c) -#define _DPE_AUX_CH_DATA4 
(dev_priv->info.display_mmio_offset + 0x64420) -#define _DPE_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64424) - -#define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510) -#define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514) -#define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518) -#define _DPF_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6451c) -#define _DPF_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64520) -#define _DPF_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64524) +#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010) +#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014) +#define _DPA_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64018) +#define _DPA_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c) +#define _DPA_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64020) +#define _DPA_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64024) + +#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110) +#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114) +#define _DPB_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64118) +#define _DPB_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c) +#define _DPB_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64120) +#define _DPB_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64124) + +#define _DPC_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64210) +#define _DPC_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64214) +#define _DPC_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64218) +#define _DPC_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c) +#define _DPC_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64220) +#define _DPC_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64224) + +#define _DPD_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64310) +#define _DPD_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64314) +#define _DPD_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64318) +#define _DPD_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c) +#define _DPD_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64320) +#define _DPD_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64324) + +#define _DPE_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64410) +#define _DPE_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64414) +#define _DPE_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64418) +#define _DPE_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c) +#define _DPE_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64420) +#define _DPE_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64424) + +#define _DPF_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64510) +#define _DPF_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64514) +#define _DPF_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64518) +#define _DPF_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c) +#define _DPF_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64520) +#define _DPF_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64524) #define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) #define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ @@ -5554,9 +5681,15 @@ enum { #define PIPECONF_SINGLE_WIDE 0 #define PIPECONF_PIPE_UNLOCKED 0 #define PIPECONF_PIPE_LOCKED (1 << 25) -#define PIPECONF_PALETTE 0 -#define PIPECONF_GAMMA (1 << 24) #define PIPECONF_FORCE_BORDER (1 << 25) +#define PIPECONF_GAMMA_MODE_MASK_I9XX (1 << 24) /* gmch */ +#define PIPECONF_GAMMA_MODE_MASK_ILK (3 << 24) /* ilk-ivb */ +#define 
PIPECONF_GAMMA_MODE_8BIT (0 << 24) /* gmch,ilk-ivb */ +#define PIPECONF_GAMMA_MODE_10BIT (1 << 24) /* gmch,ilk-ivb */ +#define PIPECONF_GAMMA_MODE_12BIT (2 << 24) /* ilk-ivb */ +#define PIPECONF_GAMMA_MODE_SPLIT (3 << 24) /* ivb */ +#define PIPECONF_GAMMA_MODE(x) ((x) << 24) /* pass in GAMMA_MODE_MODE_* */ +#define PIPECONF_GAMMA_MODE_SHIFT 24 #define PIPECONF_INTERLACE_MASK (7 << 21) #define PIPECONF_INTERLACE_MASK_HSW (3 << 21) /* Note that pre-gen3 does not support interlaced display directly. Panel @@ -5662,6 +5795,10 @@ enum { #define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL) #define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT) +#define _PIPEAGCMAX 0x70010 +#define _PIPEBGCMAX 0x71010 +#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4) + #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 #define PIPEMISC_YUV420_ENABLE (1 << 27) @@ -5677,6 +5814,12 @@ enum { #define PIPEMISC_DITHER_TYPE_SP (0 << 2) #define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A) +/* Skylake+ pipe bottom (background) color */ +#define _SKL_BOTTOM_COLOR_A 0x70034 +#define SKL_BOTTOM_COLOR_GAMMA_ENABLE (1 << 31) +#define SKL_BOTTOM_COLOR_CSC_ENABLE (1 << 30) +#define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE2(pipe, _SKL_BOTTOM_COLOR_A) + #define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028) #define PIPEB_LINE_COMPARE_INT_EN (1 << 29) #define PIPEB_HLINE_INT_EN (1 << 28) @@ -5728,7 +5871,7 @@ enum { #define DPINVGTT_STATUS_MASK 0xff #define DPINVGTT_STATUS_MASK_CHV 0xfff -#define DSPARB _MMIO(dev_priv->info.display_mmio_offset + 0x70030) +#define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030) #define DSPARB_CSTART_MASK (0x7f << 7) #define DSPARB_CSTART_SHIFT 7 #define DSPARB_BSTART_MASK (0x7f) @@ -5763,7 +5906,7 @@ enum { #define DSPARB_SPRITEF_MASK_VLV (0xff << 8) /* pnv/gen4/g4x/vlv/chv */ -#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034) +#define DSPFW1 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034) #define DSPFW_SR_SHIFT 23 #define DSPFW_SR_MASK (0x1ff << 23) #define DSPFW_CURSORB_SHIFT 16 @@ -5774,7 +5917,7 @@ enum { #define DSPFW_PLANEA_SHIFT 0 #define DSPFW_PLANEA_MASK (0x7f << 0) #define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */ -#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038) +#define DSPFW2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038) #define DSPFW_FBC_SR_EN (1 << 31) /* g4x */ #define DSPFW_FBC_SR_SHIFT 28 #define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */ @@ -5790,7 +5933,7 @@ enum { #define DSPFW_SPRITEA_SHIFT 0 #define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */ #define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */ -#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c) +#define DSPFW3 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c) #define DSPFW_HPLL_SR_EN (1 << 31) #define PINEVIEW_SELF_REFRESH_EN (1 << 30) #define DSPFW_CURSOR_SR_SHIFT 24 @@ -5956,9 +6099,10 @@ enum { #define _CUR_WM_TRANS_A_0 0x70168 #define _CUR_WM_TRANS_B_0 0x71168 #define PLANE_WM_EN (1 << 31) +#define PLANE_WM_IGNORE_LINES (1 << 30) #define PLANE_WM_LINES_SHIFT 14 #define PLANE_WM_LINES_MASK 0x1f -#define PLANE_WM_BLOCKS_MASK 0x3ff +#define PLANE_WM_BLOCKS_MASK 0x7ff /* skl+: 10 bits, icl+ 11 bits */ #define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0) #define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level))) @@ -6082,7 +6226,7 @@ enum { #define MCURSOR_PIPE_SELECT_SHIFT 28 #define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28) #define MCURSOR_GAMMA_ENABLE (1 << 26) -#define MCURSOR_PIPE_CSC_ENABLE (1 << 24) +#define 
MCURSOR_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */ #define MCURSOR_ROTATE_180 (1 << 15) #define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14) #define _CURABASE 0x70084 @@ -6137,7 +6281,7 @@ enum { #define DISPPLANE_RGBA888 (0xf << 26) #define DISPPLANE_STEREO_ENABLE (1 << 25) #define DISPPLANE_STEREO_DISABLE 0 -#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) +#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */ #define DISPPLANE_SEL_PIPE_SHIFT 24 #define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT) #define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT) @@ -6206,35 +6350,35 @@ enum { * [10:1f] all * [30:32] all */ -#define SWF0(i) _MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4) -#define SWF1(i) _MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4) -#define SWF3(i) _MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4) +#define SWF0(i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4) +#define SWF1(i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4) +#define SWF3(i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4) #define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4) /* Pipe B */ -#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000) -#define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008) -#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024) +#define _PIPEBDSL (DISPLAY_MMIO_BASE(dev_priv) + 0x71000) +#define _PIPEBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008) +#define _PIPEBSTAT (DISPLAY_MMIO_BASE(dev_priv) + 0x71024) #define _PIPEBFRAMEHIGH 0x71040 #define _PIPEBFRAMEPIXEL 0x71044 -#define _PIPEB_FRMCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71040) -#define _PIPEB_FLIPCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71044) +#define _PIPEB_FRMCOUNT_G4X (DISPLAY_MMIO_BASE(dev_priv) + 0x71040) +#define _PIPEB_FLIPCOUNT_G4X (DISPLAY_MMIO_BASE(dev_priv) + 0x71044) /* Display B control */ -#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180) +#define _DSPBCNTR (DISPLAY_MMIO_BASE(dev_priv) + 0x71180) #define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15) #define DISPPLANE_ALPHA_TRANS_DISABLE 0 #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) -#define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184) -#define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188) -#define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C) -#define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190) -#define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C) -#define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4) -#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4) -#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC) +#define _DSPBADDR (DISPLAY_MMIO_BASE(dev_priv) + 0x71184) +#define _DSPBSTRIDE (DISPLAY_MMIO_BASE(dev_priv) + 0x71188) +#define _DSPBPOS (DISPLAY_MMIO_BASE(dev_priv) + 0x7118C) +#define _DSPBSIZE (DISPLAY_MMIO_BASE(dev_priv) + 0x71190) +#define _DSPBSURF (DISPLAY_MMIO_BASE(dev_priv) + 0x7119C) +#define _DSPBTILEOFF (DISPLAY_MMIO_BASE(dev_priv) + 0x711A4) +#define _DSPBOFFSET (DISPLAY_MMIO_BASE(dev_priv) + 0x711A4) +#define _DSPBSURFLIVE (DISPLAY_MMIO_BASE(dev_priv) + 0x711AC) /* ICL DSI 0 and 1 */ #define _PIPEDSI0CONF 0x7b008 @@ -6515,13 +6659,22 @@ enum { #define PLANE_CTL_FORMAT_YUV422 (0 << 24) #define PLANE_CTL_FORMAT_NV12 (1 << 24) #define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24) +#define PLANE_CTL_FORMAT_P010 (3 << 24) #define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24) +#define PLANE_CTL_FORMAT_P012 (5 << 
24) #define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24) +#define PLANE_CTL_FORMAT_P016 (7 << 24) #define PLANE_CTL_FORMAT_AYUV (8 << 24) #define PLANE_CTL_FORMAT_INDEXED (12 << 24) #define PLANE_CTL_FORMAT_RGB_565 (14 << 24) #define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23) #define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */ +#define PLANE_CTL_FORMAT_Y210 (1 << 23) +#define PLANE_CTL_FORMAT_Y212 (3 << 23) +#define PLANE_CTL_FORMAT_Y216 (5 << 23) +#define PLANE_CTL_FORMAT_Y410 (7 << 23) +#define PLANE_CTL_FORMAT_Y412 (9 << 23) +#define PLANE_CTL_FORMAT_Y416 (0xb << 23) #define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21) #define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21) #define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21) @@ -6742,8 +6895,7 @@ enum { #define _PLANE_BUF_CFG_1_B 0x7127c #define _PLANE_BUF_CFG_2_B 0x7137c -#define SKL_DDB_ENTRY_MASK 0x3FF -#define ICL_DDB_ENTRY_MASK 0x7FF +#define DDB_ENTRY_MASK 0x7FF /* skl+: 10 bits, icl+ 11 bits */ #define DDB_ENTRY_END_SHIFT 16 #define _PLANE_BUF_CFG_1(pipe) \ _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B) @@ -7061,14 +7213,25 @@ enum { #define _LGC_PALETTE_B 0x4a800 #define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4) +/* ilk/snb precision palette */ +#define _PREC_PALETTE_A 0x4b000 +#define _PREC_PALETTE_B 0x4c000 +#define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4) + +#define _PREC_PIPEAGCMAX 0x4d000 +#define _PREC_PIPEBGCMAX 0x4d010 +#define PREC_PIPEGCMAX(pipe, i) _MMIO(_PIPE(pipe, _PIPEAGCMAX, _PIPEBGCMAX) + (i) * 4) + #define _GAMMA_MODE_A 0x4a480 #define _GAMMA_MODE_B 0x4ac80 #define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B) -#define GAMMA_MODE_MODE_MASK (3 << 0) -#define GAMMA_MODE_MODE_8BIT (0 << 0) -#define GAMMA_MODE_MODE_10BIT (1 << 0) -#define GAMMA_MODE_MODE_12BIT (2 << 0) -#define GAMMA_MODE_MODE_SPLIT (3 << 0) +#define PRE_CSC_GAMMA_ENABLE (1 << 31) +#define POST_CSC_GAMMA_ENABLE (1 << 30) +#define GAMMA_MODE_MODE_MASK (3 << 0) +#define GAMMA_MODE_MODE_8BIT (0 << 0) +#define GAMMA_MODE_MODE_10BIT (1 << 0) +#define GAMMA_MODE_MODE_12BIT (2 << 0) +#define GAMMA_MODE_MODE_SPLIT (3 << 0) /* DMC/CSR */ #define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4) @@ -7163,8 +7326,8 @@ enum { #define GEN8_GT_VECS_IRQ (1 << 6) #define GEN8_GT_GUC_IRQ (1 << 5) #define GEN8_GT_PM_IRQ (1 << 4) -#define GEN8_GT_VCS2_IRQ (1 << 3) -#define GEN8_GT_VCS1_IRQ (1 << 2) +#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */ +#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bpsec! */ #define GEN8_GT_BCS_IRQ (1 << 1) #define GEN8_GT_RCS_IRQ (1 << 0) @@ -7185,8 +7348,8 @@ enum { #define GEN8_RCS_IRQ_SHIFT 0 #define GEN8_BCS_IRQ_SHIFT 16 -#define GEN8_VCS1_IRQ_SHIFT 0 -#define GEN8_VCS2_IRQ_SHIFT 16 +#define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! */ +#define GEN8_VCS1_IRQ_SHIFT 16 /* NB: VCS2 in bpsec! 
*/ #define GEN8_VECS_IRQ_SHIFT 0 #define GEN8_WD_IRQ_SHIFT 16 @@ -7572,12 +7735,13 @@ enum { #define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2) /*GEN11 chicken */ -#define _PIPEA_CHICKEN 0x70038 -#define _PIPEB_CHICKEN 0x71038 -#define _PIPEC_CHICKEN 0x72038 -#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) -#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\ - _PIPEB_CHICKEN) +#define _PIPEA_CHICKEN 0x70038 +#define _PIPEB_CHICKEN 0x71038 +#define _PIPEC_CHICKEN 0x72038 +#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\ + _PIPEB_CHICKEN) +#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15) +#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) /* PCH */ @@ -8047,10 +8211,11 @@ enum { #define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4 #define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A) +#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A) #define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A) +#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) #define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) #define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4) @@ -8786,7 +8951,7 @@ enum { #define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2) /* Audio */ -#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020) +#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020) #define INTEL_AUDIO_DEVCL 0x808629FB #define INTEL_AUDIO_DEVBLC 0x80862801 #define INTEL_AUDIO_DEVCTG 0x80862802 @@ -9201,7 +9366,7 @@ enum skl_power_gate { #define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \ _TRANS_DDI_FUNC_CTL2_A) #define PORT_SYNC_MODE_ENABLE (1 << 4) -#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0) +#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0) #define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0) #define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0 @@ -9521,7 +9686,7 @@ enum skl_power_gate { #define _MG_PLL3_ENABLE 0x46038 #define _MG_PLL4_ENABLE 0x4603C /* Bits are the same as DPLL0_ENABLE */ -#define MG_PLL_ENABLE(port) _MMIO_PORT((port) - PORT_C, _MG_PLL1_ENABLE, \ +#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \ _MG_PLL2_ENABLE) #define _MG_REFCLKIN_CTL_PORT1 0x16892C @@ -9530,9 +9695,9 @@ enum skl_power_gate { #define _MG_REFCLKIN_CTL_PORT4 0x16B92C #define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8) #define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8) -#define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \ - _MG_REFCLKIN_CTL_PORT1, \ - _MG_REFCLKIN_CTL_PORT2) +#define MG_REFCLKIN_CTL(tc_port) _MMIO_PORT((tc_port), \ + _MG_REFCLKIN_CTL_PORT1, \ + _MG_REFCLKIN_CTL_PORT2) #define _MG_CLKTOP2_CORECLKCTL1_PORT1 0x1688D8 #define _MG_CLKTOP2_CORECLKCTL1_PORT2 0x1698D8 @@ -9542,9 +9707,9 @@ enum skl_power_gate { #define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16) #define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8) #define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8) -#define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \ - _MG_CLKTOP2_CORECLKCTL1_PORT1, \ - 
_MG_CLKTOP2_CORECLKCTL1_PORT2) +#define MG_CLKTOP2_CORECLKCTL1(tc_port) _MMIO_PORT((tc_port), \ + _MG_CLKTOP2_CORECLKCTL1_PORT1, \ + _MG_CLKTOP2_CORECLKCTL1_PORT2) #define _MG_CLKTOP2_HSCLKCTL_PORT1 0x1688D4 #define _MG_CLKTOP2_HSCLKCTL_PORT2 0x1698D4 @@ -9562,9 +9727,9 @@ enum skl_power_gate { #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT 8 #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) -#define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \ - _MG_CLKTOP2_HSCLKCTL_PORT1, \ - _MG_CLKTOP2_HSCLKCTL_PORT2) +#define MG_CLKTOP2_HSCLKCTL(tc_port) _MMIO_PORT((tc_port), \ + _MG_CLKTOP2_HSCLKCTL_PORT1, \ + _MG_CLKTOP2_HSCLKCTL_PORT2) #define _MG_PLL_DIV0_PORT1 0x168A00 #define _MG_PLL_DIV0_PORT2 0x169A00 @@ -9576,8 +9741,8 @@ enum skl_power_gate { #define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8) #define MG_PLL_DIV0_FBDIV_INT_MASK (0xff << 0) #define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0) -#define MG_PLL_DIV0(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV0_PORT1, \ - _MG_PLL_DIV0_PORT2) +#define MG_PLL_DIV0(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV0_PORT1, \ + _MG_PLL_DIV0_PORT2) #define _MG_PLL_DIV1_PORT1 0x168A04 #define _MG_PLL_DIV1_PORT2 0x169A04 @@ -9591,8 +9756,8 @@ enum skl_power_gate { #define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4) #define MG_PLL_DIV1_FBPREDIV_MASK (0xf << 0) #define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0) -#define MG_PLL_DIV1(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV1_PORT1, \ - _MG_PLL_DIV1_PORT2) +#define MG_PLL_DIV1(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV1_PORT1, \ + _MG_PLL_DIV1_PORT2) #define _MG_PLL_LF_PORT1 0x168A08 #define _MG_PLL_LF_PORT2 0x169A08 @@ -9604,8 +9769,8 @@ enum skl_power_gate { #define MG_PLL_LF_GAINCTRL(x) ((x) << 16) #define MG_PLL_LF_INT_COEFF(x) ((x) << 8) #define MG_PLL_LF_PROP_COEFF(x) ((x) << 0) -#define MG_PLL_LF(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_LF_PORT1, \ - _MG_PLL_LF_PORT2) +#define MG_PLL_LF(tc_port) _MMIO_PORT((tc_port), _MG_PLL_LF_PORT1, \ + _MG_PLL_LF_PORT2) #define _MG_PLL_FRAC_LOCK_PORT1 0x168A0C #define _MG_PLL_FRAC_LOCK_PORT2 0x169A0C @@ -9617,9 +9782,9 @@ enum skl_power_gate { #define MG_PLL_FRAC_LOCK_DCODITHEREN (1 << 10) #define MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN (1 << 8) #define MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(x) ((x) << 0) -#define MG_PLL_FRAC_LOCK(port) _MMIO_PORT((port) - PORT_C, \ - _MG_PLL_FRAC_LOCK_PORT1, \ - _MG_PLL_FRAC_LOCK_PORT2) +#define MG_PLL_FRAC_LOCK(tc_port) _MMIO_PORT((tc_port), \ + _MG_PLL_FRAC_LOCK_PORT1, \ + _MG_PLL_FRAC_LOCK_PORT2) #define _MG_PLL_SSC_PORT1 0x168A10 #define _MG_PLL_SSC_PORT2 0x169A10 @@ -9631,8 +9796,8 @@ enum skl_power_gate { #define MG_PLL_SSC_STEPNUM(x) ((x) << 10) #define MG_PLL_SSC_FLLEN (1 << 9) #define MG_PLL_SSC_STEPSIZE(x) ((x) << 0) -#define MG_PLL_SSC(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_SSC_PORT1, \ - _MG_PLL_SSC_PORT2) +#define MG_PLL_SSC(tc_port) _MMIO_PORT((tc_port), _MG_PLL_SSC_PORT1, \ + _MG_PLL_SSC_PORT2) #define _MG_PLL_BIAS_PORT1 0x168A14 #define _MG_PLL_BIAS_PORT2 0x169A14 @@ -9651,8 +9816,8 @@ enum skl_power_gate { #define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5) #define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0) #define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0) -#define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \ - _MG_PLL_BIAS_PORT2) +#define MG_PLL_BIAS(tc_port) _MMIO_PORT((tc_port), _MG_PLL_BIAS_PORT1, \ + _MG_PLL_BIAS_PORT2) #define _MG_PLL_TDC_COLDST_BIAS_PORT1 0x168A18 #define _MG_PLL_TDC_COLDST_BIAS_PORT2 0x169A18 @@ -9663,9 +9828,9 @@ enum skl_power_gate { #define 
MG_PLL_TDC_COLDST_COLDSTART (1 << 16) #define MG_PLL_TDC_TDCOVCCORR_EN (1 << 2) #define MG_PLL_TDC_TDCSEL(x) ((x) << 0) -#define MG_PLL_TDC_COLDST_BIAS(port) _MMIO_PORT((port) - PORT_C, \ - _MG_PLL_TDC_COLDST_BIAS_PORT1, \ - _MG_PLL_TDC_COLDST_BIAS_PORT2) +#define MG_PLL_TDC_COLDST_BIAS(tc_port) _MMIO_PORT((tc_port), \ + _MG_PLL_TDC_COLDST_BIAS_PORT1, \ + _MG_PLL_TDC_COLDST_BIAS_PORT2) #define _CNL_DPLL0_CFGCR0 0x6C000 #define _CNL_DPLL1_CFGCR0 0x6C080 @@ -9699,7 +9864,7 @@ enum skl_power_gate { #define DPLL_CFGCR1_KDIV(x) ((x) << 6) #define DPLL_CFGCR1_KDIV_1 (1 << 6) #define DPLL_CFGCR1_KDIV_2 (2 << 6) -#define DPLL_CFGCR1_KDIV_4 (4 << 6) +#define DPLL_CFGCR1_KDIV_3 (4 << 6) #define DPLL_CFGCR1_PDIV_MASK (0xf << 2) #define DPLL_CFGCR1_PDIV_SHIFT (2) #define DPLL_CFGCR1_PDIV(x) ((x) << 2) @@ -9768,16 +9933,29 @@ enum skl_power_gate { #define BXT_DRAM_WIDTH_X64 (0x3 << 4) #define BXT_DRAM_SIZE_MASK (0x7 << 6) #define BXT_DRAM_SIZE_SHIFT 6 -#define BXT_DRAM_SIZE_4GB (0x0 << 6) -#define BXT_DRAM_SIZE_6GB (0x1 << 6) -#define BXT_DRAM_SIZE_8GB (0x2 << 6) -#define BXT_DRAM_SIZE_12GB (0x3 << 6) -#define BXT_DRAM_SIZE_16GB (0x4 << 6) +#define BXT_DRAM_SIZE_4GBIT (0x0 << 6) +#define BXT_DRAM_SIZE_6GBIT (0x1 << 6) +#define BXT_DRAM_SIZE_8GBIT (0x2 << 6) +#define BXT_DRAM_SIZE_12GBIT (0x3 << 6) +#define BXT_DRAM_SIZE_16GBIT (0x4 << 6) +#define BXT_DRAM_TYPE_MASK (0x7 << 22) +#define BXT_DRAM_TYPE_SHIFT 22 +#define BXT_DRAM_TYPE_DDR3 (0x0 << 22) +#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22) +#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22) +#define BXT_DRAM_TYPE_DDR4 (0x4 << 22) #define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666 #define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04) #define SKL_REQ_DATA_MASK (0xF << 0) +#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000) +#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0) +#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0) +#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0) +#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0) +#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0) + #define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C) #define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010) #define SKL_DRAM_S_SHIFT 16 @@ -9789,8 +9967,21 @@ enum skl_power_gate { #define SKL_DRAM_WIDTH_X32 (0x2 << 8) #define SKL_DRAM_RANK_MASK (0x1 << 10) #define SKL_DRAM_RANK_SHIFT 10 -#define SKL_DRAM_RANK_SINGLE (0x0 << 10) -#define SKL_DRAM_RANK_DUAL (0x1 << 10) +#define SKL_DRAM_RANK_1 (0x0 << 10) +#define SKL_DRAM_RANK_2 (0x1 << 10) +#define SKL_DRAM_RANK_MASK (0x1 << 10) +#define CNL_DRAM_SIZE_MASK 0x7F +#define CNL_DRAM_WIDTH_MASK (0x3 << 7) +#define CNL_DRAM_WIDTH_SHIFT 7 +#define CNL_DRAM_WIDTH_X8 (0x0 << 7) +#define CNL_DRAM_WIDTH_X16 (0x1 << 7) +#define CNL_DRAM_WIDTH_X32 (0x2 << 7) +#define CNL_DRAM_RANK_MASK (0x3 << 9) +#define CNL_DRAM_RANK_SHIFT 9 +#define CNL_DRAM_RANK_1 (0x0 << 9) +#define CNL_DRAM_RANK_2 (0x1 << 9) +#define CNL_DRAM_RANK_3 (0x2 << 9) +#define CNL_DRAM_RANK_4 (0x3 << 9) /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, * since on HSW we can't write to it using I915_WRITE. 
*/ @@ -9835,10 +10026,14 @@ enum skl_power_gate { #define _PIPE_A_CSC_COEFF_BU 0x4901c #define _PIPE_A_CSC_COEFF_RV_GV 0x49020 #define _PIPE_A_CSC_COEFF_BV 0x49024 + #define _PIPE_A_CSC_MODE 0x49028 -#define CSC_BLACK_SCREEN_OFFSET (1 << 2) -#define CSC_POSITION_BEFORE_GAMMA (1 << 1) -#define CSC_MODE_YUV_TO_RGB (1 << 0) +#define ICL_CSC_ENABLE (1 << 31) +#define ICL_OUTPUT_CSC_ENABLE (1 << 30) +#define CSC_BLACK_SCREEN_OFFSET (1 << 2) +#define CSC_POSITION_BEFORE_GAMMA (1 << 1) +#define CSC_MODE_YUV_TO_RGB (1 << 0) + #define _PIPE_A_CSC_PREOFF_HI 0x49030 #define _PIPE_A_CSC_PREOFF_ME 0x49034 #define _PIPE_A_CSC_PREOFF_LO 0x49038 @@ -9874,6 +10069,70 @@ enum skl_power_gate { #define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) #define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) +/* Pipe Output CSC */ +#define _PIPE_A_OUTPUT_CSC_COEFF_RY_GY 0x49050 +#define _PIPE_A_OUTPUT_CSC_COEFF_BY 0x49054 +#define _PIPE_A_OUTPUT_CSC_COEFF_RU_GU 0x49058 +#define _PIPE_A_OUTPUT_CSC_COEFF_BU 0x4905c +#define _PIPE_A_OUTPUT_CSC_COEFF_RV_GV 0x49060 +#define _PIPE_A_OUTPUT_CSC_COEFF_BV 0x49064 +#define _PIPE_A_OUTPUT_CSC_PREOFF_HI 0x49068 +#define _PIPE_A_OUTPUT_CSC_PREOFF_ME 0x4906c +#define _PIPE_A_OUTPUT_CSC_PREOFF_LO 0x49070 +#define _PIPE_A_OUTPUT_CSC_POSTOFF_HI 0x49074 +#define _PIPE_A_OUTPUT_CSC_POSTOFF_ME 0x49078 +#define _PIPE_A_OUTPUT_CSC_POSTOFF_LO 0x4907c + +#define _PIPE_B_OUTPUT_CSC_COEFF_RY_GY 0x49150 +#define _PIPE_B_OUTPUT_CSC_COEFF_BY 0x49154 +#define _PIPE_B_OUTPUT_CSC_COEFF_RU_GU 0x49158 +#define _PIPE_B_OUTPUT_CSC_COEFF_BU 0x4915c +#define _PIPE_B_OUTPUT_CSC_COEFF_RV_GV 0x49160 +#define _PIPE_B_OUTPUT_CSC_COEFF_BV 0x49164 +#define _PIPE_B_OUTPUT_CSC_PREOFF_HI 0x49168 +#define _PIPE_B_OUTPUT_CSC_PREOFF_ME 0x4916c +#define _PIPE_B_OUTPUT_CSC_PREOFF_LO 0x49170 +#define _PIPE_B_OUTPUT_CSC_POSTOFF_HI 0x49174 +#define _PIPE_B_OUTPUT_CSC_POSTOFF_ME 0x49178 +#define _PIPE_B_OUTPUT_CSC_POSTOFF_LO 0x4917c + +#define PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe) _MMIO_PIPE(pipe,\ + _PIPE_A_OUTPUT_CSC_COEFF_RY_GY,\ + _PIPE_B_OUTPUT_CSC_COEFF_RY_GY) +#define PIPE_CSC_OUTPUT_COEFF_BY(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_COEFF_BY, \ + _PIPE_B_OUTPUT_CSC_COEFF_BY) +#define PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_COEFF_RU_GU, \ + _PIPE_B_OUTPUT_CSC_COEFF_RU_GU) +#define PIPE_CSC_OUTPUT_COEFF_BU(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_COEFF_BU, \ + _PIPE_B_OUTPUT_CSC_COEFF_BU) +#define PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_COEFF_RV_GV, \ + _PIPE_B_OUTPUT_CSC_COEFF_RV_GV) +#define PIPE_CSC_OUTPUT_COEFF_BV(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_COEFF_BV, \ + _PIPE_B_OUTPUT_CSC_COEFF_BV) +#define PIPE_CSC_OUTPUT_PREOFF_HI(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_PREOFF_HI, \ + _PIPE_B_OUTPUT_CSC_PREOFF_HI) +#define PIPE_CSC_OUTPUT_PREOFF_ME(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_PREOFF_ME, \ + _PIPE_B_OUTPUT_CSC_PREOFF_ME) +#define PIPE_CSC_OUTPUT_PREOFF_LO(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_PREOFF_LO, \ + _PIPE_B_OUTPUT_CSC_PREOFF_LO) +#define PIPE_CSC_OUTPUT_POSTOFF_HI(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_POSTOFF_HI, \ + _PIPE_B_OUTPUT_CSC_POSTOFF_HI) +#define PIPE_CSC_OUTPUT_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_POSTOFF_ME, \ + _PIPE_B_OUTPUT_CSC_POSTOFF_ME) +#define PIPE_CSC_OUTPUT_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_POSTOFF_LO, \ + _PIPE_B_OUTPUT_CSC_POSTOFF_LO) + 
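Editorial aside (not part of the commit): the new output-CSC registers above follow the driver's usual paired-offset pattern, where only the PIPE_A and PIPE_B addresses are spelled out and _MMIO_PIPE()/_PIPE() derive the address for a given pipe from the A-to-B stride. The standalone C sketch below illustrates that arithmetic; pipe_reg() is a hypothetical stand-in for those macros, and the expansion shown is assumed from the long-standing i915_reg.h convention rather than taken from this diff.

#include <stdio.h>

#define _PIPE_A_OUTPUT_CSC_COEFF_RY_GY 0x49050
#define _PIPE_B_OUTPUT_CSC_COEFF_RY_GY 0x49150

/* Assumed equivalent of _PIPE(pipe, a, b): base A plus pipe * (B - A). */
static unsigned int pipe_reg(int pipe, unsigned int a, unsigned int b)
{
	return a + pipe * (b - a);
}

int main(void)
{
	/* Only pipes A (0) and B (1) are defined by the hunk above. */
	for (int pipe = 0; pipe < 2; pipe++)
		printf("PIPE_CSC_OUTPUT_COEFF_RY_GY(%d) -> 0x%05x\n",
		       pipe,
		       pipe_reg(pipe,
				_PIPE_A_OUTPUT_CSC_COEFF_RY_GY,
				_PIPE_B_OUTPUT_CSC_COEFF_RY_GY));
	return 0;
}

With this convention each per-pipe variant is a single macro invocation rather than a table of addresses; the 0x100 stride between the A and B instances does all the indexing.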
/* pipe degamma/gamma LUTs on IVB+ */ #define _PAL_PREC_INDEX_A 0x4A400 #define _PAL_PREC_INDEX_B 0x4AC00 @@ -9882,6 +10141,7 @@ enum skl_power_gate { #define PAL_PREC_SPLIT_MODE (1 << 31) #define PAL_PREC_AUTO_INCREMENT (1 << 15) #define PAL_PREC_INDEX_VALUE_MASK (0x3ff << 0) +#define PAL_PREC_INDEX_VALUE(x) ((x) << 0) #define _PAL_PREC_DATA_A 0x4A404 #define _PAL_PREC_DATA_B 0x4AC04 #define _PAL_PREC_DATA_C 0x4B404 @@ -9899,6 +10159,7 @@ enum skl_power_gate { #define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B) #define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4) #define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4) +#define PREC_PAL_EXT2_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT2_GC_MAX_A, _PAL_PREC_EXT2_GC_MAX_B) + (i) * 4) #define _PRE_CSC_GAMC_INDEX_A 0x4A484 #define _PRE_CSC_GAMC_INDEX_B 0x4AC84 diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index ca95ab2f4cfa..82094b9f5ba7 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -22,13 +22,30 @@ * */ -#include <linux/prefetch.h> #include <linux/dma-fence-array.h> +#include <linux/irq_work.h> +#include <linux/prefetch.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/sched/signal.h> #include "i915_drv.h" +#include "i915_active.h" +#include "i915_globals.h" +#include "i915_reset.h" + +struct execute_cb { + struct list_head link; + struct irq_work work; + struct i915_sw_fence *fence; +}; + +static struct i915_global_request { + struct i915_global base; + struct kmem_cache *slab_requests; + struct kmem_cache *slab_dependencies; + struct kmem_cache *slab_execute_cbs; +} global; static const char *i915_fence_get_driver_name(struct dma_fence *fence) { @@ -49,7 +66,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence) if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return "signaled"; - return to_request(fence)->timeline->name; + return to_request(fence)->gem_context->name ?: "[i915]"; } static bool i915_fence_signaled(struct dma_fence *fence) @@ -59,14 +76,16 @@ static bool i915_fence_signaled(struct dma_fence *fence) static bool i915_fence_enable_signaling(struct dma_fence *fence) { - return intel_engine_enable_signaling(to_request(fence), true); + return i915_request_enable_breadcrumb(to_request(fence)); } static signed long i915_fence_wait(struct dma_fence *fence, bool interruptible, signed long timeout) { - return i915_request_wait(to_request(fence), interruptible, timeout); + return i915_request_wait(to_request(fence), + interruptible | I915_WAIT_PRIORITY, + timeout); } static void i915_fence_release(struct dma_fence *fence) @@ -82,7 +101,7 @@ static void i915_fence_release(struct dma_fence *fence) */ i915_sw_fence_fini(&rq->submit); - kmem_cache_free(rq->i915->requests, rq); + kmem_cache_free(global.slab_requests, rq); } const struct dma_fence_ops i915_fence_ops = { @@ -111,99 +130,10 @@ i915_request_remove_from_client(struct i915_request *request) spin_unlock(&file_priv->mm.lock); } -static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) +static void reserve_gt(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - struct i915_timeline *timeline; - enum intel_engine_id id; - int ret; - - /* Carefully retire all requests without writing to the rings */ - ret = i915_gem_wait_for_idle(i915, - I915_WAIT_INTERRUPTIBLE | - 
I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - GEM_BUG_ON(i915->gt.active_requests); - - /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ - for_each_engine(engine, i915, id) { - GEM_TRACE("%s seqno %d (current %d) -> %d\n", - engine->name, - engine->timeline.seqno, - intel_engine_get_seqno(engine), - seqno); - - if (seqno == engine->timeline.seqno) - continue; - - kthread_park(engine->breadcrumbs.signaler); - - if (!i915_seqno_passed(seqno, engine->timeline.seqno)) { - /* Flush any waiters before we reuse the seqno */ - intel_engine_disarm_breadcrumbs(engine); - intel_engine_init_hangcheck(engine); - GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals)); - } - - /* Check we are idle before we fiddle with hw state! */ - GEM_BUG_ON(!intel_engine_is_idle(engine)); - GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request)); - - /* Finally reset hw state */ - intel_engine_init_global_seqno(engine, seqno); - engine->timeline.seqno = seqno; - - kthread_unpark(engine->breadcrumbs.signaler); - } - - list_for_each_entry(timeline, &i915->gt.timelines, link) - memset(timeline->global_sync, 0, sizeof(timeline->global_sync)); - - i915->gt.request_serial = seqno; - - return 0; -} - -int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno) -{ - struct drm_i915_private *i915 = to_i915(dev); - - lockdep_assert_held(&i915->drm.struct_mutex); - - if (seqno == 0) - return -EINVAL; - - /* HWS page needs to be set less than what we will inject to ring */ - return reset_all_global_seqno(i915, seqno - 1); -} - -static int reserve_gt(struct drm_i915_private *i915) -{ - int ret; - - /* - * Reservation is fine until we may need to wrap around - * - * By incrementing the serial for every request, we know that no - * individual engine may exceed that serial (as each is reset to 0 - * on any wrap). This protects even the most pessimistic of migrations - * of every request from all engines onto just one. - */ - while (unlikely(++i915->gt.request_serial == 0)) { - ret = reset_all_global_seqno(i915, 0); - if (ret) { - i915->gt.request_serial--; - return ret; - } - } - if (!i915->gt.active_requests++) i915_gem_unpark(i915); - - return 0; } static void unreserve_gt(struct drm_i915_private *i915) @@ -213,12 +143,6 @@ static void unreserve_gt(struct drm_i915_private *i915) i915_gem_park(i915); } -void i915_gem_retire_noop(struct i915_gem_active *active, - struct i915_request *request) -{ - /* Space left intentionally blank */ -} - static void advance_ring(struct i915_request *request) { struct intel_ring *ring = request->ring; @@ -243,7 +167,6 @@ static void advance_ring(struct i915_request *request) * is just about to be. Either works, if we miss the last two * noops - they are safe to be replayed on a reset. 
*/ - GEM_TRACE("marking %s as inactive\n", ring->timeline->name); tail = READ_ONCE(request->tail); list_del(&ring->active_link); } else { @@ -270,11 +193,10 @@ static void free_capture_list(struct i915_request *request) static void __retire_engine_request(struct intel_engine_cs *engine, struct i915_request *rq) { - GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n", + GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n", __func__, engine->name, rq->fence.context, rq->fence.seqno, - rq->global_seqno, - intel_engine_get_seqno(engine)); + hwsp_seqno(rq)); GEM_BUG_ON(!i915_request_completed(rq)); @@ -286,10 +208,11 @@ static void __retire_engine_request(struct intel_engine_cs *engine, spin_unlock(&engine->timeline.lock); spin_lock(&rq->lock); - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) + i915_request_mark_complete(rq); + if (!i915_request_signaled(rq)) dma_fence_signal_locked(&rq->fence); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) - intel_engine_cancel_signaling(rq); + i915_request_cancel_breadcrumb(rq); if (rq->waitboost) { GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); atomic_dec(&rq->i915->gt_pm.rps.num_waiters); @@ -330,13 +253,12 @@ static void __retire_engine_upto(struct intel_engine_cs *engine, static void i915_request_retire(struct i915_request *request) { - struct i915_gem_active *active, *next; + struct i915_active_request *active, *next; - GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n", + GEM_TRACE("%s fence %llx:%lld, current %d\n", request->engine->name, request->fence.context, request->fence.seqno, - request->global_seqno, - intel_engine_get_seqno(request->engine)); + hwsp_seqno(request)); lockdep_assert_held(&request->i915->drm.struct_mutex); GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit)); @@ -363,10 +285,10 @@ static void i915_request_retire(struct i915_request *request) * we may spend an inordinate amount of time simply handling * the retirement of requests and processing their callbacks. * Of which, this loop itself is particularly hot due to the - * cache misses when jumping around the list of i915_gem_active. - * So we try to keep this loop as streamlined as possible and - * also prefetch the next i915_gem_active to try and hide - * the likely cache miss. + * cache misses when jumping around the list of + * i915_active_request. So we try to keep this loop as + * streamlined as possible and also prefetch the next + * i915_active_request to try and hide the likely cache miss. 
*/ prefetchw(next); @@ -378,15 +300,13 @@ static void i915_request_retire(struct i915_request *request) i915_request_remove_from_client(request); - /* Retirement decays the ban score as it is a sign of ctx progress */ - atomic_dec_if_positive(&request->gem_context->ban_score); intel_context_unpin(request->hw_context); __retire_engine_upto(request->engine, request); unreserve_gt(request->i915); - i915_sched_node_fini(request->i915, &request->sched); + i915_sched_node_fini(&request->sched); i915_request_put(request); } @@ -395,11 +315,10 @@ void i915_request_retire_upto(struct i915_request *rq) struct intel_ring *ring = rq->ring; struct i915_request *tmp; - GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n", + GEM_TRACE("%s fence %llx:%lld, current %d\n", rq->engine->name, rq->fence.context, rq->fence.seqno, - rq->global_seqno, - intel_engine_get_seqno(rq->engine)); + hwsp_seqno(rq)); lockdep_assert_held(&rq->i915->drm.struct_mutex); GEM_BUG_ON(!i915_request_completed(rq)); @@ -415,9 +334,67 @@ void i915_request_retire_upto(struct i915_request *rq) } while (tmp != rq); } -static u32 timeline_get_seqno(struct i915_timeline *tl) +static void irq_execute_cb(struct irq_work *wrk) +{ + struct execute_cb *cb = container_of(wrk, typeof(*cb), work); + + i915_sw_fence_complete(cb->fence); + kmem_cache_free(global.slab_execute_cbs, cb); +} + +static void __notify_execute_cb(struct i915_request *rq) { - return ++tl->seqno; + struct execute_cb *cb; + + lockdep_assert_held(&rq->lock); + + if (list_empty(&rq->execute_cb)) + return; + + list_for_each_entry(cb, &rq->execute_cb, link) + irq_work_queue(&cb->work); + + /* + * XXX Rollback on __i915_request_unsubmit() + * + * In the future, perhaps when we have an active time-slicing scheduler, + * it will be interesting to unsubmit parallel execution and remove + * busywaits from the GPU until their master is restarted. This is + * quite hairy, we have to carefully rollback the fence and do a + * preempt-to-idle cycle on the target engine, all the while the + * master execute_cb may refire. 
+ */ + INIT_LIST_HEAD(&rq->execute_cb); +} + +static int +i915_request_await_execution(struct i915_request *rq, + struct i915_request *signal, + gfp_t gfp) +{ + struct execute_cb *cb; + + if (i915_request_is_active(signal)) + return 0; + + cb = kmem_cache_alloc(global.slab_execute_cbs, gfp); + if (!cb) + return -ENOMEM; + + cb->fence = &rq->submit; + i915_sw_fence_await(cb->fence); + init_irq_work(&cb->work, irq_execute_cb); + + spin_lock_irq(&signal->lock); + if (i915_request_is_active(signal)) { + i915_sw_fence_complete(cb->fence); + kmem_cache_free(global.slab_execute_cbs, cb); + } else { + list_add_tail(&cb->link, &signal->execute_cb); + } + spin_unlock_irq(&signal->lock); + + return 0; } static void move_to_timeline(struct i915_request *request, @@ -434,39 +411,39 @@ static void move_to_timeline(struct i915_request *request, void __i915_request_submit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - u32 seqno; - GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n", + GEM_TRACE("%s fence %llx:%lld -> current %d\n", engine->name, request->fence.context, request->fence.seqno, - engine->timeline.seqno + 1, - intel_engine_get_seqno(engine)); + hwsp_seqno(request)); GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->timeline.lock); - GEM_BUG_ON(request->global_seqno); - - seqno = timeline_get_seqno(&engine->timeline); - GEM_BUG_ON(!seqno); - GEM_BUG_ON(intel_engine_signaled(engine, seqno)); + if (i915_gem_context_is_banned(request->gem_context)) + i915_request_skip(request, -EIO); /* We may be recursing from the signal callback of another i915 fence */ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); - request->global_seqno = seqno; - if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) - intel_engine_enable_signaling(request, false); + + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); + set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); + + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) && + !i915_request_enable_breadcrumb(request)) + intel_engine_queue_breadcrumbs(engine); + + __notify_execute_cb(request); + spin_unlock(&request->lock); - engine->emit_breadcrumb(request, - request->ring->vaddr + request->postfix); + engine->emit_fini_breadcrumb(request, + request->ring->vaddr + request->postfix); /* Transfer from per-context onto the global per-engine timeline */ move_to_timeline(request, &engine->timeline); trace_i915_request_execute(request); - - wake_up_all(&request->execute); } void i915_request_submit(struct i915_request *request) @@ -486,11 +463,10 @@ void __i915_request_unsubmit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n", + GEM_TRACE("%s fence %llx:%lld, current %d\n", engine->name, request->fence.context, request->fence.seqno, - request->global_seqno, - intel_engine_get_seqno(engine)); + hwsp_seqno(request)); GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->timeline.lock); @@ -499,16 +475,25 @@ void __i915_request_unsubmit(struct i915_request *request) * Only unwind in reverse order, required so that the per-context list * is kept in seqno/ring order. 
*/ - GEM_BUG_ON(!request->global_seqno); - GEM_BUG_ON(request->global_seqno != engine->timeline.seqno); - GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno)); - engine->timeline.seqno--; /* We may be recursing from the signal callback of another i915 fence */ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); - request->global_seqno = 0; + + /* + * As we do not allow WAIT to preempt inflight requests, + * once we have executed a request, along with triggering + * any execution callbacks, we must preserve its ordering + * within the non-preemptible FIFO. + */ + BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */ + request->sched.attr.priority |= __NO_PREEMPTION; + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) - intel_engine_cancel_signaling(request); + i915_request_cancel_breadcrumb(request); + + GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); + clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); + spin_unlock(&request->lock); /* Transfer back from the global per-engine timeline to per-context */ @@ -566,6 +551,43 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) return NOTIFY_DONE; } +static void ring_retire_requests(struct intel_ring *ring) +{ + struct i915_request *rq, *rn; + + list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) { + if (!i915_request_completed(rq)) + break; + + i915_request_retire(rq); + } +} + +static noinline struct i915_request * +i915_request_alloc_slow(struct intel_context *ce) +{ + struct intel_ring *ring = ce->ring; + struct i915_request *rq; + + if (list_empty(&ring->request_list)) + goto out; + + /* Ratelimit ourselves to prevent oom from malicious clients */ + rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link); + cond_synchronize_rcu(rq->rcustate); + + /* Retire our old requests in the hope that we free some */ + ring_retire_requests(ring); + +out: + return kmem_cache_alloc(global.slab_requests, GFP_KERNEL); +} + +static int add_timeline_barrier(struct i915_request *rq) +{ + return i915_request_await_active_request(rq, &rq->timeline->barrier); +} + /** * i915_request_alloc - allocate a request structure * @@ -579,8 +601,10 @@ struct i915_request * i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) { struct drm_i915_private *i915 = engine->i915; - struct i915_request *rq; struct intel_context *ce; + struct i915_timeline *tl; + struct i915_request *rq; + u32 seqno; int ret; lockdep_assert_held(&i915->drm.struct_mutex); @@ -596,8 +620,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * ABI: Before userspace accesses the GPU (e.g. execbuffer), report * EIO if the GPU is already wedged. */ - if (i915_terminally_wedged(&i915->gpu_error)) - return ERR_PTR(-EIO); + ret = i915_terminally_wedged(i915); + if (ret) + return ERR_PTR(ret); /* * Pinning the contexts may generate requests in order to acquire @@ -608,13 +633,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) if (IS_ERR(ce)) return ERR_CAST(ce); - ret = reserve_gt(i915); - if (ret) - goto err_unpin; - - ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST); - if (ret) - goto err_unreserve; + reserve_gt(i915); + mutex_lock(&ce->ring->timeline->mutex); /* Move our oldest request to the slab-cache (if not in use!) 
*/ rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link); @@ -628,7 +648,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * We use RCU to look up requests in flight. The lookups may * race with the request being allocated from the slab freelist. * That is the request we are writing to here, may be in the process - * of being read by __i915_gem_active_get_rcu(). As such, + * of being read by __i915_active_request_get_rcu(). As such, * we have to be very careful when overwriting the contents. During * the RCU lookup, we change chase the request->engine pointer, * read the request->global_seqno and increment the reference count. @@ -651,51 +671,45 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * * Do not use kmem_cache_zalloc() here! */ - rq = kmem_cache_alloc(i915->requests, + rq = kmem_cache_alloc(global.slab_requests, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); if (unlikely(!rq)) { - i915_retire_requests(i915); - - /* Ratelimit ourselves to prevent oom from malicious clients */ - rq = i915_gem_active_raw(&ce->ring->timeline->last_request, - &i915->drm.struct_mutex); - if (rq) - cond_synchronize_rcu(rq->rcustate); - - rq = kmem_cache_alloc(i915->requests, GFP_KERNEL); + rq = i915_request_alloc_slow(ce); if (!rq) { ret = -ENOMEM; goto err_unreserve; } } - rq->rcustate = get_state_synchronize_rcu(); - INIT_LIST_HEAD(&rq->active_list); + INIT_LIST_HEAD(&rq->execute_cb); + + tl = ce->ring->timeline; + ret = i915_timeline_get_seqno(tl, rq, &seqno); + if (ret) + goto err_free; + rq->i915 = i915; rq->engine = engine; rq->gem_context = ctx; rq->hw_context = ce; rq->ring = ce->ring; - rq->timeline = ce->ring->timeline; + rq->timeline = tl; GEM_BUG_ON(rq->timeline == &engine->timeline); + rq->hwsp_seqno = tl->hwsp_seqno; + rq->hwsp_cacheline = tl->hwsp_cacheline; + rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ spin_lock_init(&rq->lock); - dma_fence_init(&rq->fence, - &i915_fence_ops, - &rq->lock, - rq->timeline->fence_context, - timeline_get_seqno(rq->timeline)); + dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, + tl->fence_context, seqno); /* We bump the ref for the fence chain */ i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); - init_waitqueue_head(&rq->execute); i915_sched_node_init(&rq->sched); /* No zalloc, must clear what we need by hand */ - rq->global_seqno = 0; - rq->signaling.wait.seqno = 0; rq->file_priv = NULL; rq->batch = NULL; rq->capture_list = NULL; @@ -707,9 +721,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * i915_request_add() call can't fail. Note that the reserve may need * to be redone if the request is not actually submitted straight * away, e.g. because a GPU scheduler has deferred it. + * + * Note that due to how we add reserved_space to intel_ring_begin() + * we need to double our request to ensure that if we need to wrap + * around inside i915_request_add() there is sufficient space at + * the beginning of the ring as well. */ - rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; - GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz); + rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32); /* * Record the position of the start of the request so that @@ -719,8 +737,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) */ rq->head = rq->ring->emit; - /* Unconditionally invalidate GPU caches and TLBs. 
*/ - ret = engine->emit_flush(rq, EMIT_INVALIDATE); + ret = add_timeline_barrier(rq); if (ret) goto err_unwind; @@ -745,15 +762,70 @@ err_unwind: GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); GEM_BUG_ON(!list_empty(&rq->sched.waiters_list)); - kmem_cache_free(i915->requests, rq); +err_free: + kmem_cache_free(global.slab_requests, rq); err_unreserve: + mutex_unlock(&ce->ring->timeline->mutex); unreserve_gt(i915); -err_unpin: intel_context_unpin(ce); return ERR_PTR(ret); } static int +emit_semaphore_wait(struct i915_request *to, + struct i915_request *from, + gfp_t gfp) +{ + u32 hwsp_offset; + u32 *cs; + int err; + + GEM_BUG_ON(!from->timeline->has_initial_breadcrumb); + GEM_BUG_ON(INTEL_GEN(to->i915) < 8); + + /* Just emit the first semaphore we see as request space is limited. */ + if (to->sched.semaphores & from->engine->mask) + return i915_sw_fence_await_dma_fence(&to->submit, + &from->fence, 0, + I915_FENCE_GFP); + + /* We need to pin the signaler's HWSP until we are finished reading. */ + err = i915_timeline_read_hwsp(from, to, &hwsp_offset); + if (err) + return err; + + /* Only submit our spinner after the signaler is running! */ + err = i915_request_await_execution(to, from, gfp); + if (err) + return err; + + cs = intel_ring_begin(to, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * Using greater-than-or-equal here means we have to worry + * about seqno wraparound. To side step that issue, we swap + * the timeline HWSP upon wrapping, so that everyone listening + * for the old (pre-wrap) values do not see the much smaller + * (post-wrap) values than they were expecting (and so wait + * forever). + */ + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD; + *cs++ = from->fence.seqno; + *cs++ = hwsp_offset; + *cs++ = 0; + + intel_ring_advance(to, cs); + to->sched.semaphores |= from->engine->mask; + to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN; + return 0; +} + +static int i915_request_await_request(struct i915_request *to, struct i915_request *from) { int ret; @@ -765,9 +837,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) return 0; if (to->engine->schedule) { - ret = i915_sched_node_add_dependency(to->i915, - &to->sched, - &from->sched); + ret = i915_sched_node_add_dependency(&to->sched, &from->sched); if (ret < 0) return ret; } @@ -776,34 +846,15 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) ret = i915_sw_fence_await_sw_fence_gfp(&to->submit, &from->submit, I915_FENCE_GFP); - return ret < 0 ? ret : 0; - } - - if (to->engine->semaphore.sync_to) { - u32 seqno; - - GEM_BUG_ON(!from->engine->semaphore.signal); - - seqno = i915_request_global_seqno(from); - if (!seqno) - goto await_dma_fence; - - if (seqno <= to->timeline->global_sync[from->engine->id]) - return 0; - - trace_i915_gem_ring_sync_to(to, from); - ret = to->engine->semaphore.sync_to(to, from); - if (ret) - return ret; - - to->timeline->global_sync[from->engine->id] = seqno; - return 0; + } else if (intel_engine_has_semaphores(to->engine) && + to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) { + ret = emit_semaphore_wait(to, from, I915_FENCE_GFP); + } else { + ret = i915_sw_fence_await_dma_fence(&to->submit, + &from->fence, 0, + I915_FENCE_GFP); } -await_dma_fence: - ret = i915_sw_fence_await_dma_fence(&to->submit, - &from->fence, 0, - I915_FENCE_GFP); return ret < 0 ? 
ret : 0; } @@ -948,6 +999,60 @@ void i915_request_skip(struct i915_request *rq, int error) memset(vaddr + head, 0, rq->postfix - head); } +static struct i915_request * +__i915_request_add_to_timeline(struct i915_request *rq) +{ + struct i915_timeline *timeline = rq->timeline; + struct i915_request *prev; + + /* + * Dependency tracking and request ordering along the timeline + * is special cased so that we can eliminate redundant ordering + * operations while building the request (we know that the timeline + * itself is ordered, and here we guarantee it). + * + * As we know we will need to emit tracking along the timeline, + * we embed the hooks into our request struct -- at the cost of + * having to have specialised no-allocation interfaces (which will + * be beneficial elsewhere). + * + * A second benefit to open-coding i915_request_await_request is + * that we can apply a slight variant of the rules specialised + * for timelines that jump between engines (such as virtual engines). + * If we consider the case of virtual engine, we must emit a dma-fence + * to prevent scheduling of the second request until the first is + * complete (to maximise our greedy late load balancing) and this + * precludes optimising to use semaphores serialisation of a single + * timeline across engines. + */ + prev = i915_active_request_raw(&timeline->last_request, + &rq->i915->drm.struct_mutex); + if (prev && !i915_request_completed(prev)) { + if (is_power_of_2(prev->engine->mask | rq->engine->mask)) + i915_sw_fence_await_sw_fence(&rq->submit, + &prev->submit, + &rq->submitq); + else + __i915_sw_fence_await_dma_fence(&rq->submit, + &prev->fence, + &rq->dmaq); + if (rq->engine->schedule) + __i915_sched_node_add_dependency(&rq->sched, + &prev->sched, + &rq->dep, + 0); + } + + spin_lock_irq(&timeline->lock); + list_add_tail(&rq->link, &timeline->requests); + spin_unlock_irq(&timeline->lock); + + GEM_BUG_ON(timeline->seqno != rq->fence.seqno); + __i915_active_request_set(&timeline->last_request, rq); + + return prev; +} + /* * NB: This function is not allowed to fail. Doing so would mean the the * request is not being tracked for completion but the work itself is @@ -961,10 +1066,10 @@ void i915_request_add(struct i915_request *request) struct i915_request *prev; u32 *cs; - GEM_TRACE("%s fence %llx:%d\n", + GEM_TRACE("%s fence %llx:%lld\n", engine->name, request->fence.context, request->fence.seqno); - lockdep_assert_held(&request->i915->drm.struct_mutex); + lockdep_assert_held(&request->timeline->mutex); trace_i915_request_add(request); /* @@ -979,8 +1084,8 @@ void i915_request_add(struct i915_request *request) * should already have been reserved in the ring buffer. Let the ring * know that it is time to use that space up. */ + GEM_BUG_ON(request->reserved_space > request->ring->space); request->reserved_space = 0; - engine->emit_flush(request, EMIT_FLUSH); /* * Record the position of the start of the breadcrumb so that @@ -988,41 +1093,16 @@ void i915_request_add(struct i915_request *request) * GPU processing the request, we never over-estimate the * position of the ring's HEAD. */ - cs = intel_ring_begin(request, engine->emit_breadcrumb_sz); + cs = intel_ring_begin(request, engine->emit_fini_breadcrumb_dw); GEM_BUG_ON(IS_ERR(cs)); request->postfix = intel_ring_offset(request, cs); - /* - * Seal the request and mark it as pending execution. Note that - * we may inspect this state, without holding any locks, during - * hangcheck. 
Hence we apply the barrier to ensure that we do not - * see a more recent value in the hws than we are tracking. - */ - - prev = i915_gem_active_raw(&timeline->last_request, - &request->i915->drm.struct_mutex); - if (prev && !i915_request_completed(prev)) { - i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, - &request->submitq); - if (engine->schedule) - __i915_sched_node_add_dependency(&request->sched, - &prev->sched, - &request->dep, - 0); - } - - spin_lock_irq(&timeline->lock); - list_add_tail(&request->link, &timeline->requests); - spin_unlock_irq(&timeline->lock); - - GEM_BUG_ON(timeline->seqno != request->fence.seqno); - i915_gem_active_set(&timeline->last_request, request); + prev = __i915_request_add_to_timeline(request); list_add_tail(&request->ring_link, &ring->request_list); - if (list_is_first(&request->ring_link, &ring->request_list)) { - GEM_TRACE("marking %s as active\n", ring->timeline->name); + if (list_is_first(&request->ring_link, &ring->request_list)) list_add(&ring->active_link, &request->i915->gt.active_rings); - } + request->i915->gt.active_engines |= request->engine->mask; request->emitted_jiffies = jiffies; /* @@ -1042,12 +1122,27 @@ void i915_request_add(struct i915_request *request) struct i915_sched_attr attr = request->gem_context->sched; /* + * Boost actual workloads past semaphores! + * + * With semaphores we spin on one engine waiting for another, + * simply to reduce the latency of starting our work when + * the signaler completes. However, if there is any other + * work that we could be doing on this engine instead, that + * is better utilisation and will reduce the overall duration + * of the current work. To avoid PI boosting a semaphore + * far in the distance past over useful work, we keep a history + * of any semaphore use along our dependency chain. + */ + if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN)) + attr.priority |= I915_PRIORITY_NOSEMAPHORE; + + /* * Boost priorities to new clients (new request flows). * * Allow interactive/synchronous clients to jump ahead of * the bulk clients. (FQ_CODEL) */ - if (!prev || i915_request_completed(prev)) + if (list_empty(&request->sched.signalers_list)) attr.priority |= I915_PRIORITY_NEWCLIENT; engine->schedule(request, &attr); @@ -1075,6 +1170,8 @@ void i915_request_add(struct i915_request *request) */ if (prev && i915_request_completed(prev)) i915_request_retire_upto(prev); + + mutex_unlock(&request->timeline->mutex); } static unsigned long local_clock_us(unsigned int *cpu) @@ -1110,13 +1207,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu) return this_cpu != cpu; } -static bool __i915_spin_request(const struct i915_request *rq, - u32 seqno, int state, unsigned long timeout_us) +static bool __i915_spin_request(const struct i915_request * const rq, + int state, unsigned long timeout_us) { - struct intel_engine_cs *engine = rq->engine; - unsigned int irq, cpu; - - GEM_BUG_ON(!seqno); + unsigned int cpu; /* * Only wait for the request if we know it is likely to complete. @@ -1124,12 +1218,12 @@ static bool __i915_spin_request(const struct i915_request *rq, * We don't track the timestamps around requests, nor the average * request length, so we do not have a good indicator that this * request will complete within the timeout. What we do know is the - * order in which requests are executed by the engine and so we can - * tell if the request has started. 
If the request hasn't started yet, - * it is a fair assumption that it will not complete within our - * relatively short timeout. + * order in which requests are executed by the context and so we can + * tell if the request has been started. If the request is not even + * running yet, it is a fair assumption that it will not complete + * within our relatively short timeout. */ - if (!intel_engine_has_started(engine, seqno)) + if (!i915_request_is_running(rq)) return false; /* @@ -1143,20 +1237,10 @@ static bool __i915_spin_request(const struct i915_request *rq, * takes to sleep on a request, on the order of a microsecond. */ - irq = READ_ONCE(engine->breadcrumbs.irq_count); timeout_us += local_clock_us(&cpu); do { - if (intel_engine_has_completed(engine, seqno)) - return seqno == i915_request_global_seqno(rq); - - /* - * Seqno are meant to be ordered *before* the interrupt. If - * we see an interrupt without a corresponding seqno advance, - * assume we won't see one in the near future but require - * the engine->seqno_barrier() to fixup coherency. - */ - if (READ_ONCE(engine->breadcrumbs.irq_count) != irq) - break; + if (i915_request_completed(rq)) + return true; if (signal_pending_state(state, current)) break; @@ -1170,16 +1254,16 @@ static bool __i915_spin_request(const struct i915_request *rq, return false; } -static bool __i915_wait_request_check_and_reset(struct i915_request *request) -{ - struct i915_gpu_error *error = &request->i915->gpu_error; +struct request_wait { + struct dma_fence_cb cb; + struct task_struct *tsk; +}; - if (likely(!i915_reset_handoff(error))) - return false; +static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct request_wait *wait = container_of(cb, typeof(*wait), cb); - __set_current_state(TASK_RUNNING); - i915_reset(request->i915, error->stalled_mask, error->reason); - return true; + wake_up_process(wait->tsk); } /** @@ -1207,17 +1291,9 @@ long i915_request_wait(struct i915_request *rq, { const int state = flags & I915_WAIT_INTERRUPTIBLE ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; - wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue; - DEFINE_WAIT_FUNC(reset, default_wake_function); - DEFINE_WAIT_FUNC(exec, default_wake_function); - struct intel_wait wait; + struct request_wait wait; might_sleep(); -#if IS_ENABLED(CONFIG_LOCKDEP) - GEM_BUG_ON(debug_locks && - !!lockdep_is_held(&rq->i915->drm.struct_mutex) != - !!(flags & I915_WAIT_LOCKED)); -#endif GEM_BUG_ON(timeout < 0); if (i915_request_completed(rq)) @@ -1228,57 +1304,38 @@ long i915_request_wait(struct i915_request *rq, trace_i915_request_wait_begin(rq, flags); - add_wait_queue(&rq->execute, &exec); - if (flags & I915_WAIT_LOCKED) - add_wait_queue(errq, &reset); + /* Optimistic short spin before touching IRQs */ + if (__i915_spin_request(rq, state, 5)) + goto out; - intel_wait_init(&wait); - if (flags & I915_WAIT_PRIORITY) + /* + * This client is about to stall waiting for the GPU. In many cases + * this is undesirable and limits the throughput of the system, as + * many clients cannot continue processing user input/output whilst + * blocked. RPS autotuning may take tens of milliseconds to respond + * to the GPU load and thus incurs additional latency for the client. + * We can circumvent that by promoting the GPU frequency to maximum + * before we sleep. This makes the GPU throttle up much more quickly + * (good for benchmarks and user experience, e.g. window animations), + * but at a cost of spending more power processing the workload + * (bad for battery). 
+ */ + if (flags & I915_WAIT_PRIORITY) { + if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6) + gen6_rps_boost(rq); i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); + } -restart: - do { - set_current_state(state); - if (intel_wait_update_request(&wait, rq)) - break; - - if (flags & I915_WAIT_LOCKED && - __i915_wait_request_check_and_reset(rq)) - continue; - - if (signal_pending_state(state, current)) { - timeout = -ERESTARTSYS; - goto complete; - } - - if (!timeout) { - timeout = -ETIME; - goto complete; - } - - timeout = io_schedule_timeout(timeout); - } while (1); - - GEM_BUG_ON(!intel_wait_has_seqno(&wait)); - GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); - - /* Optimistic short spin before touching IRQs */ - if (__i915_spin_request(rq, wait.seqno, state, 5)) - goto complete; + wait.tsk = current; + if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake)) + goto out; - set_current_state(state); - if (intel_engine_add_wait(rq->engine, &wait)) - /* - * In order to check that we haven't missed the interrupt - * as we enabled it, we need to kick ourselves to do a - * coherent check on the seqno before we sleep. - */ - goto wakeup; + for (;;) { + set_current_state(state); - if (flags & I915_WAIT_LOCKED) - __i915_wait_request_check_and_reset(rq); + if (i915_request_completed(rq)) + break; - for (;;) { if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; break; @@ -1290,70 +1347,14 @@ restart: } timeout = io_schedule_timeout(timeout); - - if (intel_wait_complete(&wait) && - intel_wait_check_request(&wait, rq)) - break; - - set_current_state(state); - -wakeup: - /* - * Carefully check if the request is complete, giving time - * for the seqno to be visible following the interrupt. - * We also have to check in case we are kicked by the GPU - * reset in order to drop the struct_mutex. - */ - if (__i915_request_irq_complete(rq)) - break; - - /* - * If the GPU is hung, and we hold the lock, reset the GPU - * and then check for completion. On a full reset, the engine's - * HW seqno will be advanced passed us and we are complete. - * If we do a partial reset, we have to wait for the GPU to - * resume and update the breadcrumb. - * - * If we don't hold the mutex, we can just wait for the worker - * to come along and update the breadcrumb (either directly - * itself, or indirectly by recovering the GPU). 
- */ - if (flags & I915_WAIT_LOCKED && - __i915_wait_request_check_and_reset(rq)) - continue; - - /* Only spin if we know the GPU is processing this request */ - if (__i915_spin_request(rq, wait.seqno, state, 2)) - break; - - if (!intel_wait_check_request(&wait, rq)) { - intel_engine_remove_wait(rq->engine, &wait); - goto restart; - } } - - intel_engine_remove_wait(rq->engine, &wait); -complete: __set_current_state(TASK_RUNNING); - if (flags & I915_WAIT_LOCKED) - remove_wait_queue(errq, &reset); - remove_wait_queue(&rq->execute, &exec); - trace_i915_request_wait_end(rq); - - return timeout; -} -static void ring_retire_requests(struct intel_ring *ring) -{ - struct i915_request *request, *next; - - list_for_each_entry_safe(request, next, - &ring->request_list, ring_link) { - if (!i915_request_completed(request)) - break; + dma_fence_remove_callback(&rq->fence, &wait.cb); - i915_request_retire(request); - } +out: + trace_i915_request_wait_end(rq); + return timeout; } void i915_retire_requests(struct drm_i915_private *i915) @@ -1365,11 +1366,66 @@ void i915_retire_requests(struct drm_i915_private *i915) if (!i915->gt.active_requests) return; - list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link) + list_for_each_entry_safe(ring, tmp, + &i915->gt.active_rings, active_link) { + intel_ring_get(ring); /* last rq holds reference! */ ring_retire_requests(ring); + intel_ring_put(ring); + } } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_request.c" #include "selftests/i915_request.c" #endif + +static void i915_global_request_shrink(void) +{ + kmem_cache_shrink(global.slab_dependencies); + kmem_cache_shrink(global.slab_execute_cbs); + kmem_cache_shrink(global.slab_requests); +} + +static void i915_global_request_exit(void) +{ + kmem_cache_destroy(global.slab_dependencies); + kmem_cache_destroy(global.slab_execute_cbs); + kmem_cache_destroy(global.slab_requests); +} + +static struct i915_global_request global = { { + .shrink = i915_global_request_shrink, + .exit = i915_global_request_exit, +} }; + +int __init i915_global_request_init(void) +{ + global.slab_requests = KMEM_CACHE(i915_request, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT | + SLAB_TYPESAFE_BY_RCU); + if (!global.slab_requests) + return -ENOMEM; + + global.slab_execute_cbs = KMEM_CACHE(execute_cb, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT | + SLAB_TYPESAFE_BY_RCU); + if (!global.slab_execute_cbs) + goto err_requests; + + global.slab_dependencies = KMEM_CACHE(i915_dependency, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT); + if (!global.slab_dependencies) + goto err_execute_cbs; + + i915_global_register(&global.base); + return 0; + +err_execute_cbs: + kmem_cache_destroy(global.slab_execute_cbs); +err_requests: + kmem_cache_destroy(global.slab_requests); + return -ENOMEM; +} diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 90e9d170a0cd..cd6c130964cd 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -29,8 +29,8 @@ #include "i915_gem.h" #include "i915_scheduler.h" +#include "i915_selftest.h" #include "i915_sw_fence.h" -#include "i915_scheduler.h" #include <uapi/drm/i915_drm.h> @@ -38,24 +38,36 @@ struct drm_file; struct drm_i915_gem_object; struct i915_request; struct i915_timeline; - -struct intel_wait { - struct rb_node node; - struct task_struct *tsk; - struct i915_request *request; - u32 seqno; -}; - -struct intel_signal_node { - struct intel_wait wait; - struct list_head link; -}; +struct 
i915_timeline_cacheline; struct i915_capture_list { struct i915_capture_list *next; struct i915_vma *vma; }; +enum { + /* + * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW. + * + * Set by __i915_request_submit() on handing over to HW, and cleared + * by __i915_request_unsubmit() if we preempt this request. + * + * Finally cleared for consistency on retiring the request, when + * we know the HW is no longer running this request. + * + * See i915_request_is_active() + */ + I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS, + + /* + * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list + * + * Internal bookkeeping used by the breadcrumb code to track when + * a request is on the various signal_list. + */ + I915_FENCE_FLAG_SIGNAL, +}; + /** * Request queue structure. * @@ -98,7 +110,7 @@ struct i915_request { struct intel_context *hw_context; struct intel_ring *ring; struct i915_timeline *timeline; - struct intel_signal_node signaling; + struct list_head signal_link; /* * The rcu epoch of when this request was allocated. Used to judiciously @@ -116,8 +128,11 @@ struct i915_request { * It is used by the driver to then queue the request for execution. */ struct i915_sw_fence submit; - wait_queue_entry_t submitq; - wait_queue_head_t execute; + union { + wait_queue_entry_t submitq; + struct i915_sw_dma_fence_cb dmaq; + }; + struct list_head execute_cb; /* * A list of everyone we wait upon, and everyone who waits upon us. @@ -131,13 +146,22 @@ struct i915_request { struct i915_sched_node sched; struct i915_dependency dep; - /** - * GEM sequence number associated with this request on the - * global execution timeline. It is zero when the request is not - * on the HW queue (i.e. not on the engine timeline list). - * Its value is guarded by the timeline spinlock. + /* + * A convenience pointer to the current breadcrumb value stored in + * the HW status page (or our timeline's local equivalent). The full + * path would be rq->hw_context->ring->timeline->hwsp_seqno. */ - u32 global_seqno; + const u32 *hwsp_seqno; + + /* + * If we need to access the timeline's seqno for this request in + * another request, we need to keep a read reference to this associated + * cacheline, so that we do not free and recycle it before the foreign + * observers have completed. Hence, we keep a pointer to the cacheline + * inside the timeline's HWSP vma, but it is only valid while this + * request has not completed and guarded by the timeline mutex. + */ + struct i915_timeline_cacheline *hwsp_cacheline; /** Position in the ring of the start of the request */ u32 head; @@ -188,6 +212,11 @@ struct i915_request { struct drm_i915_file_private *file_priv; /** file_priv list entry for this request */ struct list_head client_link; + + I915_SELFTEST_DECLARE(struct { + struct list_head link; + unsigned long delay; + } mock;) }; #define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) @@ -231,30 +260,6 @@ i915_request_put(struct i915_request *rq) dma_fence_put(&rq->fence); } -/** - * i915_request_global_seqno - report the current global seqno - * @request - the request - * - * A request is assigned a global seqno only when it is on the hardware - * execution queue. The global seqno can be used to maintain a list of - * requests on the same engine in retirement order, for example for - * constructing a priority queue for waiting. Prior to its execution, or - * if it is subsequently removed in the event of preemption, its global - * seqno is zero. 
As both insertion and removal from the execution queue - * may operate in IRQ context, it is not guarded by the usual struct_mutex - * BKL. Instead those relying on the global seqno must be prepared for its - * value to change between reads. Only when the request is complete can - * the global seqno be stable (due to the memory barriers on submitting - * the commands to the hardware to write the breadcrumb, if the HWS shows - * that it has passed the global seqno and the global seqno is unchanged - * after the read, it is indeed complete). - */ -static u32 -i915_request_global_seqno(const struct i915_request *request) -{ - return READ_ONCE(request->global_seqno); -} - int i915_request_await_object(struct i915_request *to, struct drm_i915_gem_object *obj, bool write); @@ -271,6 +276,10 @@ void i915_request_skip(struct i915_request *request, int error); void __i915_request_unsubmit(struct i915_request *request); void i915_request_unsubmit(struct i915_request *request); +/* Note: part of the intel_breadcrumbs family */ +bool i915_request_enable_breadcrumb(struct i915_request *request); +void i915_request_cancel_breadcrumb(struct i915_request *request); + long i915_request_wait(struct i915_request *rq, unsigned int flags, long timeout) @@ -281,441 +290,123 @@ long i915_request_wait(struct i915_request *rq, #define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */ #define I915_WAIT_FOR_IDLE_BOOST BIT(4) -static inline bool intel_engine_has_started(struct intel_engine_cs *engine, - u32 seqno); -static inline bool intel_engine_has_completed(struct intel_engine_cs *engine, - u32 seqno); - -/** - * Returns true if seq1 is later than seq2. - */ -static inline bool i915_seqno_passed(u32 seq1, u32 seq2) +static inline bool i915_request_signaled(const struct i915_request *rq) { - return (s32)(seq1 - seq2) >= 0; + /* The request may live longer than its HWSP, so check flags first! */ + return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags); } -/** - * i915_request_started - check if the request has begun being executed - * @rq: the request - * - * Returns true if the request has been submitted to hardware, and the hardware - * has advanced passed the end of the previous request and so should be either - * currently processing the request (though it may be preempted and so - * not necessarily the next request to complete) or have completed the request. - */ -static inline bool i915_request_started(const struct i915_request *rq) +static inline bool i915_request_is_active(const struct i915_request *rq) { - u32 seqno; - - seqno = i915_request_global_seqno(rq); - if (!seqno) /* not yet submitted to HW */ - return false; - - return intel_engine_has_started(rq->engine, seqno); + return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); } -static inline bool -__i915_request_completed(const struct i915_request *rq, u32 seqno) -{ - GEM_BUG_ON(!seqno); - return intel_engine_has_completed(rq->engine, seqno) && - seqno == i915_request_global_seqno(rq); -} - -static inline bool i915_request_completed(const struct i915_request *rq) -{ - u32 seqno; - - seqno = i915_request_global_seqno(rq); - if (!seqno) - return false; - - return __i915_request_completed(rq, seqno); -} - -void i915_retire_requests(struct drm_i915_private *i915); - -/* - * We treat requests as fences. This is not be to confused with our - * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync. 
- * We use the fences to synchronize access from the CPU with activity on the - * GPU, for example, we should not rewrite an object's PTE whilst the GPU - * is reading them. We also track fences at a higher level to provide - * implicit synchronisation around GEM objects, e.g. set-domain will wait - * for outstanding GPU rendering before marking the object ready for CPU - * access, or a pageflip will wait until the GPU is complete before showing - * the frame on the scanout. - * - * In order to use a fence, the object must track the fence it needs to - * serialise with. For example, GEM objects want to track both read and - * write access so that we can perform concurrent read operations between - * the CPU and GPU engines, as well as waiting for all rendering to - * complete, or waiting for the last GPU user of a "fence register". The - * object then embeds a #i915_gem_active to track the most recent (in - * retirement order) request relevant for the desired mode of access. - * The #i915_gem_active is updated with i915_gem_active_set() to track the - * most recent fence request, typically this is done as part of - * i915_vma_move_to_active(). - * - * When the #i915_gem_active completes (is retired), it will - * signal its completion to the owner through a callback as well as mark - * itself as idle (i915_gem_active.request == NULL). The owner - * can then perform any action, such as delayed freeing of an active - * resource including itself. - */ -struct i915_gem_active; - -typedef void (*i915_gem_retire_fn)(struct i915_gem_active *, - struct i915_request *); - -struct i915_gem_active { - struct i915_request __rcu *request; - struct list_head link; - i915_gem_retire_fn retire; -}; - -void i915_gem_retire_noop(struct i915_gem_active *, - struct i915_request *request); - /** - * init_request_active - prepares the activity tracker for use - * @active - the active tracker - * @func - a callback when then the tracker is retired (becomes idle), - * can be NULL - * - * init_request_active() prepares the embedded @active struct for use as - * an activity tracker, that is for tracking the last known active request - * associated with it. When the last request becomes idle, when it is retired - * after completion, the optional callback @func is invoked. - */ -static inline void -init_request_active(struct i915_gem_active *active, - i915_gem_retire_fn retire) -{ - RCU_INIT_POINTER(active->request, NULL); - INIT_LIST_HEAD(&active->link); - active->retire = retire ?: i915_gem_retire_noop; -} - -/** - * i915_gem_active_set - updates the tracker to watch the current request - * @active - the active tracker - * @request - the request to watch - * - * i915_gem_active_set() watches the given @request for completion. Whilst - * that @request is busy, the @active reports busy. When that @request is - * retired, the @active tracker is updated to report idle. - */ -static inline void -i915_gem_active_set(struct i915_gem_active *active, - struct i915_request *request) -{ - list_move(&active->link, &request->active_list); - rcu_assign_pointer(active->request, request); -} - -/** - * i915_gem_active_set_retire_fn - updates the retirement callback - * @active - the active tracker - * @fn - the routine called when the request is retired - * @mutex - struct_mutex used to guard retirements - * - * i915_gem_active_set_retire_fn() updates the function pointer that - * is called when the final request associated with the @active tracker - * is retired. + * Returns true if seq1 is later than seq2. 
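+ * The comparison is done in signed 32-bit space so that it remains correct
+ * across seqno wraparound, e.g. seq1 == 1 is considered to have passed
+ * seq2 == 0xfffffff0.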
*/ -static inline void -i915_gem_active_set_retire_fn(struct i915_gem_active *active, - i915_gem_retire_fn fn, - struct mutex *mutex) +static inline bool i915_seqno_passed(u32 seq1, u32 seq2) { - lockdep_assert_held(mutex); - active->retire = fn ?: i915_gem_retire_noop; + return (s32)(seq1 - seq2) >= 0; } -static inline struct i915_request * -__i915_gem_active_peek(const struct i915_gem_active *active) +static inline u32 __hwsp_seqno(const struct i915_request *rq) { - /* - * Inside the error capture (running with the driver in an unknown - * state), we want to bend the rules slightly (a lot). - * - * Work is in progress to make it safer, in the meantime this keeps - * the known issue from spamming the logs. - */ - return rcu_dereference_protected(active->request, 1); + return READ_ONCE(*rq->hwsp_seqno); } /** - * i915_gem_active_raw - return the active request - * @active - the active tracker + * hwsp_seqno - the current breadcrumb value in the HW status page + * @rq: the request, to chase the relevant HW status page * - * i915_gem_active_raw() returns the current request being tracked, or NULL. - * It does not obtain a reference on the request for the caller, so the caller - * must hold struct_mutex. - */ -static inline struct i915_request * -i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex) -{ - return rcu_dereference_protected(active->request, - lockdep_is_held(mutex)); -} - -/** - * i915_gem_active_peek - report the active request being monitored - * @active - the active tracker + * The emphasis in naming here is that hwsp_seqno() is not a property of the + * request, but an indication of the current HW state (associated with this + * request). Its value will change as the GPU executes more requests. * - * i915_gem_active_peek() returns the current request being tracked if - * still active, or NULL. It does not obtain a reference on the request - * for the caller, so the caller must hold struct_mutex. + * Returns the current breadcrumb value in the associated HW status page (or + * the local timeline's equivalent) for this request. The request itself + * has the associated breadcrumb value of rq->fence.seqno, when the HW + * status page has that breadcrumb or later, this request is complete. */ -static inline struct i915_request * -i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex) +static inline u32 hwsp_seqno(const struct i915_request *rq) { - struct i915_request *request; - - request = i915_gem_active_raw(active, mutex); - if (!request || i915_request_completed(request)) - return NULL; + u32 seqno; - return request; -} + rcu_read_lock(); /* the HWSP may be freed at runtime */ + seqno = __hwsp_seqno(rq); + rcu_read_unlock(); -/** - * i915_gem_active_get - return a reference to the active request - * @active - the active tracker - * - * i915_gem_active_get() returns a reference to the active request, or NULL - * if the active tracker is idle. The caller must hold struct_mutex. - */ -static inline struct i915_request * -i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex) -{ - return i915_request_get(i915_gem_active_peek(active, mutex)); + return seqno; } -/** - * __i915_gem_active_get_rcu - return a reference to the active request - * @active - the active tracker - * - * __i915_gem_active_get() returns a reference to the active request, or NULL - * if the active tracker is idle. The caller must hold the RCU read lock, but - * the returned pointer is safe to use outside of RCU. 
- */ -static inline struct i915_request * -__i915_gem_active_get_rcu(const struct i915_gem_active *active) +static inline bool __i915_request_has_started(const struct i915_request *rq) { - /* - * Performing a lockless retrieval of the active request is super - * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing - * slab of request objects will not be freed whilst we hold the - * RCU read lock. It does not guarantee that the request itself - * will not be freed and then *reused*. Viz, - * - * Thread A Thread B - * - * rq = active.request - * retire(rq) -> free(rq); - * (rq is now first on the slab freelist) - * active.request = NULL - * - * rq = new submission on a new object - * ref(rq) - * - * To prevent the request from being reused whilst the caller - * uses it, we take a reference like normal. Whilst acquiring - * the reference we check that it is not in a destroyed state - * (refcnt == 0). That prevents the request being reallocated - * whilst the caller holds on to it. To check that the request - * was not reallocated as we acquired the reference we have to - * check that our request remains the active request across - * the lookup, in the same manner as a seqlock. The visibility - * of the pointer versus the reference counting is controlled - * by using RCU barriers (rcu_dereference and rcu_assign_pointer). - * - * In the middle of all that, we inspect whether the request is - * complete. Retiring is lazy so the request may be completed long - * before the active tracker is updated. Querying whether the - * request is complete is far cheaper (as it involves no locked - * instructions setting cachelines to exclusive) than acquiring - * the reference, so we do it first. The RCU read lock ensures the - * pointer dereference is valid, but does not ensure that the - * seqno nor HWS is the right one! However, if the request was - * reallocated, that means the active tracker's request was complete. - * If the new request is also complete, then both are and we can - * just report the active tracker is idle. If the new request is - * incomplete, then we acquire a reference on it and check that - * it remained the active request. - * - * It is then imperative that we do not zero the request on - * reallocation, so that we can chase the dangling pointers! - * See i915_request_alloc(). - */ - do { - struct i915_request *request; - - request = rcu_dereference(active->request); - if (!request || i915_request_completed(request)) - return NULL; - - /* - * An especially silly compiler could decide to recompute the - * result of i915_request_completed, more specifically - * re-emit the load for request->fence.seqno. A race would catch - * a later seqno value, which could flip the result from true to - * false. Which means part of the instructions below might not - * be executed, while later on instructions are executed. Due to - * barriers within the refcounting the inconsistency can't reach - * past the call to i915_request_get_rcu, but not executing - * that while still executing i915_request_put() creates - * havoc enough. Prevent this with a compiler barrier. - */ - barrier(); - - request = i915_request_get_rcu(request); - - /* - * What stops the following rcu_access_pointer() from occurring - * before the above i915_request_get_rcu()? If we were - * to read the value before pausing to get the reference to - * the request, we may not notice a change in the active - * tracker. 
- * - * The rcu_access_pointer() is a mere compiler barrier, which - * means both the CPU and compiler are free to perform the - * memory read without constraint. The compiler only has to - * ensure that any operations after the rcu_access_pointer() - * occur afterwards in program order. This means the read may - * be performed earlier by an out-of-order CPU, or adventurous - * compiler. - * - * The atomic operation at the heart of - * i915_request_get_rcu(), see dma_fence_get_rcu(), is - * atomic_inc_not_zero() which is only a full memory barrier - * when successful. That is, if i915_request_get_rcu() - * returns the request (and so with the reference counted - * incremented) then the following read for rcu_access_pointer() - * must occur after the atomic operation and so confirm - * that this request is the one currently being tracked. - * - * The corresponding write barrier is part of - * rcu_assign_pointer(). - */ - if (!request || request == rcu_access_pointer(active->request)) - return rcu_pointer_handoff(request); - - i915_request_put(request); - } while (1); + return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1); } /** - * i915_gem_active_get_unlocked - return a reference to the active request - * @active - the active tracker + * i915_request_started - check if the request has begun being executed + * @rq: the request * - * i915_gem_active_get_unlocked() returns a reference to the active request, - * or NULL if the active tracker is idle. The reference is obtained under RCU, - * so no locking is required by the caller. + * If the timeline is not using initial breadcrumbs, a request is + * considered started if the previous request on its timeline (i.e. + * context) has been signaled. * - * The reference should be freed with i915_request_put(). - */ -static inline struct i915_request * -i915_gem_active_get_unlocked(const struct i915_gem_active *active) -{ - struct i915_request *request; - - rcu_read_lock(); - request = __i915_gem_active_get_rcu(active); - rcu_read_unlock(); - - return request; -} - -/** - * i915_gem_active_isset - report whether the active tracker is assigned - * @active - the active tracker + * If the timeline is using semaphores, it will also be emitting an + * "initial breadcrumb" after the semaphores are complete and just before + * it began executing the user payload. A request can therefore be active + * on the HW and not yet started as it is still busywaiting on its + * dependencies (via HW semaphores). * - * i915_gem_active_isset() returns true if the active tracker is currently - * assigned to a request. Due to the lazy retiring, that request may be idle - * and this may report stale information. - */ -static inline bool -i915_gem_active_isset(const struct i915_gem_active *active) -{ - return rcu_access_pointer(active->request); -} - -/** - * i915_gem_active_wait - waits until the request is completed - * @active - the active request on which to wait - * @flags - how to wait - * @timeout - how long to wait at most - * @rps - userspace client to charge for a waitboost - * - * i915_gem_active_wait() waits until the request is completed before - * returning, without requiring any locks to be held. Note that it does not - * retire any requests before returning. + * If the request has started, its dependencies will have been signaled + * (either by fences or by semaphores) and it will have begun processing + * the user payload. * - * This function relies on RCU in order to acquire the reference to the active - * request without holding any locks. 
See __i915_gem_active_get_rcu() for the - * glory details on how that is managed. Once the reference is acquired, we - * can then wait upon the request, and afterwards release our reference, - * free of any locking. + * However, even if a request has started, it may have been preempted and + * so no longer active, or it may have already completed. * - * This function wraps i915_request_wait(), see it for the full details on - * the arguments. + * See also i915_request_is_active(). * - * Returns 0 if successful, or a negative error code. + * Returns true if the request has begun executing the user payload, or + * has completed: */ -static inline int -i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags) +static inline bool i915_request_started(const struct i915_request *rq) { - struct i915_request *request; - long ret = 0; - - request = i915_gem_active_get_unlocked(active); - if (request) { - ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT); - i915_request_put(request); - } + if (i915_request_signaled(rq)) + return true; - return ret < 0 ? ret : 0; + /* Remember: started but may have since been preempted! */ + return __i915_request_has_started(rq); } /** - * i915_gem_active_retire - waits until the request is retired - * @active - the active request on which to wait + * i915_request_is_running - check if the request may actually be executing + * @rq: the request * - * i915_gem_active_retire() waits until the request is completed, - * and then ensures that at least the retirement handler for this - * @active tracker is called before returning. If the @active - * tracker is idle, the function returns immediately. + * Returns true if the request is currently submitted to hardware, has passed + * its start point (i.e. the context is setup and not busywaiting). Note that + * it may no longer be running by the time the function returns! */ -static inline int __must_check -i915_gem_active_retire(struct i915_gem_active *active, - struct mutex *mutex) +static inline bool i915_request_is_running(const struct i915_request *rq) { - struct i915_request *request; - long ret; - - request = i915_gem_active_raw(active, mutex); - if (!request) - return 0; + if (!i915_request_is_active(rq)) + return false; - ret = i915_request_wait(request, - I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (ret < 0) - return ret; + return __i915_request_has_started(rq); +} - list_del_init(&active->link); - RCU_INIT_POINTER(active->request, NULL); +static inline bool i915_request_completed(const struct i915_request *rq) +{ + if (i915_request_signaled(rq)) + return true; - active->retire(active, request); + return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno); +} - return 0; +static inline void i915_request_mark_complete(struct i915_request *rq) +{ + rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */ } -#define for_each_active(mask, idx) \ - for (; mask ? 
idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx)) +void i915_retire_requests(struct drm_i915_private *i915); #endif /* I915_REQUEST_H */ diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c new file mode 100644 index 000000000000..ddc403ee8855 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_reset.c @@ -0,0 +1,1437 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008-2018 Intel Corporation + */ + +#include <linux/sched/mm.h> +#include <linux/stop_machine.h> + +#include "i915_drv.h" +#include "i915_gpu_error.h" +#include "i915_reset.h" + +#include "intel_guc.h" + +#define RESET_MAX_RETRIES 3 + +/* XXX How to handle concurrent GGTT updates using tiling registers? */ +#define RESET_UNDER_STOP_MACHINE 0 + +static void engine_skip_context(struct i915_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; + struct i915_gem_context *hung_ctx = rq->gem_context; + + lockdep_assert_held(&engine->timeline.lock); + + if (!i915_request_is_active(rq)) + return; + + list_for_each_entry_continue(rq, &engine->timeline.requests, link) + if (rq->gem_context == hung_ctx) + i915_request_skip(rq, -EIO); +} + +static void client_mark_guilty(struct drm_i915_file_private *file_priv, + const struct i915_gem_context *ctx) +{ + unsigned int score; + unsigned long prev_hang; + + if (i915_gem_context_is_banned(ctx)) + score = I915_CLIENT_SCORE_CONTEXT_BAN; + else + score = 0; + + prev_hang = xchg(&file_priv->hang_timestamp, jiffies); + if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)) + score += I915_CLIENT_SCORE_HANG_FAST; + + if (score) { + atomic_add(score, &file_priv->ban_score); + + DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n", + ctx->name, score, + atomic_read(&file_priv->ban_score)); + } +} + +static bool context_mark_guilty(struct i915_gem_context *ctx) +{ + unsigned long prev_hang; + bool banned; + int i; + + atomic_inc(&ctx->guilty_count); + + /* Cool contexts are too cool to be banned! (Used for reset testing.) */ + if (!i915_gem_context_is_bannable(ctx)) + return false; + + /* Record the timestamp for the last N hangs */ + prev_hang = ctx->hang_timestamp[0]; + for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++) + ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1]; + ctx->hang_timestamp[i] = jiffies; + + /* If we have hung N+1 times in rapid succession, we ban the context! */ + banned = !i915_gem_context_is_recoverable(ctx); + if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES)) + banned = true; + if (banned) { + DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n", + ctx->name, atomic_read(&ctx->guilty_count)); + i915_gem_context_set_banned(ctx); + } + + if (!IS_ERR_OR_NULL(ctx->file_priv)) + client_mark_guilty(ctx->file_priv, ctx); + + return banned; +} + +static void context_mark_innocent(struct i915_gem_context *ctx) +{ + atomic_inc(&ctx->active_count); +} + +void i915_reset_request(struct i915_request *rq, bool guilty) +{ + GEM_TRACE("%s rq=%llx:%lld, guilty? 
%s\n", + rq->engine->name, + rq->fence.context, + rq->fence.seqno, + yesno(guilty)); + + lockdep_assert_held(&rq->engine->timeline.lock); + GEM_BUG_ON(i915_request_completed(rq)); + + if (guilty) { + i915_request_skip(rq, -EIO); + if (context_mark_guilty(rq->gem_context)) + engine_skip_context(rq); + } else { + dma_fence_set_error(&rq->fence, -EAGAIN); + context_mark_innocent(rq->gem_context); + } +} + +static void gen3_stop_engine(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + const u32 base = engine->mmio_base; + + GEM_TRACE("%s\n", engine->name); + + if (intel_engine_stop_cs(engine)) + GEM_TRACE("%s: timed out on STOP_RING\n", engine->name); + + I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base))); + POSTING_READ_FW(RING_HEAD(base)); /* paranoia */ + + I915_WRITE_FW(RING_HEAD(base), 0); + I915_WRITE_FW(RING_TAIL(base), 0); + POSTING_READ_FW(RING_TAIL(base)); + + /* The ring must be empty before it is disabled */ + I915_WRITE_FW(RING_CTL(base), 0); + + /* Check acts as a post */ + if (I915_READ_FW(RING_HEAD(base))) + GEM_TRACE("%s: ring head [%x] not parked\n", + engine->name, I915_READ_FW(RING_HEAD(base))); +} + +static void i915_stop_engines(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask) +{ + struct intel_engine_cs *engine; + intel_engine_mask_t tmp; + + if (INTEL_GEN(i915) < 3) + return; + + for_each_engine_masked(engine, i915, engine_mask, tmp) + gen3_stop_engine(engine); +} + +static bool i915_in_reset(struct pci_dev *pdev) +{ + u8 gdrst; + + pci_read_config_byte(pdev, I915_GDRST, &gdrst); + return gdrst & GRDOM_RESET_STATUS; +} + +static int i915_do_reset(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct pci_dev *pdev = i915->drm.pdev; + int err; + + /* Assert reset for at least 20 usec, and wait for acknowledgement. */ + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); + udelay(50); + err = wait_for_atomic(i915_in_reset(pdev), 50); + + /* Clear the reset request. 
*/ + pci_write_config_byte(pdev, I915_GDRST, 0); + udelay(50); + if (!err) + err = wait_for_atomic(!i915_in_reset(pdev), 50); + + return err; +} + +static bool g4x_reset_complete(struct pci_dev *pdev) +{ + u8 gdrst; + + pci_read_config_byte(pdev, I915_GDRST, &gdrst); + return (gdrst & GRDOM_RESET_ENABLE) == 0; +} + +static int g33_do_reset(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct pci_dev *pdev = i915->drm.pdev; + + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); + return wait_for_atomic(g4x_reset_complete(pdev), 50); +} + +static int g4x_do_reset(struct drm_i915_private *dev_priv, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct pci_dev *pdev = dev_priv->drm.pdev; + int ret; + + /* WaVcpClkGateDisableForMediaReset:ctg,elk */ + I915_WRITE_FW(VDECCLK_GATE_D, + I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); + POSTING_READ_FW(VDECCLK_GATE_D); + + pci_write_config_byte(pdev, I915_GDRST, + GRDOM_MEDIA | GRDOM_RESET_ENABLE); + ret = wait_for_atomic(g4x_reset_complete(pdev), 50); + if (ret) { + DRM_DEBUG_DRIVER("Wait for media reset failed\n"); + goto out; + } + + pci_write_config_byte(pdev, I915_GDRST, + GRDOM_RENDER | GRDOM_RESET_ENABLE); + ret = wait_for_atomic(g4x_reset_complete(pdev), 50); + if (ret) { + DRM_DEBUG_DRIVER("Wait for render reset failed\n"); + goto out; + } + +out: + pci_write_config_byte(pdev, I915_GDRST, 0); + + I915_WRITE_FW(VDECCLK_GATE_D, + I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); + POSTING_READ_FW(VDECCLK_GATE_D); + + return ret; +} + +static int ironlake_do_reset(struct drm_i915_private *dev_priv, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + int ret; + + intel_uncore_write_fw(uncore, ILK_GDSR, + ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); + ret = __intel_wait_for_register_fw(uncore, ILK_GDSR, + ILK_GRDOM_RESET_ENABLE, 0, + 5000, 0, + NULL); + if (ret) { + DRM_DEBUG_DRIVER("Wait for render reset failed\n"); + goto out; + } + + intel_uncore_write_fw(uncore, ILK_GDSR, + ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); + ret = __intel_wait_for_register_fw(uncore, ILK_GDSR, + ILK_GRDOM_RESET_ENABLE, 0, + 5000, 0, + NULL); + if (ret) { + DRM_DEBUG_DRIVER("Wait for media reset failed\n"); + goto out; + } + +out: + intel_uncore_write_fw(uncore, ILK_GDSR, 0); + intel_uncore_posting_read_fw(uncore, ILK_GDSR); + return ret; +} + +/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ +static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, + u32 hw_domain_mask) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + int err; + + /* + * GEN6_GDRST is not in the gt power well, no need to check + * for fifo space for the write or forcewake the chip for + * the read + */ + intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask); + + /* Wait for the device to ack the reset requests */ + err = __intel_wait_for_register_fw(uncore, + GEN6_GDRST, hw_domain_mask, 0, + 500, 0, + NULL); + if (err) + DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", + hw_domain_mask); + + return err; +} + +static int gen6_reset_engines(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct intel_engine_cs *engine; + const u32 hw_engine_mask[] = { + [RCS0] = GEN6_GRDOM_RENDER, + [BCS0] = GEN6_GRDOM_BLT, + [VCS0] = GEN6_GRDOM_MEDIA, + [VCS1] = GEN8_GRDOM_MEDIA2, + [VECS0] = GEN6_GRDOM_VECS, + }; + u32 hw_mask; + + if (engine_mask == ALL_ENGINES) { + hw_mask 
= GEN6_GRDOM_FULL; + } else { + intel_engine_mask_t tmp; + + hw_mask = 0; + for_each_engine_masked(engine, i915, engine_mask, tmp) { + GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); + hw_mask |= hw_engine_mask[engine->id]; + } + } + + return gen6_hw_domain_reset(i915, hw_mask); +} + +static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; + i915_reg_t sfc_forced_lock, sfc_forced_lock_ack; + u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit; + i915_reg_t sfc_usage; + u32 sfc_usage_bit; + u32 sfc_reset_bit; + + switch (engine->class) { + case VIDEO_DECODE_CLASS: + if ((BIT(engine->instance) & vdbox_sfc_access) == 0) + return 0; + + sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; + + sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine); + sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT; + + sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine); + sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT; + sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance); + break; + + case VIDEO_ENHANCEMENT_CLASS: + sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; + + sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine); + sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT; + + sfc_usage = GEN11_VECS_SFC_USAGE(engine); + sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT; + sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance); + break; + + default: + return 0; + } + + /* + * Tell the engine that a software reset is going to happen. The engine + * will then try to force lock the SFC (if currently locked, it will + * remain so until we tell the engine it is safe to unlock; if currently + * unlocked, it will ignore this and all new lock requests). If SFC + * ends up being locked to the engine we want to reset, we have to reset + * it as well (we will unlock it once the reset sequence is completed). 
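+ *
+ * The value returned below is the SFC reset bit to OR into the GDRST domain
+ * mask, or 0 if no SFC reset is required for this engine.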
+ */ + intel_uncore_rmw_or_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit); + + if (__intel_wait_for_register_fw(uncore, + sfc_forced_lock_ack, + sfc_forced_lock_ack_bit, + sfc_forced_lock_ack_bit, + 1000, 0, NULL)) { + DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n"); + return 0; + } + + if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit) + return sfc_reset_bit; + + return 0; +} + +static void gen11_unlock_sfc(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; + i915_reg_t sfc_forced_lock; + u32 sfc_forced_lock_bit; + + switch (engine->class) { + case VIDEO_DECODE_CLASS: + if ((BIT(engine->instance) & vdbox_sfc_access) == 0) + return; + + sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; + break; + + case VIDEO_ENHANCEMENT_CLASS: + sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; + break; + + default: + return; + } + + I915_WRITE_FW(sfc_forced_lock, + I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit); +} + +static int gen11_reset_engines(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + const u32 hw_engine_mask[] = { + [RCS0] = GEN11_GRDOM_RENDER, + [BCS0] = GEN11_GRDOM_BLT, + [VCS0] = GEN11_GRDOM_MEDIA, + [VCS1] = GEN11_GRDOM_MEDIA2, + [VCS2] = GEN11_GRDOM_MEDIA3, + [VCS3] = GEN11_GRDOM_MEDIA4, + [VECS0] = GEN11_GRDOM_VECS, + [VECS1] = GEN11_GRDOM_VECS2, + }; + struct intel_engine_cs *engine; + intel_engine_mask_t tmp; + u32 hw_mask; + int ret; + + if (engine_mask == ALL_ENGINES) { + hw_mask = GEN11_GRDOM_FULL; + } else { + hw_mask = 0; + for_each_engine_masked(engine, i915, engine_mask, tmp) { + GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); + hw_mask |= hw_engine_mask[engine->id]; + hw_mask |= gen11_lock_sfc(i915, engine); + } + } + + ret = gen6_hw_domain_reset(i915, hw_mask); + + if (engine_mask != ALL_ENGINES) + for_each_engine_masked(engine, i915, engine_mask, tmp) + gen11_unlock_sfc(i915, engine); + + return ret; +} + +static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) +{ + struct intel_uncore *uncore = &engine->i915->uncore; + int ret; + + intel_uncore_write_fw(uncore, RING_RESET_CTL(engine->mmio_base), + _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); + + ret = __intel_wait_for_register_fw(uncore, + RING_RESET_CTL(engine->mmio_base), + RESET_CTL_READY_TO_RESET, + RESET_CTL_READY_TO_RESET, + 700, 0, + NULL); + if (ret) + DRM_ERROR("%s: reset request timeout\n", engine->name); + + return ret; +} + +static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), + _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); +} + +static int gen8_reset_engines(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + struct intel_engine_cs *engine; + const bool reset_non_ready = retry >= 1; + intel_engine_mask_t tmp; + int ret; + + for_each_engine_masked(engine, i915, engine_mask, tmp) { + ret = gen8_engine_reset_prepare(engine); + if (ret && !reset_non_ready) + goto skip_reset; + + /* + * If this is not the first failed attempt to prepare, + * we decide to proceed anyway. + * + * By doing so we risk context corruption and with + * some gens (kbl), possible system hang if reset + * happens during active bb execution. 
+ * + * We rather take context corruption instead of + * failed reset with a wedged driver/gpu. And + * active bb execution case should be covered by + * i915_stop_engines we have before the reset. + */ + } + + if (INTEL_GEN(i915) >= 11) + ret = gen11_reset_engines(i915, engine_mask, retry); + else + ret = gen6_reset_engines(i915, engine_mask, retry); + +skip_reset: + for_each_engine_masked(engine, i915, engine_mask, tmp) + gen8_engine_reset_cancel(engine); + + return ret; +} + +typedef int (*reset_func)(struct drm_i915_private *, + intel_engine_mask_t engine_mask, + unsigned int retry); + +static reset_func intel_get_gpu_reset(struct drm_i915_private *i915) +{ + if (INTEL_GEN(i915) >= 8) + return gen8_reset_engines; + else if (INTEL_GEN(i915) >= 6) + return gen6_reset_engines; + else if (INTEL_GEN(i915) >= 5) + return ironlake_do_reset; + else if (IS_G4X(i915)) + return g4x_do_reset; + else if (IS_G33(i915) || IS_PINEVIEW(i915)) + return g33_do_reset; + else if (INTEL_GEN(i915) >= 3) + return i915_do_reset; + else + return NULL; +} + +int intel_gpu_reset(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask) +{ + const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1; + reset_func reset; + int ret = -ETIMEDOUT; + int retry; + + reset = intel_get_gpu_reset(i915); + if (!reset) + return -ENODEV; + + /* + * If the power well sleeps during the reset, the reset + * request may be dropped and never completes (causing -EIO). + */ + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); + for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { + /* + * We stop engines, otherwise we might get failed reset and a + * dead gpu (on elk). Also as modern gpu as kbl can suffer + * from system hang if batchbuffer is progressing when + * the reset is issued, regardless of READY_TO_RESET ack. + * Thus assume it is best to stop engines on all gens + * where we have a gpu reset. + * + * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) + * + * WaMediaResetMainRingCleanup:ctg,elk (presumably) + * + * FIXME: Wa for more modern gens needs to be validated + */ + if (retry) + i915_stop_engines(i915, engine_mask); + + GEM_TRACE("engine_mask=%x\n", engine_mask); + preempt_disable(); + ret = reset(i915, engine_mask, retry); + preempt_enable(); + } + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); + + return ret; +} + +bool intel_has_gpu_reset(struct drm_i915_private *i915) +{ + if (USES_GUC(i915)) + return false; + + if (!i915_modparams.reset) + return NULL; + + return intel_get_gpu_reset(i915); +} + +bool intel_has_reset_engine(struct drm_i915_private *i915) +{ + return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2; +} + +int intel_reset_guc(struct drm_i915_private *i915) +{ + u32 guc_domain = + INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; + int ret; + + GEM_BUG_ON(!HAS_GUC(i915)); + + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); + ret = gen6_hw_domain_reset(i915, guc_domain); + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); + + return ret; +} + +/* + * Ensure irq handler finishes, and not run again. + * Also return the active request so that we only search for it once. + */ +static void reset_prepare_engine(struct intel_engine_cs *engine) +{ + /* + * During the reset sequence, we must prevent the engine from + * entering RC6. 
As the context state is undefined until we restart + * the engine, if it does enter RC6 during the reset, the state + * written to the powercontext is undefined and so we may lose + * GPU state upon resume, i.e. fail to restart after a reset. + */ + intel_uncore_forcewake_get(&engine->i915->uncore, FORCEWAKE_ALL); + engine->reset.prepare(engine); +} + +static void revoke_mmaps(struct drm_i915_private *i915) +{ + int i; + + for (i = 0; i < i915->num_fence_regs; i++) { + struct drm_vma_offset_node *node; + struct i915_vma *vma; + u64 vma_offset; + + vma = READ_ONCE(i915->fence_regs[i].vma); + if (!vma) + continue; + + if (!i915_vma_has_userfault(vma)) + continue; + + GEM_BUG_ON(vma->fence != &i915->fence_regs[i]); + node = &vma->obj->base.vma_node; + vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; + unmap_mapping_range(i915->drm.anon_inode->i_mapping, + drm_vma_node_offset_addr(node) + vma_offset, + vma->size, + 1); + } +} + +static void reset_prepare(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) + reset_prepare_engine(engine); + + intel_uc_reset_prepare(i915); +} + +static void gt_revoke(struct drm_i915_private *i915) +{ + revoke_mmaps(i915); +} + +static int gt_reset(struct drm_i915_private *i915, + intel_engine_mask_t stalled_mask) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + /* + * Everything depends on having the GTT running, so we need to start + * there. + */ + err = i915_ggtt_enable_hw(i915); + if (err) + return err; + + for_each_engine(engine, i915, id) + intel_engine_reset(engine, stalled_mask & engine->mask); + + i915_gem_restore_fences(i915); + + return err; +} + +static void reset_finish_engine(struct intel_engine_cs *engine) +{ + engine->reset.finish(engine); + intel_uncore_forcewake_put(&engine->i915->uncore, FORCEWAKE_ALL); +} + +struct i915_gpu_restart { + struct work_struct work; + struct drm_i915_private *i915; +}; + +static void restart_work(struct work_struct *work) +{ + struct i915_gpu_restart *arg = container_of(work, typeof(*arg), work); + struct drm_i915_private *i915 = arg->i915; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); + mutex_lock(&i915->drm.struct_mutex); + WRITE_ONCE(i915->gpu_error.restart, NULL); + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + /* + * Ostensibily, we always want a context loaded for powersaving, + * so if the engine is idle after the reset, send a request + * to load our scratch kernel_context. + */ + if (!intel_engine_is_idle(engine)) + continue; + + rq = i915_request_alloc(engine, i915->kernel_context); + if (!IS_ERR(rq)) + i915_request_add(rq); + } + + mutex_unlock(&i915->drm.struct_mutex); + intel_runtime_pm_put(i915, wakeref); + + kfree(arg); +} + +static void reset_finish(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) { + reset_finish_engine(engine); + intel_engine_signal_breadcrumbs(engine); + } +} + +static void reset_restart(struct drm_i915_private *i915) +{ + struct i915_gpu_restart *arg; + + /* + * Following the reset, ensure that we always reload context for + * powersaving, and to correct engine->last_retired_context. Since + * this requires us to submit a request, queue a worker to do that + * task for us to evade any locking here. 
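+ *
+ * gpu_error.restart doubles as the "worker already queued" flag: it is set
+ * just before queuing and cleared again by restart_work().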
+ */ + if (READ_ONCE(i915->gpu_error.restart)) + return; + + arg = kmalloc(sizeof(*arg), GFP_KERNEL); + if (arg) { + arg->i915 = i915; + INIT_WORK(&arg->work, restart_work); + + WRITE_ONCE(i915->gpu_error.restart, arg); + queue_work(i915->wq, &arg->work); + } +} + +static void nop_submit_request(struct i915_request *request) +{ + struct intel_engine_cs *engine = request->engine; + unsigned long flags; + + GEM_TRACE("%s fence %llx:%lld -> -EIO\n", + engine->name, request->fence.context, request->fence.seqno); + dma_fence_set_error(&request->fence, -EIO); + + spin_lock_irqsave(&engine->timeline.lock, flags); + __i915_request_submit(request); + i915_request_mark_complete(request); + spin_unlock_irqrestore(&engine->timeline.lock, flags); + + intel_engine_queue_breadcrumbs(engine); +} + +static void __i915_gem_set_wedged(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + if (test_bit(I915_WEDGED, &error->flags)) + return; + + if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) { + struct drm_printer p = drm_debug_printer(__func__); + + for_each_engine(engine, i915, id) + intel_engine_dump(engine, &p, "%s\n", engine->name); + } + + GEM_TRACE("start\n"); + + /* + * First, stop submission to hw, but do not yet complete requests by + * rolling the global seqno forward (since this would complete requests + * for which we haven't set the fence error to EIO yet). + */ + reset_prepare(i915); + + /* Even if the GPU reset fails, it should still stop the engines */ + if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) + intel_gpu_reset(i915, ALL_ENGINES); + + for_each_engine(engine, i915, id) { + engine->submit_request = nop_submit_request; + engine->schedule = NULL; + } + i915->caps.scheduler = 0; + + /* + * Make sure no request can slip through without getting completed by + * either this call here to intel_engine_write_global_seqno, or the one + * in nop_submit_request. + */ + synchronize_rcu_expedited(); + + /* Mark all executing requests as skipped */ + for_each_engine(engine, i915, id) + engine->cancel_requests(engine); + + reset_finish(i915); + + smp_mb__before_atomic(); + set_bit(I915_WEDGED, &error->flags); + + GEM_TRACE("end\n"); +} + +void i915_gem_set_wedged(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + intel_wakeref_t wakeref; + + mutex_lock(&error->wedge_mutex); + with_intel_runtime_pm(i915, wakeref) + __i915_gem_set_wedged(i915); + mutex_unlock(&error->wedge_mutex); +} + +static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + struct i915_timeline *tl; + + if (!test_bit(I915_WEDGED, &error->flags)) + return true; + + if (!i915->gt.scratch) /* Never full initialised, recovery impossible */ + return false; + + GEM_TRACE("start\n"); + + /* + * Before unwedging, make sure that all pending operations + * are flushed and errored out - we may have requests waiting upon + * third party fences. We marked all inflight requests as EIO, and + * every execbuf since returned EIO, for consistency we want all + * the currently pending requests to also be marked as EIO, which + * is done inside our nop_submit_request - and so we must wait. + * + * No more can be submitted until we reset the wedged bit. 
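+ *
+ * Hence we wait below on the last request of every timeline still on the
+ * active list before clearing the wedged bit.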
+ */ + mutex_lock(&i915->gt.timelines.mutex); + list_for_each_entry(tl, &i915->gt.timelines.active_list, link) { + struct i915_request *rq; + + rq = i915_active_request_get_unlocked(&tl->last_request); + if (!rq) + continue; + + /* + * All internal dependencies (i915_requests) will have + * been flushed by the set-wedge, but we may be stuck waiting + * for external fences. These should all be capped to 10s + * (I915_FENCE_TIMEOUT) so this wait should not be unbounded + * in the worst case. + */ + dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + } + mutex_unlock(&i915->gt.timelines.mutex); + + intel_engines_sanitize(i915, false); + + /* + * Undo nop_submit_request. We prevent all new i915 requests from + * being queued (by disallowing execbuf whilst wedged) so having + * waited for all active requests above, we know the system is idle + * and do not have to worry about a thread being inside + * engine->submit_request() as we swap over. So unlike installing + * the nop_submit_request on reset, we can do this from normal + * context and do not require stop_machine(). + */ + intel_engines_reset_default_submission(i915); + + GEM_TRACE("end\n"); + + smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ + clear_bit(I915_WEDGED, &i915->gpu_error.flags); + + return true; +} + +bool i915_gem_unset_wedged(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + bool result; + + mutex_lock(&error->wedge_mutex); + result = __i915_gem_unset_wedged(i915); + mutex_unlock(&error->wedge_mutex); + + return result; +} + +static int do_reset(struct drm_i915_private *i915, + intel_engine_mask_t stalled_mask) +{ + int err, i; + + gt_revoke(i915); + + err = intel_gpu_reset(i915, ALL_ENGINES); + for (i = 0; err && i < RESET_MAX_RETRIES; i++) { + msleep(10 * (i + 1)); + err = intel_gpu_reset(i915, ALL_ENGINES); + } + if (err) + return err; + + return gt_reset(i915, stalled_mask); +} + +/** + * i915_reset - reset chip after a hang + * @i915: #drm_i915_private to reset + * @stalled_mask: mask of the stalled engines with the guilty requests + * @reason: user error message for why we are resetting + * + * Reset the chip. Useful if a hang is detected. Marks the device as wedged + * on failure. + * + * Procedure is fairly simple: + * - reset the chip using the reset reg + * - re-init context state + * - re-init hardware status page + * - re-init ring buffer + * - re-init interrupt state + * - re-init display + */ +void i915_reset(struct drm_i915_private *i915, + intel_engine_mask_t stalled_mask, + const char *reason) +{ + struct i915_gpu_error *error = &i915->gpu_error; + int ret; + + GEM_TRACE("flags=%lx\n", error->flags); + + might_sleep(); + assert_rpm_wakelock_held(i915); + GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); + + /* Clear any previous failed attempts at recovery. Time to try again. 
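+	 * If unwedging is impossible (e.g. the GT was never fully initialised),
+	 * the device stays wedged and we abort the reset here.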
*/ + if (!__i915_gem_unset_wedged(i915)) + return; + + if (reason) + dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); + error->reset_count++; + + reset_prepare(i915); + + if (!intel_has_gpu_reset(i915)) { + if (i915_modparams.reset) + dev_err(i915->drm.dev, "GPU reset not supported\n"); + else + DRM_DEBUG_DRIVER("GPU reset disabled\n"); + goto error; + } + + if (INTEL_INFO(i915)->gpu_reset_clobbers_display) + intel_runtime_pm_disable_interrupts(i915); + + if (do_reset(i915, stalled_mask)) { + dev_err(i915->drm.dev, "Failed to reset chip\n"); + goto taint; + } + + if (INTEL_INFO(i915)->gpu_reset_clobbers_display) + intel_runtime_pm_enable_interrupts(i915); + + intel_overlay_reset(i915); + + /* + * Next we need to restore the context, but we don't use those + * yet either... + * + * Ring buffer needs to be re-initialized in the KMS case, or if X + * was running at the time of the reset (i.e. we weren't VT + * switched away). + */ + ret = i915_gem_init_hw(i915); + if (ret) { + DRM_ERROR("Failed to initialise HW following reset (%d)\n", + ret); + goto error; + } + + i915_queue_hangcheck(i915); + +finish: + reset_finish(i915); + if (!__i915_wedged(error)) + reset_restart(i915); + return; + +taint: + /* + * History tells us that if we cannot reset the GPU now, we + * never will. This then impacts everything that is run + * subsequently. On failing the reset, we mark the driver + * as wedged, preventing further execution on the GPU. + * We also want to go one step further and add a taint to the + * kernel so that any subsequent faults can be traced back to + * this failure. This is important for CI, where if the + * GPU/driver fails we would like to reboot and restart testing + * rather than continue on into oblivion. For everyone else, + * the system should still plod along, but they have been warned! + */ + add_taint(TAINT_WARN, LOCKDEP_STILL_OK); +error: + __i915_gem_set_wedged(i915); + goto finish; +} + +static inline int intel_gt_reset_engine(struct drm_i915_private *i915, + struct intel_engine_cs *engine) +{ + return intel_gpu_reset(i915, engine->mask); +} + +/** + * i915_reset_engine - reset GPU engine to recover from a hang + * @engine: engine to reset + * @msg: reason for GPU reset; or NULL for no dev_notice() + * + * Reset a specific GPU engine. Useful if a hang is detected. + * Returns zero on successful reset or otherwise an error code. + * + * Procedure is: + * - identifies the request that caused the hang and it is dropped + * - reset engine (which will force the engine to idle) + * - re-init/configure engine + */ +int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) +{ + struct i915_gpu_error *error = &engine->i915->gpu_error; + int ret; + + GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); + GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); + + reset_prepare_engine(engine); + + if (msg) + dev_notice(engine->i915->drm.dev, + "Resetting %s for %s\n", engine->name, msg); + error->reset_engine_count[engine->id]++; + + if (!engine->i915->guc.execbuf_client) + ret = intel_gt_reset_engine(engine->i915, engine); + else + ret = intel_guc_reset_engine(&engine->i915->guc, engine); + if (ret) { + /* If we fail here, we expect to fallback to a global reset */ + DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", + engine->i915->guc.execbuf_client ? 
"GuC " : "", + engine->name, ret); + goto out; + } + + /* + * The request that caused the hang is stuck on elsp, we know the + * active request and can drop it, adjust head to skip the offending + * request to resume executing remaining requests in the queue. + */ + intel_engine_reset(engine, true); + + /* + * The engine and its registers (and workarounds in case of render) + * have been reset to their default values. Follow the init_ring + * process to program RING_MODE, HWSP and re-enable submission. + */ + ret = engine->init_hw(engine); + if (ret) + goto out; + +out: + intel_engine_cancel_stop_cs(engine); + reset_finish_engine(engine); + return ret; +} + +static void i915_reset_device(struct drm_i915_private *i915, + u32 engine_mask, + const char *reason) +{ + struct i915_gpu_error *error = &i915->gpu_error; + struct kobject *kobj = &i915->drm.primary->kdev->kobj; + char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; + char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; + char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; + struct i915_wedge_me w; + + kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); + + DRM_DEBUG_DRIVER("resetting chip\n"); + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); + + /* Use a watchdog to ensure that our reset completes */ + i915_wedge_on_timeout(&w, i915, 5 * HZ) { + intel_prepare_reset(i915); + + /* Flush everyone using a resource about to be clobbered */ + synchronize_srcu_expedited(&error->reset_backoff_srcu); + + mutex_lock(&error->wedge_mutex); + i915_reset(i915, engine_mask, reason); + mutex_unlock(&error->wedge_mutex); + + intel_finish_reset(i915); + } + + if (!test_bit(I915_WEDGED, &error->flags)) + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); +} + +static void clear_register(struct drm_i915_private *dev_priv, i915_reg_t reg) +{ + I915_WRITE(reg, I915_READ(reg)); +} + +void i915_clear_error_registers(struct drm_i915_private *dev_priv) +{ + u32 eir; + + if (!IS_GEN(dev_priv, 2)) + clear_register(dev_priv, PGTBL_ER); + + if (INTEL_GEN(dev_priv) < 4) + clear_register(dev_priv, IPEIR(RENDER_RING_BASE)); + else + clear_register(dev_priv, IPEIR_I965); + + clear_register(dev_priv, EIR); + eir = I915_READ(EIR); + if (eir) { + /* + * some errors might have become stuck, + * mask them. + */ + DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); + I915_WRITE(EMR, I915_READ(EMR) | eir); + I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); + } + + if (INTEL_GEN(dev_priv) >= 8) { + I915_WRITE(GEN8_RING_FAULT_REG, + I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); + POSTING_READ(GEN8_RING_FAULT_REG); + } else if (INTEL_GEN(dev_priv) >= 6) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, dev_priv, id) { + I915_WRITE(RING_FAULT_REG(engine), + I915_READ(RING_FAULT_REG(engine)) & + ~RING_FAULT_VALID); + } + POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS0])); + } +} + +/** + * i915_handle_error - handle a gpu error + * @i915: i915 device private + * @engine_mask: mask representing engines that are hung + * @flags: control flags + * @fmt: Error message format string + * + * Do some basic checking of register state at error time and + * dump it to the syslog. Also call i915_capture_error_state() to make + * sure we get a record and make it available in debugfs. Fire a uevent + * so userspace knows something bad happened (should trigger collection + * of a ring dump etc.). 
+ */ +void i915_handle_error(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask, + unsigned long flags, + const char *fmt, ...) +{ + struct i915_gpu_error *error = &i915->gpu_error; + struct intel_engine_cs *engine; + intel_wakeref_t wakeref; + intel_engine_mask_t tmp; + char error_msg[80]; + char *msg = NULL; + + if (fmt) { + va_list args; + + va_start(args, fmt); + vscnprintf(error_msg, sizeof(error_msg), fmt, args); + va_end(args); + + msg = error_msg; + } + + /* + * In most cases it's guaranteed that we get here with an RPM + * reference held, for example because there is a pending GPU + * request that won't finish until the reset is done. This + * isn't the case at least when we get here by doing a + * simulated reset via debugfs, so get an RPM reference. + */ + wakeref = intel_runtime_pm_get(i915); + + engine_mask &= INTEL_INFO(i915)->engine_mask; + + if (flags & I915_ERROR_CAPTURE) { + i915_capture_error_state(i915, engine_mask, msg); + i915_clear_error_registers(i915); + } + + /* + * Try engine reset when available. We fall back to full reset if + * single reset fails. + */ + if (intel_has_reset_engine(i915) && !__i915_wedged(error)) { + for_each_engine_masked(engine, i915, engine_mask, tmp) { + BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); + if (test_and_set_bit(I915_RESET_ENGINE + engine->id, + &error->flags)) + continue; + + if (i915_reset_engine(engine, msg) == 0) + engine_mask &= ~engine->mask; + + clear_bit(I915_RESET_ENGINE + engine->id, + &error->flags); + wake_up_bit(&error->flags, + I915_RESET_ENGINE + engine->id); + } + } + + if (!engine_mask) + goto out; + + /* Full reset needs the mutex, stop any other user trying to do so. */ + if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) { + wait_event(error->reset_queue, + !test_bit(I915_RESET_BACKOFF, &error->flags)); + goto out; /* piggy-back on the other reset */ + } + + /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */ + synchronize_rcu_expedited(); + + /* Prevent any other reset-engine attempt. */ + for_each_engine(engine, i915, tmp) { + while (test_and_set_bit(I915_RESET_ENGINE + engine->id, + &error->flags)) + wait_on_bit(&error->flags, + I915_RESET_ENGINE + engine->id, + TASK_UNINTERRUPTIBLE); + } + + i915_reset_device(i915, engine_mask, msg); + + for_each_engine(engine, i915, tmp) { + clear_bit(I915_RESET_ENGINE + engine->id, + &error->flags); + } + + clear_bit(I915_RESET_BACKOFF, &error->flags); + wake_up_all(&error->reset_queue); + +out: + intel_runtime_pm_put(i915, wakeref); +} + +int i915_reset_trylock(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + int srcu; + + might_lock(&error->reset_backoff_srcu); + might_sleep(); + + rcu_read_lock(); + while (test_bit(I915_RESET_BACKOFF, &error->flags)) { + rcu_read_unlock(); + + if (wait_event_interruptible(error->reset_queue, + !test_bit(I915_RESET_BACKOFF, + &error->flags))) + return -EINTR; + + rcu_read_lock(); + } + srcu = srcu_read_lock(&error->reset_backoff_srcu); + rcu_read_unlock(); + + return srcu; +} + +void i915_reset_unlock(struct drm_i915_private *i915, int tag) +__releases(&i915->gpu_error.reset_backoff_srcu) +{ + struct i915_gpu_error *error = &i915->gpu_error; + + srcu_read_unlock(&error->reset_backoff_srcu, tag); +} + +int i915_terminally_wedged(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + + might_sleep(); + + if (!__i915_wedged(error)) + return 0; + + /* Reset still in progress? Maybe we will recover? 
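+	 * If no reset is pending (I915_RESET_BACKOFF is clear), the wedge is
+	 * permanent and we report -EIO.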
*/ + if (!test_bit(I915_RESET_BACKOFF, &error->flags)) + return -EIO; + + /* XXX intel_reset_finish() still takes struct_mutex!!! */ + if (mutex_is_locked(&i915->drm.struct_mutex)) + return -EAGAIN; + + if (wait_event_interruptible(error->reset_queue, + !test_bit(I915_RESET_BACKOFF, + &error->flags))) + return -EINTR; + + return __i915_wedged(error) ? -EIO : 0; +} + +bool i915_reset_flush(struct drm_i915_private *i915) +{ + int err; + + cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); + + flush_workqueue(i915->wq); + GEM_BUG_ON(READ_ONCE(i915->gpu_error.restart)); + + mutex_lock(&i915->drm.struct_mutex); + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED | + I915_WAIT_FOR_IDLE_BOOST, + MAX_SCHEDULE_TIMEOUT); + mutex_unlock(&i915->drm.struct_mutex); + + return !err; +} + +static void i915_wedge_me(struct work_struct *work) +{ + struct i915_wedge_me *w = container_of(work, typeof(*w), work.work); + + dev_err(w->i915->drm.dev, + "%s timed out, cancelling all in-flight rendering.\n", + w->name); + i915_gem_set_wedged(w->i915); +} + +void __i915_init_wedge(struct i915_wedge_me *w, + struct drm_i915_private *i915, + long timeout, + const char *name) +{ + w->i915 = i915; + w->name = name; + + INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me); + schedule_delayed_work(&w->work, timeout); +} + +void __i915_fini_wedge(struct i915_wedge_me *w) +{ + cancel_delayed_work_sync(&w->work); + destroy_delayed_work_on_stack(&w->work); + w->i915 = NULL; +} diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h new file mode 100644 index 000000000000..86b1ac8116ce --- /dev/null +++ b/drivers/gpu/drm/i915/i915_reset.h @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008-2018 Intel Corporation + */ + +#ifndef I915_RESET_H +#define I915_RESET_H + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/srcu.h> + +#include "intel_engine_types.h" + +struct drm_i915_private; +struct intel_engine_cs; +struct intel_guc; + +__printf(4, 5) +void i915_handle_error(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask, + unsigned long flags, + const char *fmt, ...); +#define I915_ERROR_CAPTURE BIT(0) + +void i915_clear_error_registers(struct drm_i915_private *i915); + +void i915_reset(struct drm_i915_private *i915, + intel_engine_mask_t stalled_mask, + const char *reason); +int i915_reset_engine(struct intel_engine_cs *engine, + const char *reason); + +void i915_reset_request(struct i915_request *rq, bool guilty); +bool i915_reset_flush(struct drm_i915_private *i915); + +int __must_check i915_reset_trylock(struct drm_i915_private *i915); +void i915_reset_unlock(struct drm_i915_private *i915, int tag); + +int i915_terminally_wedged(struct drm_i915_private *i915); + +bool intel_has_gpu_reset(struct drm_i915_private *i915); +bool intel_has_reset_engine(struct drm_i915_private *i915); + +int intel_gpu_reset(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask); + +int intel_reset_guc(struct drm_i915_private *i915); + +struct i915_wedge_me { + struct delayed_work work; + struct drm_i915_private *i915; + const char *name; +}; + +void __i915_init_wedge(struct i915_wedge_me *w, + struct drm_i915_private *i915, + long timeout, + const char *name); +void __i915_fini_wedge(struct i915_wedge_me *w); + +#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ + for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__); \ + (W)->i915; \ + __i915_fini_wedge((W))) + +#endif /* I915_RESET_H */ diff --git a/drivers/gpu/drm/i915/i915_scheduler.c 
b/drivers/gpu/drm/i915/i915_scheduler.c index 340faea6c08a..77dbf7d74e12 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -7,9 +7,16 @@ #include <linux/mutex.h> #include "i915_drv.h" +#include "i915_globals.h" #include "i915_request.h" #include "i915_scheduler.h" +static struct i915_global_scheduler { + struct i915_global base; + struct kmem_cache *slab_dependencies; + struct kmem_cache *slab_priorities; +} global; + static DEFINE_SPINLOCK(schedule_lock); static const struct i915_request * @@ -18,6 +25,11 @@ node_to_request(const struct i915_sched_node *node) return container_of(node, const struct i915_request, sched); } +static inline bool node_started(const struct i915_sched_node *node) +{ + return i915_request_started(node_to_request(node)); +} + static inline bool node_signaled(const struct i915_sched_node *node) { return i915_request_completed(node_to_request(node)); @@ -29,19 +41,20 @@ void i915_sched_node_init(struct i915_sched_node *node) INIT_LIST_HEAD(&node->waiters_list); INIT_LIST_HEAD(&node->link); node->attr.priority = I915_PRIORITY_INVALID; + node->semaphores = 0; + node->flags = 0; } static struct i915_dependency * -i915_dependency_alloc(struct drm_i915_private *i915) +i915_dependency_alloc(void) { - return kmem_cache_alloc(i915->dependencies, GFP_KERNEL); + return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL); } static void -i915_dependency_free(struct drm_i915_private *i915, - struct i915_dependency *dep) +i915_dependency_free(struct i915_dependency *dep) { - kmem_cache_free(i915->dependencies, dep); + kmem_cache_free(global.slab_dependencies, dep); } bool __i915_sched_node_add_dependency(struct i915_sched_node *node, @@ -60,6 +73,11 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, dep->signaler = signal; dep->flags = flags; + /* Keep track of whether anyone on this chain has a semaphore */ + if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN && + !node_started(signal)) + node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN; + ret = true; } @@ -68,25 +86,23 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, return ret; } -int i915_sched_node_add_dependency(struct drm_i915_private *i915, - struct i915_sched_node *node, +int i915_sched_node_add_dependency(struct i915_sched_node *node, struct i915_sched_node *signal) { struct i915_dependency *dep; - dep = i915_dependency_alloc(i915); + dep = i915_dependency_alloc(); if (!dep) return -ENOMEM; if (!__i915_sched_node_add_dependency(node, signal, dep, I915_DEPENDENCY_ALLOC)) - i915_dependency_free(i915, dep); + i915_dependency_free(dep); return 0; } -void i915_sched_node_fini(struct drm_i915_private *i915, - struct i915_sched_node *node) +void i915_sched_node_fini(struct i915_sched_node *node) { struct i915_dependency *dep, *tmp; @@ -106,7 +122,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915, list_del(&dep->wait_link); if (dep->flags & I915_DEPENDENCY_ALLOC) - i915_dependency_free(i915, dep); + i915_dependency_free(dep); } /* Remove ourselves from everyone who depends upon us */ @@ -116,7 +132,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915, list_del(&dep->signal_link); if (dep->flags & I915_DEPENDENCY_ALLOC) - i915_dependency_free(i915, dep); + i915_dependency_free(dep); } spin_unlock(&schedule_lock); @@ -127,8 +143,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb) return rb_entry(rb, struct i915_priolist, node); } -static void assert_priolists(struct intel_engine_execlists * const 
execlists, - long queue_priority) +static void assert_priolists(struct intel_engine_execlists * const execlists) { struct rb_node *rb; long last_prio, i; @@ -139,7 +154,7 @@ static void assert_priolists(struct intel_engine_execlists * const execlists, GEM_BUG_ON(rb_first_cached(&execlists->queue) != rb_first(&execlists->queue.rb_root)); - last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1; + last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1; for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { const struct i915_priolist *p = to_priolist(rb); @@ -166,7 +181,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio) int idx, i; lockdep_assert_held(&engine->timeline.lock); - assert_priolists(execlists, INT_MAX); + assert_priolists(execlists); /* buckets sorted from highest [in slot 0] to lowest priority */ idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1; @@ -194,7 +209,7 @@ find_priolist: if (prio == I915_PRIORITY_NORMAL) { p = &execlists->default_priolist; } else { - p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); + p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC); /* Convert an allocation failure to a priority bump */ if (unlikely(!p)) { prio = I915_PRIORITY_NORMAL; /* recurses just once */ @@ -224,8 +239,14 @@ out: return &p->requests[idx]; } +struct sched_cache { + struct list_head *priolist; +}; + static struct intel_engine_cs * -sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) +sched_lock_engine(const struct i915_sched_node *node, + struct intel_engine_cs *locked, + struct sched_cache *cache) { struct intel_engine_cs *engine = node_to_request(node)->engine; @@ -233,20 +254,33 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) if (engine != locked) { spin_unlock(&locked->timeline.lock); + memset(cache, 0, sizeof(*cache)); spin_lock(&engine->timeline.lock); } return engine; } +static bool inflight(const struct i915_request *rq, + const struct intel_engine_cs *engine) +{ + const struct i915_request *active; + + if (!i915_request_is_active(rq)) + return false; + + active = port_request(engine->execlists.port); + return active->hw_context == rq->hw_context; +} + static void __i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr) { - struct list_head *uninitialized_var(pl); - struct intel_engine_cs *engine, *last; + struct intel_engine_cs *engine; struct i915_dependency *dep, *p; struct i915_dependency stack; const int prio = attr->priority; + struct sched_cache cache; LIST_HEAD(dfs); /* Needed in order to use the temporary link inside i915_dependency */ @@ -283,6 +317,10 @@ static void __i915_schedule(struct i915_request *rq, list_for_each_entry(dep, &dfs, dfs_link) { struct i915_sched_node *node = dep->signaler; + /* If we are already flying, we know we have no signalers */ + if (node_started(node)) + continue; + /* * Within an engine, there can be no cycle, but we may * refer to the same dependency chain multiple times @@ -295,7 +333,6 @@ static void __i915_schedule(struct i915_request *rq, if (node_signaled(p->signaler)) continue; - GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority); if (prio > READ_ONCE(p->signaler->attr.priority)) list_move_tail(&p->dfs_link, &dfs); } @@ -317,7 +354,7 @@ static void __i915_schedule(struct i915_request *rq, __list_del_entry(&stack.dfs_link); } - last = NULL; + memset(&cache, 0, sizeof(cache)); engine = rq->engine; spin_lock_irq(&engine->timeline.lock); @@ -327,7 +364,8 @@ static void 
__i915_schedule(struct i915_request *rq, INIT_LIST_HEAD(&dep->dfs_link); - engine = sched_lock_engine(node, engine); + engine = sched_lock_engine(node, engine, &cache); + lockdep_assert_held(&engine->timeline.lock); /* Recheck after acquiring the engine->timeline.lock */ if (prio <= node->attr.priority || node_signaled(node)) @@ -335,11 +373,11 @@ static void __i915_schedule(struct i915_request *rq, node->attr.priority = prio; if (!list_empty(&node->link)) { - if (last != engine) { - pl = i915_sched_lookup_priolist(engine, prio); - last = engine; - } - list_move_tail(&node->link, pl); + if (!cache.priolist) + cache.priolist = + i915_sched_lookup_priolist(engine, + prio); + list_move_tail(&node->link, cache.priolist); } else { /* * If the request is not in the priolist queue because @@ -353,20 +391,19 @@ static void __i915_schedule(struct i915_request *rq, continue; } - if (prio <= engine->execlists.queue_priority) + if (prio <= engine->execlists.queue_priority_hint) continue; + engine->execlists.queue_priority_hint = prio; + /* * If we are already the currently executing context, don't * bother evaluating if we should preempt ourselves. */ - if (node_to_request(node)->global_seqno && - i915_seqno_passed(port_request(engine->execlists.port)->global_seqno, - node_to_request(node)->global_seqno)) + if (inflight(node_to_request(node), engine)) continue; /* Defer (tasklet) submission until after all of our updates. */ - engine->execlists.queue_priority = prio; tasklet_hi_schedule(&engine->execlists.tasklet); } @@ -397,3 +434,45 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump) spin_unlock_bh(&schedule_lock); } + +void __i915_priolist_free(struct i915_priolist *p) +{ + kmem_cache_free(global.slab_priorities, p); +} + +static void i915_global_scheduler_shrink(void) +{ + kmem_cache_shrink(global.slab_dependencies); + kmem_cache_shrink(global.slab_priorities); +} + +static void i915_global_scheduler_exit(void) +{ + kmem_cache_destroy(global.slab_dependencies); + kmem_cache_destroy(global.slab_priorities); +} + +static struct i915_global_scheduler global = { { + .shrink = i915_global_scheduler_shrink, + .exit = i915_global_scheduler_exit, +} }; + +int __init i915_global_scheduler_init(void) +{ + global.slab_dependencies = KMEM_CACHE(i915_dependency, + SLAB_HWCACHE_ALIGN); + if (!global.slab_dependencies) + return -ENOMEM; + + global.slab_priorities = KMEM_CACHE(i915_priolist, + SLAB_HWCACHE_ALIGN); + if (!global.slab_priorities) + goto err_priorities; + + i915_global_register(&global.base); + return 0; + +err_priorities: + kmem_cache_destroy(global.slab_priorities); + return -ENOMEM; +} diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index dbe9cb7ecd82..07d243acf553 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -8,80 +8,22 @@ #define _I915_SCHEDULER_H_ #include <linux/bitops.h> +#include <linux/list.h> #include <linux/kernel.h> -#include <uapi/drm/i915_drm.h> +#include "i915_scheduler_types.h" -struct drm_i915_private; -struct i915_request; -struct intel_engine_cs; +#define priolist_for_each_request(it, plist, idx) \ + for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \ + list_for_each_entry(it, &(plist)->requests[idx], sched.link) -enum { - I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, - I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, - I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1, - - I915_PRIORITY_INVALID = INT_MIN -}; - -#define 
I915_USER_PRIORITY_SHIFT 2 -#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT) - -#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT) -#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1) - -#define I915_PRIORITY_WAIT ((u8)BIT(0)) -#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1)) - -struct i915_sched_attr { - /** - * @priority: execution and service priority - * - * All clients are equal, but some are more equal than others! - * - * Requests from a context with a greater (more positive) value of - * @priority will be executed before those with a lower @priority - * value, forming a simple QoS. - * - * The &drm_i915_private.kernel_context is assigned the lowest priority. - */ - int priority; -}; - -/* - * "People assume that time is a strict progression of cause to effect, but - * actually, from a nonlinear, non-subjective viewpoint, it's more like a big - * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015 - * - * Requests exist in a complex web of interdependencies. Each request - * has to wait for some other request to complete before it is ready to be run - * (e.g. we have to wait until the pixels have been rendering into a texture - * before we can copy from it). We track the readiness of a request in terms - * of fences, but we also need to keep the dependency tree for the lifetime - * of the request (beyond the life of an individual fence). We use the tree - * at various points to reorder the requests whilst keeping the requests - * in order with respect to their various dependencies. - * - * There is no active component to the "scheduler". As we know the dependency - * DAG of each request, we are able to insert it into a sorted queue when it - * is ready, and are able to reorder its portion of the graph to accommodate - * dynamic priority changes. - */ -struct i915_sched_node { - struct list_head signalers_list; /* those before us, we depend upon */ - struct list_head waiters_list; /* those after us, they depend upon us */ - struct list_head link; - struct i915_sched_attr attr; -}; - -struct i915_dependency { - struct i915_sched_node *signaler; - struct list_head signal_link; - struct list_head wait_link; - struct list_head dfs_link; - unsigned long flags; -#define I915_DEPENDENCY_ALLOC BIT(0) -}; +#define priolist_for_each_request_consume(it, n, plist, idx) \ + for (; \ + (plist)->used ? 
(idx = __ffs((plist)->used)), 1 : 0; \ + (plist)->used &= ~BIT(idx)) \ + list_for_each_entry_safe(it, n, \ + &(plist)->requests[idx], \ + sched.link) void i915_sched_node_init(struct i915_sched_node *node); @@ -90,12 +32,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, struct i915_dependency *dep, unsigned long flags); -int i915_sched_node_add_dependency(struct drm_i915_private *i915, - struct i915_sched_node *node, +int i915_sched_node_add_dependency(struct i915_sched_node *node, struct i915_sched_node *signal); -void i915_sched_node_fini(struct drm_i915_private *i915, - struct i915_sched_node *node); +void i915_sched_node_fini(struct i915_sched_node *node); void i915_schedule(struct i915_request *request, const struct i915_sched_attr *attr); @@ -105,4 +45,11 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump); struct list_head * i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio); +void __i915_priolist_free(struct i915_priolist *p); +static inline void i915_priolist_free(struct i915_priolist *p) +{ + if (p->priority != I915_PRIORITY_NORMAL) + __i915_priolist_free(p); +} + #endif /* _I915_SCHEDULER_H_ */ diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h new file mode 100644 index 000000000000..f1af3916a808 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_scheduler_types.h @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#ifndef _I915_SCHEDULER_TYPES_H_ +#define _I915_SCHEDULER_TYPES_H_ + +#include <linux/list.h> + +#include "i915_priolist_types.h" +#include "intel_engine_types.h" + +struct drm_i915_private; +struct i915_request; +struct intel_engine_cs; + +struct i915_sched_attr { + /** + * @priority: execution and service priority + * + * All clients are equal, but some are more equal than others! + * + * Requests from a context with a greater (more positive) value of + * @priority will be executed before those with a lower @priority + * value, forming a simple QoS. + * + * The &drm_i915_private.kernel_context is assigned the lowest priority. + */ + int priority; +}; + +/* + * "People assume that time is a strict progression of cause to effect, but + * actually, from a nonlinear, non-subjective viewpoint, it's more like a big + * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015 + * + * Requests exist in a complex web of interdependencies. Each request + * has to wait for some other request to complete before it is ready to be run + * (e.g. we have to wait until the pixels have been rendering into a texture + * before we can copy from it). We track the readiness of a request in terms + * of fences, but we also need to keep the dependency tree for the lifetime + * of the request (beyond the life of an individual fence). We use the tree + * at various points to reorder the requests whilst keeping the requests + * in order with respect to their various dependencies. + * + * There is no active component to the "scheduler". As we know the dependency + * DAG of each request, we are able to insert it into a sorted queue when it + * is ready, and are able to reorder its portion of the graph to accommodate + * dynamic priority changes. 
+ */ +struct i915_sched_node { + struct list_head signalers_list; /* those before us, we depend upon */ + struct list_head waiters_list; /* those after us, they depend upon us */ + struct list_head link; + struct i915_sched_attr attr; + unsigned int flags; +#define I915_SCHED_HAS_SEMAPHORE_CHAIN BIT(0) + intel_engine_mask_t semaphores; +}; + +struct i915_dependency { + struct i915_sched_node *signaler; + struct list_head signal_link; + struct list_head wait_link; + struct list_head dfs_link; + unsigned long flags; +#define I915_DEPENDENCY_ALLOC BIT(0) +}; + +#endif /* _I915_SCHEDULER_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h index a73472dd12fd..207e21b478f2 100644 --- a/drivers/gpu/drm/i915/i915_selftest.h +++ b/drivers/gpu/drm/i915/i915_selftest.h @@ -31,6 +31,7 @@ struct i915_selftest { unsigned long timeout_jiffies; unsigned int timeout_ms; unsigned int random_seed; + char *filter; int mock; int live; }; diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 8f3aa4dc0c98..d2f2a9c2fabd 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -24,7 +24,6 @@ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "intel_drv.h" #include "i915_reg.h" @@ -65,7 +64,7 @@ int i915_save_state(struct drm_i915_private *dev_priv) i915_save_display(dev_priv); - if (IS_GEN4(dev_priv)) + if (IS_GEN(dev_priv, 4)) pci_read_config_word(pdev, GCDGMBUS, &dev_priv->regfile.saveGCDGMBUS); @@ -77,17 +76,17 @@ int i915_save_state(struct drm_i915_private *dev_priv) dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); /* Scratch space */ - if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) { + if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) { for (i = 0; i < 7; i++) { dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i)); dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); } for (i = 0; i < 3; i++) dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); - } else if (IS_GEN2(dev_priv)) { + } else if (IS_GEN(dev_priv, 2)) { for (i = 0; i < 7; i++) dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); - } else if (HAS_GMCH_DISPLAY(dev_priv)) { + } else if (HAS_GMCH(dev_priv)) { for (i = 0; i < 16; i++) { dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i)); dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); @@ -108,7 +107,7 @@ int i915_restore_state(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); - if (IS_GEN4(dev_priv)) + if (IS_GEN(dev_priv, 4)) pci_write_config_word(pdev, GCDGMBUS, dev_priv->regfile.saveGCDGMBUS); i915_restore_display(dev_priv); @@ -122,17 +121,17 @@ int i915_restore_state(struct drm_i915_private *dev_priv) I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); /* Scratch space */ - if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) { + if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) { for (i = 0; i < 7; i++) { I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]); I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); } for (i = 0; i < 3; i++) I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); - } else if (IS_GEN2(dev_priv)) { + } else if (IS_GEN(dev_priv, 2)) { for (i = 0; i < 7; i++) I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); - } else if (HAS_GMCH_DISPLAY(dev_priv)) { + } else if (HAS_GMCH(dev_priv)) { for (i = 0; i < 16; i++) { I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]); I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); diff --git 
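The priolist_for_each_request_consume() iterator added to i915_scheduler.h above drains one i915_priolist bucket per pass: while ->used is non-zero it takes the lowest set bit (the highest-priority non-empty slot, since slot 0 holds the highest priority), walks that request list with list_for_each_entry_safe(), and clears the bit on the next loop step, after which the emptied priolist can be handed back via i915_priolist_free(). Below is a sketch of how a submission backend might consume its queue with it; the function itself, the placeholder for handing each request to the hardware, and the assumption that the caller holds engine->timeline.lock are illustrative rather than lifted from the patch:

#include <linux/rbtree.h>

#include "i915_request.h"
#include "i915_scheduler.h"
#include "intel_ringbuffer.h"

/* Sketch only; the caller is assumed to hold engine->timeline.lock. */
static void drain_priolists(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct rb_node *rb;

	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			/* Empty the bucket; a real backend would submit rq here. */
			list_del_init(&rq->sched.link);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}
}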
a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index fc2eeab823b7..5387aafd3424 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -192,7 +192,7 @@ static void __i915_sw_fence_complete(struct i915_sw_fence *fence, __i915_sw_fence_notify(fence, FENCE_FREE); } -static void i915_sw_fence_complete(struct i915_sw_fence *fence) +void i915_sw_fence_complete(struct i915_sw_fence *fence) { debug_fence_assert(fence); @@ -202,7 +202,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence) __i915_sw_fence_complete(fence, NULL); } -static void i915_sw_fence_await(struct i915_sw_fence *fence) +void i915_sw_fence_await(struct i915_sw_fence *fence) { debug_fence_assert(fence); WARN_ON(atomic_inc_return(&fence->pending) <= 1); @@ -359,11 +359,6 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence, return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp); } -struct i915_sw_dma_fence_cb { - struct dma_fence_cb base; - struct i915_sw_fence *fence; -}; - struct i915_sw_dma_fence_cb_timer { struct i915_sw_dma_fence_cb base; struct dma_fence *dma; @@ -390,7 +385,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t) if (!fence) return; - pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n", + pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n", cb->dma->ops->get_driver_name(cb->dma), cb->dma->ops->get_timeline_name(cb->dma), cb->dma->seqno, @@ -480,6 +475,40 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, return ret; } +static void __dma_i915_sw_fence_wake(struct dma_fence *dma, + struct dma_fence_cb *data) +{ + struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base); + + i915_sw_fence_complete(cb->fence); +} + +int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, + struct dma_fence *dma, + struct i915_sw_dma_fence_cb *cb) +{ + int ret; + + debug_fence_assert(fence); + + if (dma_fence_is_signaled(dma)) + return 0; + + cb->fence = fence; + i915_sw_fence_await(fence); + + ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake); + if (ret == 0) { + ret = 1; + } else { + i915_sw_fence_complete(fence); + if (ret == -ENOENT) /* fence already signaled */ + ret = 0; + } + + return ret; +} + int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct reservation_object *resv, const struct dma_fence_ops *exclude, diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 0e055ea0179f..9cb5c3b307a6 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -9,14 +9,13 @@ #ifndef _I915_SW_FENCE_H_ #define _I915_SW_FENCE_H_ +#include <linux/dma-fence.h> #include <linux/gfp.h> #include <linux/kref.h> #include <linux/notifier.h> /* for NOTIFY_DONE */ #include <linux/wait.h> struct completion; -struct dma_fence; -struct dma_fence_ops; struct reservation_object; struct i915_sw_fence { @@ -68,10 +67,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence, struct i915_sw_fence *after, gfp_t gfp); + +struct i915_sw_dma_fence_cb { + struct dma_fence_cb base; + struct i915_sw_fence *fence; +}; + +int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, + struct dma_fence *dma, + struct i915_sw_dma_fence_cb *cb); int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, struct dma_fence *dma, unsigned long timeout, gfp_t gfp); + int 
i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct reservation_object *resv, const struct dma_fence_ops *exclude, @@ -79,6 +88,9 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, unsigned long timeout, gfp_t gfp); +void i915_sw_fence_await(struct i915_sw_fence *fence); +void i915_sw_fence_complete(struct i915_sw_fence *fence); + static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence) { return atomic_read(&fence->pending) <= 0; diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 535caebd9813..41313005af42 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -42,11 +42,11 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) static u32 calc_residency(struct drm_i915_private *dev_priv, i915_reg_t reg) { - u64 res; + intel_wakeref_t wakeref; + u64 res = 0; - intel_runtime_pm_get(dev_priv); - res = intel_rc6_residency_us(dev_priv, reg); - intel_runtime_pm_put(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) + res = intel_rc6_residency_us(dev_priv, reg); return DIV_ROUND_CLOSEST_ULL(res, 1000); } @@ -258,9 +258,10 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + intel_wakeref_t wakeref; int ret; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->pcu_lock); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { @@ -274,7 +275,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, } mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return snprintf(buf, PAGE_SIZE, "%d\n", ret); } @@ -354,6 +355,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct intel_rps *rps = &dev_priv->gt_pm.rps; + intel_wakeref_t wakeref; u32 val; ssize_t ret; @@ -361,7 +363,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, if (ret) return ret; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->pcu_lock); @@ -371,7 +373,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, val > rps->max_freq || val < rps->min_freq_softlimit) { mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return -EINVAL; } @@ -392,7 +394,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret ?: count; } @@ -412,6 +414,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct intel_rps *rps = &dev_priv->gt_pm.rps; + intel_wakeref_t wakeref; u32 val; ssize_t ret; @@ -419,7 +422,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, if (ret) return ret; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->pcu_lock); @@ -429,7 +432,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, val > rps->max_freq || val > rps->max_freq_softlimit) { mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return -EINVAL; } @@ -446,7 +449,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, mutex_unlock(&dev_priv->pcu_lock); - 
intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret ?: count; } @@ -521,7 +524,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, ssize_t ret; gpu = i915_first_error_state(i915); - if (gpu) { + if (IS_ERR(gpu)) { + ret = PTR_ERR(gpu); + } else if (gpu) { ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); i915_gpu_state_put(gpu); } else { diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c index 4667cc08c416..2f4907364920 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/i915_timeline.c @@ -6,37 +6,292 @@ #include "i915_drv.h" -#include "i915_timeline.h" +#include "i915_active.h" #include "i915_syncmap.h" +#include "i915_timeline.h" + +#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) +#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) -void i915_timeline_init(struct drm_i915_private *i915, - struct i915_timeline *timeline, - const char *name) +struct i915_timeline_hwsp { + struct i915_gt_timelines *gt; + struct list_head free_link; + struct i915_vma *vma; + u64 free_bitmap; +}; + +struct i915_timeline_cacheline { + struct i915_active active; + struct i915_timeline_hwsp *hwsp; + void *vaddr; +#define CACHELINE_BITS 6 +#define CACHELINE_FREE CACHELINE_BITS +}; + +static inline struct drm_i915_private * +hwsp_to_i915(struct i915_timeline_hwsp *hwsp) { - lockdep_assert_held(&i915->drm.struct_mutex); + return container_of(hwsp->gt, struct drm_i915_private, gt.timelines); +} + +static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + if (IS_ERR(vma)) + i915_gem_object_put(obj); + + return vma; +} + +static struct i915_vma * +hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) +{ + struct drm_i915_private *i915 = timeline->i915; + struct i915_gt_timelines *gt = &i915->gt.timelines; + struct i915_timeline_hwsp *hwsp; + + BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE); + + spin_lock(>->hwsp_lock); + + /* hwsp_free_list only contains HWSP that have available cachelines */ + hwsp = list_first_entry_or_null(>->hwsp_free_list, + typeof(*hwsp), free_link); + if (!hwsp) { + struct i915_vma *vma; + + spin_unlock(>->hwsp_lock); + + hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL); + if (!hwsp) + return ERR_PTR(-ENOMEM); + + vma = __hwsp_alloc(i915); + if (IS_ERR(vma)) { + kfree(hwsp); + return vma; + } + + vma->private = hwsp; + hwsp->vma = vma; + hwsp->free_bitmap = ~0ull; + hwsp->gt = gt; + + spin_lock(>->hwsp_lock); + list_add(&hwsp->free_link, >->hwsp_free_list); + } + + GEM_BUG_ON(!hwsp->free_bitmap); + *cacheline = __ffs64(hwsp->free_bitmap); + hwsp->free_bitmap &= ~BIT_ULL(*cacheline); + if (!hwsp->free_bitmap) + list_del(&hwsp->free_link); + + spin_unlock(>->hwsp_lock); + + GEM_BUG_ON(hwsp->vma->private != hwsp); + return hwsp->vma; +} + +static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline) +{ + struct i915_gt_timelines *gt = hwsp->gt; + + spin_lock(>->hwsp_lock); + + /* As a cacheline becomes available, publish the HWSP on the freelist */ + if (!hwsp->free_bitmap) + list_add_tail(&hwsp->free_link, >->hwsp_free_list); + + GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap)); + 
hwsp->free_bitmap |= BIT_ULL(cacheline); + + /* And if no one is left using it, give the page back to the system */ + if (hwsp->free_bitmap == ~0ull) { + i915_vma_put(hwsp->vma); + list_del(&hwsp->free_link); + kfree(hwsp); + } + + spin_unlock(>->hwsp_lock); +} + +static void __idle_cacheline_free(struct i915_timeline_cacheline *cl) +{ + GEM_BUG_ON(!i915_active_is_idle(&cl->active)); + + i915_gem_object_unpin_map(cl->hwsp->vma->obj); + i915_vma_put(cl->hwsp->vma); + __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); + + i915_active_fini(&cl->active); + kfree(cl); +} + +static void __cacheline_retire(struct i915_active *active) +{ + struct i915_timeline_cacheline *cl = + container_of(active, typeof(*cl), active); + + i915_vma_unpin(cl->hwsp->vma); + if (ptr_test_bit(cl->vaddr, CACHELINE_FREE)) + __idle_cacheline_free(cl); +} + +static struct i915_timeline_cacheline * +cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline) +{ + struct i915_timeline_cacheline *cl; + void *vaddr; + + GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS)); + + cl = kmalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return ERR_PTR(-ENOMEM); + + vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + kfree(cl); + return ERR_CAST(vaddr); + } + + i915_vma_get(hwsp->vma); + cl->hwsp = hwsp; + cl->vaddr = page_pack_bits(vaddr, cacheline); + + i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire); + + return cl; +} + +static void cacheline_acquire(struct i915_timeline_cacheline *cl) +{ + if (cl && i915_active_acquire(&cl->active)) + __i915_vma_pin(cl->hwsp->vma); +} + +static void cacheline_release(struct i915_timeline_cacheline *cl) +{ + if (cl) + i915_active_release(&cl->active); +} + +static void cacheline_free(struct i915_timeline_cacheline *cl) +{ + GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); + cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); + + if (i915_active_is_idle(&cl->active)) + __idle_cacheline_free(cl); +} + +int i915_timeline_init(struct drm_i915_private *i915, + struct i915_timeline *timeline, + struct i915_vma *hwsp) +{ + void *vaddr; /* * Ideally we want a set of engines on a single leaf as we expect * to mostly be tracking synchronisation between engines. It is not * a huge issue if this is not the case, but we may want to mitigate * any page crossing penalties if they become an issue. + * + * Called during early_init before we know how many engines there are. 
*/ BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES); - timeline->name = name; + timeline->i915 = i915; + timeline->pin_count = 0; + timeline->has_initial_breadcrumb = !hwsp; + timeline->hwsp_cacheline = NULL; + + if (!hwsp) { + struct i915_timeline_cacheline *cl; + unsigned int cacheline; + + hwsp = hwsp_alloc(timeline, &cacheline); + if (IS_ERR(hwsp)) + return PTR_ERR(hwsp); - list_add(&timeline->link, &i915->gt.timelines); + cl = cacheline_alloc(hwsp->private, cacheline); + if (IS_ERR(cl)) { + __idle_hwsp_free(hwsp->private, cacheline); + return PTR_ERR(cl); + } - /* Called during early_init before we know how many engines there are */ + timeline->hwsp_cacheline = cl; + timeline->hwsp_offset = cacheline * CACHELINE_BYTES; + + vaddr = page_mask_bits(cl->vaddr); + } else { + timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; + + vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + } + + timeline->hwsp_seqno = + memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES); + + timeline->hwsp_ggtt = i915_vma_get(hwsp); + GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size); timeline->fence_context = dma_fence_context_alloc(1); spin_lock_init(&timeline->lock); + mutex_init(&timeline->mutex); - init_request_active(&timeline->last_request, NULL); + INIT_ACTIVE_REQUEST(&timeline->barrier); + INIT_ACTIVE_REQUEST(&timeline->last_request); INIT_LIST_HEAD(&timeline->requests); i915_syncmap_init(&timeline->sync); + + return 0; +} + +void i915_timelines_init(struct drm_i915_private *i915) +{ + struct i915_gt_timelines *gt = &i915->gt.timelines; + + mutex_init(>->mutex); + INIT_LIST_HEAD(>->active_list); + + spin_lock_init(>->hwsp_lock); + INIT_LIST_HEAD(>->hwsp_free_list); + + /* via i915_gem_wait_for_idle() */ + i915_gem_shrinker_taints_mutex(i915, >->mutex); +} + +static void timeline_add_to_active(struct i915_timeline *tl) +{ + struct i915_gt_timelines *gt = &tl->i915->gt.timelines; + + mutex_lock(>->mutex); + list_add(&tl->link, >->active_list); + mutex_unlock(>->mutex); +} + +static void timeline_remove_from_active(struct i915_timeline *tl) +{ + struct i915_gt_timelines *gt = &tl->i915->gt.timelines; + + mutex_lock(>->mutex); + list_del(&tl->link); + mutex_unlock(>->mutex); } /** @@ -51,11 +306,11 @@ void i915_timeline_init(struct drm_i915_private *i915, */ void i915_timelines_park(struct drm_i915_private *i915) { + struct i915_gt_timelines *gt = &i915->gt.timelines; struct i915_timeline *timeline; - lockdep_assert_held(&i915->drm.struct_mutex); - - list_for_each_entry(timeline, &i915->gt.timelines, link) { + mutex_lock(>->mutex); + list_for_each_entry(timeline, >->active_list, link) { /* * All known fences are completed so we can scrap * the current sync point tracking and start afresh, @@ -64,32 +319,243 @@ void i915_timelines_park(struct drm_i915_private *i915) */ i915_syncmap_free(&timeline->sync); } + mutex_unlock(>->mutex); } void i915_timeline_fini(struct i915_timeline *timeline) { + GEM_BUG_ON(timeline->pin_count); GEM_BUG_ON(!list_empty(&timeline->requests)); + GEM_BUG_ON(i915_active_request_isset(&timeline->barrier)); i915_syncmap_free(&timeline->sync); - list_del(&timeline->link); + if (timeline->hwsp_cacheline) + cacheline_free(timeline->hwsp_cacheline); + else + i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); + + i915_vma_put(timeline->hwsp_ggtt); } struct i915_timeline * -i915_timeline_create(struct drm_i915_private *i915, const char *name) +i915_timeline_create(struct drm_i915_private *i915, + struct i915_vma *global_hwsp) { struct 
i915_timeline *timeline; + int err; timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); if (!timeline) return ERR_PTR(-ENOMEM); - i915_timeline_init(i915, timeline, name); + err = i915_timeline_init(i915, timeline, global_hwsp); + if (err) { + kfree(timeline); + return ERR_PTR(err); + } + kref_init(&timeline->kref); return timeline; } +int i915_timeline_pin(struct i915_timeline *tl) +{ + int err; + + if (tl->pin_count++) + return 0; + GEM_BUG_ON(!tl->pin_count); + + err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) + goto unpin; + + tl->hwsp_offset = + i915_ggtt_offset(tl->hwsp_ggtt) + + offset_in_page(tl->hwsp_offset); + + cacheline_acquire(tl->hwsp_cacheline); + timeline_add_to_active(tl); + + return 0; + +unpin: + tl->pin_count = 0; + return err; +} + +static u32 timeline_advance(struct i915_timeline *tl) +{ + GEM_BUG_ON(!tl->pin_count); + GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb); + + return tl->seqno += 1 + tl->has_initial_breadcrumb; +} + +static void timeline_rollback(struct i915_timeline *tl) +{ + tl->seqno -= 1 + tl->has_initial_breadcrumb; +} + +static noinline int +__i915_timeline_get_seqno(struct i915_timeline *tl, + struct i915_request *rq, + u32 *seqno) +{ + struct i915_timeline_cacheline *cl; + unsigned int cacheline; + struct i915_vma *vma; + void *vaddr; + int err; + + /* + * If there is an outstanding GPU reference to this cacheline, + * such as it being sampled by a HW semaphore on another timeline, + * we cannot wraparound our seqno value (the HW semaphore does + * a strict greater-than-or-equals compare, not i915_seqno_passed). + * So if the cacheline is still busy, we must detach ourselves + * from it and leave it inflight alongside its users. + * + * However, if nobody is watching and we can guarantee that nobody + * will, we could simply reuse the same cacheline. + * + * if (i915_active_request_is_signaled(&tl->last_request) && + * i915_active_is_signaled(&tl->hwsp_cacheline->active)) + * return 0; + * + * That seems unlikely for a busy timeline that needed to wrap in + * the first place, so just replace the cacheline. + */ + + vma = hwsp_alloc(tl, &cacheline); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_rollback; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) { + __idle_hwsp_free(vma->private, cacheline); + goto err_rollback; + } + + cl = cacheline_alloc(vma->private, cacheline); + if (IS_ERR(cl)) { + err = PTR_ERR(cl); + __idle_hwsp_free(vma->private, cacheline); + goto err_unpin; + } + GEM_BUG_ON(cl->hwsp->vma != vma); + + /* + * Attach the old cacheline to the current request, so that we only + * free it after the current request is retired, which ensures that + * all writes into the cacheline from previous requests are complete. 
+ */ + err = i915_active_ref(&tl->hwsp_cacheline->active, + tl->fence_context, rq); + if (err) + goto err_cacheline; + + cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */ + cacheline_free(tl->hwsp_cacheline); + + i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */ + i915_vma_put(tl->hwsp_ggtt); + + tl->hwsp_ggtt = i915_vma_get(vma); + + vaddr = page_mask_bits(cl->vaddr); + tl->hwsp_offset = cacheline * CACHELINE_BYTES; + tl->hwsp_seqno = + memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES); + + tl->hwsp_offset += i915_ggtt_offset(vma); + + cacheline_acquire(cl); + tl->hwsp_cacheline = cl; + + *seqno = timeline_advance(tl); + GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno)); + return 0; + +err_cacheline: + cacheline_free(cl); +err_unpin: + i915_vma_unpin(vma); +err_rollback: + timeline_rollback(tl); + return err; +} + +int i915_timeline_get_seqno(struct i915_timeline *tl, + struct i915_request *rq, + u32 *seqno) +{ + *seqno = timeline_advance(tl); + + /* Replace the HWSP on wraparound for HW semaphores */ + if (unlikely(!*seqno && tl->hwsp_cacheline)) + return __i915_timeline_get_seqno(tl, rq, seqno); + + return 0; +} + +static int cacheline_ref(struct i915_timeline_cacheline *cl, + struct i915_request *rq) +{ + return i915_active_ref(&cl->active, rq->fence.context, rq); +} + +int i915_timeline_read_hwsp(struct i915_request *from, + struct i915_request *to, + u32 *hwsp) +{ + struct i915_timeline_cacheline *cl = from->hwsp_cacheline; + struct i915_timeline *tl = from->timeline; + int err; + + GEM_BUG_ON(to->timeline == tl); + + mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); + err = i915_request_completed(from); + if (!err) + err = cacheline_ref(cl, to); + if (!err) { + if (likely(cl == tl->hwsp_cacheline)) { + *hwsp = tl->hwsp_offset; + } else { /* across a seqno wrap, recover the original offset */ + *hwsp = i915_ggtt_offset(cl->hwsp->vma) + + ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * + CACHELINE_BYTES; + } + } + mutex_unlock(&tl->mutex); + + return err; +} + +void i915_timeline_unpin(struct i915_timeline *tl) +{ + GEM_BUG_ON(!tl->pin_count); + if (--tl->pin_count) + return; + + timeline_remove_from_active(tl); + cacheline_release(tl->hwsp_cacheline); + + /* + * Since this timeline is idle, all bariers upon which we were waiting + * must also be complete and so we can discard the last used barriers + * without loss of information. 
+ */ + i915_syncmap_free(&tl->sync); + + __i915_vma_unpin(tl->hwsp_ggtt); +} + void __i915_timeline_free(struct kref *kref) { struct i915_timeline *timeline = @@ -99,6 +565,16 @@ void __i915_timeline_free(struct kref *kref) kfree(timeline); } +void i915_timelines_fini(struct drm_i915_private *i915) +{ + struct i915_gt_timelines *gt = &i915->gt.timelines; + + GEM_BUG_ON(!list_empty(>->active_list)); + GEM_BUG_ON(!list_empty(>->hwsp_free_list)); + + mutex_destroy(>->mutex); +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_timeline.c" #include "selftests/i915_timeline.c" diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index ebd71b487220..4ca7f80bdf6d 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h @@ -25,62 +25,15 @@ #ifndef I915_TIMELINE_H #define I915_TIMELINE_H -#include <linux/list.h> -#include <linux/kref.h> +#include <linux/lockdep.h> -#include "i915_request.h" +#include "i915_active.h" #include "i915_syncmap.h" -#include "i915_utils.h" +#include "i915_timeline_types.h" -struct i915_timeline { - u64 fence_context; - u32 seqno; - - spinlock_t lock; -#define TIMELINE_CLIENT 0 /* default subclass */ -#define TIMELINE_ENGINE 1 - - /** - * List of breadcrumbs associated with GPU requests currently - * outstanding. - */ - struct list_head requests; - - /* Contains an RCU guarded pointer to the last request. No reference is - * held to the request, users must carefully acquire a reference to - * the request using i915_gem_active_get_request_rcu(), or hold the - * struct_mutex. - */ - struct i915_gem_active last_request; - - /** - * We track the most recent seqno that we wait on in every context so - * that we only have to emit a new await and dependency on a more - * recent sync point. As the contexts may be executed out-of-order, we - * have to track each individually and can not rely on an absolute - * global_seqno. When we know that all tracked fences are completed - * (i.e. when the driver is idle), we know that the syncmap is - * redundant and we can discard it without loss of generality. - */ - struct i915_syncmap *sync; - /** - * Separately to the inter-context seqno map above, we track the last - * barrier (e.g. semaphore wait) to the global engine timelines. Note - * that this tracks global_seqno rather than the context.seqno, and - * so it is subject to the limitations of hw wraparound and that we - * may need to revoke global_seqno (on pre-emption). 
- */ - u32 global_sync[I915_NUM_ENGINES]; - - struct list_head link; - const char *name; - - struct kref kref; -}; - -void i915_timeline_init(struct drm_i915_private *i915, - struct i915_timeline *tl, - const char *name); +int i915_timeline_init(struct drm_i915_private *i915, + struct i915_timeline *tl, + struct i915_vma *hwsp); void i915_timeline_fini(struct i915_timeline *tl); static inline void @@ -103,7 +56,8 @@ i915_timeline_set_subclass(struct i915_timeline *timeline, } struct i915_timeline * -i915_timeline_create(struct drm_i915_private *i915, const char *name); +i915_timeline_create(struct drm_i915_private *i915, + struct i915_vma *global_hwsp); static inline struct i915_timeline * i915_timeline_get(struct i915_timeline *timeline) @@ -142,6 +96,33 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl, return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno); } +int i915_timeline_pin(struct i915_timeline *tl); +int i915_timeline_get_seqno(struct i915_timeline *tl, + struct i915_request *rq, + u32 *seqno); +void i915_timeline_unpin(struct i915_timeline *tl); + +int i915_timeline_read_hwsp(struct i915_request *from, + struct i915_request *until, + u32 *hwsp_offset); + +void i915_timelines_init(struct drm_i915_private *i915); void i915_timelines_park(struct drm_i915_private *i915); +void i915_timelines_fini(struct drm_i915_private *i915); + +/** + * i915_timeline_set_barrier - orders submission between different timelines + * @timeline: timeline to set the barrier on + * @rq: request after which new submissions can proceed + * + * Sets the passed in request as the serialization point for all subsequent + * submissions on @timeline. Subsequent requests will not be submitted to GPU + * until the barrier has been completed. + */ +static inline int +i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq) +{ + return i915_active_request_set(&tl->barrier, rq); +} #endif diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h new file mode 100644 index 000000000000..1f5b55d9ffb5 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_timeline_types.h @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __I915_TIMELINE_TYPES_H__ +#define __I915_TIMELINE_TYPES_H__ + +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/types.h> + +#include "i915_active_types.h" + +struct drm_i915_private; +struct i915_vma; +struct i915_timeline_cacheline; +struct i915_syncmap; + +struct i915_timeline { + u64 fence_context; + u32 seqno; + + spinlock_t lock; +#define TIMELINE_CLIENT 0 /* default subclass */ +#define TIMELINE_ENGINE 1 + struct mutex mutex; /* protects the flow of requests */ + + unsigned int pin_count; + const u32 *hwsp_seqno; + struct i915_vma *hwsp_ggtt; + u32 hwsp_offset; + + struct i915_timeline_cacheline *hwsp_cacheline; + + bool has_initial_breadcrumb; + + /** + * List of breadcrumbs associated with GPU requests currently + * outstanding. + */ + struct list_head requests; + + /* Contains an RCU guarded pointer to the last request. No reference is + * held to the request, users must carefully acquire a reference to + * the request using i915_active_request_get_request_rcu(), or hold the + * struct_mutex. 
+ */ + struct i915_active_request last_request; + + /** + * We track the most recent seqno that we wait on in every context so + * that we only have to emit a new await and dependency on a more + * recent sync point. As the contexts may be executed out-of-order, we + * have to track each individually and can not rely on an absolute + * global_seqno. When we know that all tracked fences are completed + * (i.e. when the driver is idle), we know that the syncmap is + * redundant and we can discard it without loss of generality. + */ + struct i915_syncmap *sync; + + /** + * Barrier provides the ability to serialize ordering between different + * timelines. + * + * Users can call i915_timeline_set_barrier which will make all + * subsequent submissions to this timeline be executed only after the + * barrier has been completed. + */ + struct i915_active_request barrier; + + struct list_head link; + struct drm_i915_private *i915; + + struct kref kref; +}; + +#endif /* __I915_TIMELINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index b50c6b829715..12893304c8f8 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -6,7 +6,8 @@ #include <linux/types.h> #include <linux/tracepoint.h> -#include <drm/drmP.h> +#include <drm/drm_drv.h> + #include "i915_drv.h" #include "intel_drv.h" #include "intel_ringbuffer.h" @@ -17,6 +18,87 @@ /* watermark/fifo updates */ +TRACE_EVENT(intel_pipe_enable, + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), + TP_ARGS(dev_priv, pipe), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(enum pipe, pipe) + ), + + TP_fast_assign( + enum pipe _pipe; + for_each_pipe(dev_priv, _pipe) { + __entry->frame[_pipe] = + dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); + __entry->scanline[_pipe] = + intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + } + __entry->pipe = pipe; + ), + + TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(intel_pipe_disable, + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), + TP_ARGS(dev_priv, pipe), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(enum pipe, pipe) + ), + + TP_fast_assign( + enum pipe _pipe; + for_each_pipe(dev_priv, _pipe) { + __entry->frame[_pipe] = + dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); + __entry->scanline[_pipe] = + intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + } + __entry->pipe = pipe; + ), + + TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(intel_pipe_crc, + TP_PROTO(struct intel_crtc *crtc, const u32 *crcs), + TP_ARGS(crtc, crcs), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __array(u32, crcs, 5) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, + crtc->pipe); + __entry->scanline = 
intel_get_crtc_scanline(crtc); + memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline, + __entry->crcs[0], __entry->crcs[1], __entry->crcs[2], + __entry->crcs[3], __entry->crcs[4]) +); + TRACE_EVENT(intel_cpu_fifo_underrun, TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), TP_ARGS(dev_priv, pipe), @@ -585,35 +667,6 @@ TRACE_EVENT(i915_gem_evict_vm, TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm) ); -TRACE_EVENT(i915_gem_ring_sync_to, - TP_PROTO(struct i915_request *to, struct i915_request *from), - TP_ARGS(to, from), - - TP_STRUCT__entry( - __field(u32, dev) - __field(u32, from_class) - __field(u32, from_instance) - __field(u32, to_class) - __field(u32, to_instance) - __field(u32, seqno) - ), - - TP_fast_assign( - __entry->dev = from->i915->drm.primary->index; - __entry->from_class = from->engine->uabi_class; - __entry->from_instance = from->engine->instance; - __entry->to_class = to->engine->uabi_class; - __entry->to_instance = to->engine->instance; - __entry->seqno = from->global_seqno; - ), - - TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u", - __entry->dev, - __entry->from_class, __entry->from_instance, - __entry->to_class, __entry->to_instance, - __entry->seqno) -); - TRACE_EVENT(i915_request_queue, TP_PROTO(struct i915_request *rq, u32 flags), TP_ARGS(rq, flags), @@ -655,7 +708,6 @@ DECLARE_EVENT_CLASS(i915_request, __field(u16, class) __field(u16, instance) __field(u32, seqno) - __field(u32, global) ), TP_fast_assign( @@ -665,13 +717,11 @@ DECLARE_EVENT_CLASS(i915_request, __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; - __entry->global = rq->global_seqno; ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u", + TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u", __entry->dev, __entry->class, __entry->instance, - __entry->hw_id, __entry->ctx, __entry->seqno, - __entry->global) + __entry->hw_id, __entry->ctx, __entry->seqno) ); DEFINE_EVENT(i915_request, i915_request_add, @@ -701,7 +751,6 @@ TRACE_EVENT(i915_request_in, __field(u16, class) __field(u16, instance) __field(u32, seqno) - __field(u32, global_seqno) __field(u32, port) __field(u32, prio) ), @@ -713,15 +762,14 @@ TRACE_EVENT(i915_request_in, __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; - __entry->global_seqno = rq->global_seqno; __entry->prio = rq->sched.attr.priority; __entry->port = port; ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u", + TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u", __entry->dev, __entry->class, __entry->instance, __entry->hw_id, __entry->ctx, __entry->seqno, - __entry->prio, __entry->global_seqno, __entry->port) + __entry->prio, __entry->port) ); TRACE_EVENT(i915_request_out, @@ -735,7 +783,6 @@ TRACE_EVENT(i915_request_out, __field(u16, class) __field(u16, instance) __field(u32, seqno) - __field(u32, global_seqno) __field(u32, completed) ), @@ -746,14 +793,13 @@ TRACE_EVENT(i915_request_out, __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; - __entry->global_seqno = rq->global_seqno; __entry->completed = i915_request_completed(rq); ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u", + 
TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u", __entry->dev, __entry->class, __entry->instance, __entry->hw_id, __entry->ctx, __entry->seqno, - __entry->global_seqno, __entry->completed) + __entry->completed) ); #else @@ -780,31 +826,6 @@ trace_i915_request_out(struct i915_request *rq) #endif #endif -TRACE_EVENT(intel_engine_notify, - TP_PROTO(struct intel_engine_cs *engine, bool waiters), - TP_ARGS(engine, waiters), - - TP_STRUCT__entry( - __field(u32, dev) - __field(u16, class) - __field(u16, instance) - __field(u32, seqno) - __field(bool, waiters) - ), - - TP_fast_assign( - __entry->dev = engine->i915->drm.primary->index; - __entry->class = engine->uabi_class; - __entry->instance = engine->instance; - __entry->seqno = intel_engine_get_seqno(engine); - __entry->waiters = waiters; - ), - - TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u", - __entry->dev, __entry->class, __entry->instance, - __entry->seqno, __entry->waiters) -); - DEFINE_EVENT(i915_request, i915_request_retire, TP_PROTO(struct i915_request *rq), TP_ARGS(rq) @@ -821,7 +842,6 @@ TRACE_EVENT(i915_request_wait_begin, __field(u16, class) __field(u16, instance) __field(u32, seqno) - __field(u32, global) __field(unsigned int, flags) ), @@ -838,14 +858,13 @@ TRACE_EVENT(i915_request_wait_begin, __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; - __entry->global = rq->global_seqno; __entry->flags = flags; ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x", + TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x", __entry->dev, __entry->class, __entry->instance, __entry->hw_id, __entry->ctx, __entry->seqno, - __entry->global, !!(__entry->flags & I915_WAIT_LOCKED), + !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags) ); diff --git a/drivers/gpu/drm/i915/i915_user_extensions.c b/drivers/gpu/drm/i915/i915_user_extensions.c new file mode 100644 index 000000000000..c822d0aafd2d --- /dev/null +++ b/drivers/gpu/drm/i915/i915_user_extensions.c @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include <linux/nospec.h> +#include <linux/sched/signal.h> +#include <linux/uaccess.h> + +#include <uapi/drm/i915_drm.h> + +#include "i915_user_extensions.h" +#include "i915_utils.h" + +int i915_user_extensions(struct i915_user_extension __user *ext, + const i915_user_extension_fn *tbl, + unsigned int count, + void *data) +{ + unsigned int stackdepth = 512; + + while (ext) { + int i, err; + u32 name; + u64 next; + + if (!stackdepth--) /* recursion vs useful flexibility */ + return -E2BIG; + + err = check_user_mbz(&ext->flags); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(ext->rsvd); i++) { + err = check_user_mbz(&ext->rsvd[i]); + if (err) + return err; + } + + if (get_user(name, &ext->name)) + return -EFAULT; + + err = -EINVAL; + if (name < count) { + name = array_index_nospec(name, count); + if (tbl[name]) + err = tbl[name](ext, data); + } + if (err) + return err; + + if (get_user(next, &ext->next_extension) || + overflows_type(next, ext)) + return -EFAULT; + + ext = u64_to_user_ptr(next); + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_user_extensions.h b/drivers/gpu/drm/i915/i915_user_extensions.h new file mode 100644 index 000000000000..a14bf6bba9a1 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_user_extensions.h @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 
2018 Intel Corporation + */ + +#ifndef I915_USER_EXTENSIONS_H +#define I915_USER_EXTENSIONS_H + +struct i915_user_extension; + +typedef int (*i915_user_extension_fn)(struct i915_user_extension __user *ext, + void *data); + +int i915_user_extensions(struct i915_user_extension __user *ext, + const i915_user_extension_fn *tbl, + unsigned int count, + void *data); + +#endif /* I915_USER_EXTENSIONS_H */ diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 9726df37c4c4..2dbe8933b50a 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -105,6 +105,37 @@ __T; \ }) +/* + * container_of_user: Extract the superclass from a pointer to a member. + * + * Exactly like container_of() with the exception that it plays nicely + * with sparse for __user @ptr. + */ +#define container_of_user(ptr, type, member) ({ \ + void __user *__mptr = (void __user *)(ptr); \ + BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ + !__same_type(*(ptr), void), \ + "pointer type mismatch in container_of()"); \ + ((type __user *)(__mptr - offsetof(type, member))); }) + +/* + * check_user_mbz: Check that a user value exists and is zero + * + * Frequently in our uABI we reserve space for future extensions, and + * two ensure that userspace is prepared we enforce that space must + * be zero. (Then any future extension can safely assume a default value + * of 0.) + * + * check_user_mbz() combines checking that the user pointer is accessible + * and that the contained value is zero. + * + * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success. + */ +#define check_user_mbz(U) ({ \ + typeof(*(U)) mbz__; \ + get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \ +}) + static inline u64 ptr_to_u64(const void *ptr) { return (uintptr_t)ptr; @@ -123,12 +154,6 @@ static inline u64 ptr_to_u64(const void *ptr) #include <linux/list.h> -static inline int list_is_first(const struct list_head *list, - const struct list_head *head) -{ - return head->next == list; -} - static inline void __list_del_many(struct list_head *head, struct list_head *first) { diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 869cf4a3b6de..94d3992b599d 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -60,30 +60,31 @@ */ void i915_check_vgpu(struct drm_i915_private *dev_priv) { + struct intel_uncore *uncore = &dev_priv->uncore; u64 magic; u16 version_major; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); - magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); + magic = __raw_uncore_read64(uncore, vgtif_reg(magic)); if (magic != VGT_MAGIC) return; - version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major)); + version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major)); if (version_major < VGT_VERSION_MAJOR) { DRM_INFO("VGT interface version mismatch!\n"); return; } - dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps)); + dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps)); dev_priv->vgpu.active = true; DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); } -bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv) +bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv) { - return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT; + return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT; } struct _balloon_info_ { diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 
551acc390046..ebe1b7bced98 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -28,7 +28,7 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv); -bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv); +bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv); static inline bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 5b4d78cdb4ca..36726392e737 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -25,11 +25,27 @@ #include "i915_vma.h" #include "i915_drv.h" +#include "i915_globals.h" #include "intel_ringbuffer.h" #include "intel_frontbuffer.h" #include <drm/drm_gem.h> +static struct i915_global_vma { + struct i915_global base; + struct kmem_cache *slab_vmas; +} global; + +struct i915_vma *i915_vma_alloc(void) +{ + return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL); +} + +void i915_vma_free(struct i915_vma *vma) +{ + return kmem_cache_free(global.slab_vmas, vma); +} + #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM) #include <linux/stackdepot.h> @@ -63,24 +79,22 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) #endif -struct i915_vma_active { - struct i915_gem_active base; - struct i915_vma *vma; - struct rb_node node; - u64 timeline; -}; - -static void -__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq) +static void obj_bump_mru(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj = vma->obj; + struct drm_i915_private *i915 = to_i915(obj->base.dev); - GEM_BUG_ON(!i915_vma_is_active(vma)); - if (--vma->active_count) - return; + spin_lock(&i915->mm.obj_lock); + if (obj->bind_count) + list_move_tail(&obj->mm.link, &i915->mm.bound_list); + spin_unlock(&i915->mm.obj_lock); - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + obj->mm.dirty = true; /* be paranoid */ +} + +static void __i915_vma_retire(struct i915_active *ref) +{ + struct i915_vma *vma = container_of(ref, typeof(*vma), active); + struct drm_i915_gem_object *obj = vma->obj; GEM_BUG_ON(!i915_gem_object_is_active(obj)); if (--obj->active_count) @@ -93,16 +107,12 @@ __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq) reservation_object_unlock(obj->resv); } - /* Bump our place on the bound list to keep it roughly in LRU order + /* + * Bump our place on the bound list to keep it roughly in LRU order * so that we don't steal from recently used but inactive objects * (unless we are forced to ofc!) 
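A note on the i915_user_extensions() helper introduced a few hunks above: it walks a userspace-supplied chain of struct i915_user_extension nodes, insists that each node's flags and rsvd[] fields are zero (check_user_mbz()), caps the walk at 512 links, and dispatches each node by name through a caller-supplied table, with array_index_nospec() clamping the index after the bounds check. A minimal, hypothetical caller might look like the sketch below; MY_EXT_FOO, my_ext_foo() and my_ioctl_apply_extensions() are invented names for illustration only and are not part of this patch.

#include <linux/kernel.h>
#include <uapi/drm/i915_drm.h>
#include "i915_user_extensions.h"

/* Hypothetical extension id and handler, purely for illustration. */
#define MY_EXT_FOO 0

static int my_ext_foo(struct i915_user_extension __user *ext, void *data)
{
	/* copy_from_user() the MY_EXT_FOO payload and update *data here. */
	return 0;
}

static const i915_user_extension_fn my_ext_handlers[] = {
	[MY_EXT_FOO] = my_ext_foo,
};

/* 'extensions' would be a u64 taken from an ioctl argument struct. */
static int my_ioctl_apply_extensions(u64 extensions, void *state)
{
	return i915_user_extensions(u64_to_user_ptr(extensions),
				    my_ext_handlers,
				    ARRAY_SIZE(my_ext_handlers),
				    state);
}

Because the chain is expressed purely as a u64 next_extension pointer plus a u32 name, the same table-driven loop can back any extensible ioctl without per-ioctl parsing code.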
*/ - spin_lock(&rq->i915->mm.obj_lock); - if (obj->bind_count) - list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list); - spin_unlock(&rq->i915->mm.obj_lock); - - obj->mm.dirty = true; /* be paranoid */ + obj_bump_mru(obj); if (i915_gem_object_has_active_reference(obj)) { i915_gem_object_clear_active_reference(obj); @@ -110,21 +120,6 @@ __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq) } } -static void -i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq) -{ - struct i915_vma_active *active = - container_of(base, typeof(*active), base); - - __i915_vma_retire(active->vma, rq); -} - -static void -i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq) -{ - __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq); -} - static struct i915_vma * vma_create(struct drm_i915_gem_object *obj, struct i915_address_space *vm, @@ -136,14 +131,13 @@ vma_create(struct drm_i915_gem_object *obj, /* The aliasing_ppgtt should never be used directly! */ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); - vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL); + vma = i915_vma_alloc(); if (vma == NULL) return ERR_PTR(-ENOMEM); - vma->active = RB_ROOT; + i915_active_init(vm->i915, &vma->active, __i915_vma_retire); + INIT_ACTIVE_REQUEST(&vma->last_fence); - init_request_active(&vma->last_active, i915_vma_last_retire); - init_request_active(&vma->last_fence, NULL); vma->vm = vm; vma->ops = &vm->vma_ops; vma->obj = obj; @@ -190,38 +184,61 @@ vma_create(struct drm_i915_gem_object *obj, i915_gem_object_get_stride(obj)); GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); - /* - * We put the GGTT vma at the start of the vma-list, followed - * by the ppGGTT vma. This allows us to break early when - * iterating over only the GGTT vma for an object, see - * for_each_ggtt_vma() - */ vma->flags |= I915_VMA_GGTT; - list_add(&vma->obj_link, &obj->vma_list); - } else { - list_add_tail(&vma->obj_link, &obj->vma_list); } + spin_lock(&obj->vma.lock); + rb = NULL; - p = &obj->vma_tree.rb_node; + p = &obj->vma.tree.rb_node; while (*p) { struct i915_vma *pos; + long cmp; rb = *p; pos = rb_entry(rb, struct i915_vma, obj_node); - if (i915_vma_compare(pos, vm, view) < 0) + + /* + * If the view already exists in the tree, another thread + * already created a matching vma, so return the older instance + * and dispose of ours. + */ + cmp = i915_vma_compare(pos, vm, view); + if (cmp == 0) { + spin_unlock(&obj->vma.lock); + i915_vma_free(vma); + return pos; + } + + if (cmp < 0) p = &rb->rb_right; else p = &rb->rb_left; } rb_link_node(&vma->obj_node, rb, p); - rb_insert_color(&vma->obj_node, &obj->vma_tree); + rb_insert_color(&vma->obj_node, &obj->vma.tree); + + if (i915_vma_is_ggtt(vma)) + /* + * We put the GGTT vma at the start of the vma-list, followed + * by the ppGGTT vma. 
This allows us to break early when + * iterating over only the GGTT vma for an object, see + * for_each_ggtt_vma() + */ + list_add(&vma->obj_link, &obj->vma.list); + else + list_add_tail(&vma->obj_link, &obj->vma.list); + + spin_unlock(&obj->vma.lock); + + mutex_lock(&vm->mutex); list_add(&vma->vm_link, &vm->unbound_list); + mutex_unlock(&vm->mutex); return vma; err_vma: - kmem_cache_free(vm->i915->vmas, vma); + i915_vma_free(vma); return ERR_PTR(-E2BIG); } @@ -232,7 +249,7 @@ vma_lookup(struct drm_i915_gem_object *obj, { struct rb_node *rb; - rb = obj->vma_tree.rb_node; + rb = obj->vma.tree.rb_node; while (rb) { struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node); long cmp; @@ -272,16 +289,18 @@ i915_vma_instance(struct drm_i915_gem_object *obj, { struct i915_vma *vma; - lockdep_assert_held(&obj->base.dev->struct_mutex); GEM_BUG_ON(view && !i915_is_ggtt(vm)); GEM_BUG_ON(vm->closed); + spin_lock(&obj->vma.lock); vma = vma_lookup(obj, vm, view); - if (!vma) + spin_unlock(&obj->vma.lock); + + /* vma_create() will resolve the race if another creates the vma */ + if (unlikely(!vma)) vma = vma_create(obj, vm, view); GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view)); - GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma); return vma; } @@ -659,7 +678,9 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level)); - list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + mutex_lock(&vma->vm->mutex); + list_move_tail(&vma->vm_link, &vma->vm->bound_list); + mutex_unlock(&vma->vm->mutex); if (vma->obj) { struct drm_i915_gem_object *obj = vma->obj; @@ -692,8 +713,10 @@ i915_vma_remove(struct i915_vma *vma) vma->ops->clear_pages(vma); + mutex_lock(&vma->vm->mutex); drm_mm_remove_node(&vma->node); list_move_tail(&vma->vm_link, &vma->vm->unbound_list); + mutex_unlock(&vma->vm->mutex); /* * Since the unbound list is global, only move to that list if @@ -796,25 +819,27 @@ void i915_vma_reopen(struct i915_vma *vma) static void __i915_vma_destroy(struct i915_vma *vma) { - struct drm_i915_private *i915 = vma->vm->i915; - struct i915_vma_active *iter, *n; - GEM_BUG_ON(vma->node.allocated); GEM_BUG_ON(vma->fence); - GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence)); + GEM_BUG_ON(i915_active_request_isset(&vma->last_fence)); - list_del(&vma->obj_link); + mutex_lock(&vma->vm->mutex); list_del(&vma->vm_link); - if (vma->obj) - rb_erase(&vma->obj_node, &vma->obj->vma_tree); + mutex_unlock(&vma->vm->mutex); + + if (vma->obj) { + struct drm_i915_gem_object *obj = vma->obj; - rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { - GEM_BUG_ON(i915_gem_active_isset(&iter->base)); - kfree(iter); + spin_lock(&obj->vma.lock); + list_del(&vma->obj_link); + rb_erase(&vma->obj_node, &vma->obj->vma.tree); + spin_unlock(&obj->vma.lock); } - kmem_cache_free(i915->vmas, vma); + i915_active_fini(&vma->active); + + i915_vma_free(vma); } void i915_vma_destroy(struct i915_vma *vma) @@ -897,104 +922,15 @@ static void export_fence(struct i915_vma *vma, reservation_object_unlock(resv); } -static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx) -{ - struct i915_vma_active *active; - struct rb_node **p, *parent; - struct i915_request *old; - - /* - * We track the most recently used timeline to skip a rbtree search - * for the common case, under typical loads we never need the rbtree - * at all. 
We can reuse the last_active slot if it is empty, that is - * after the previous activity has been retired, or if the active - * matches the current timeline. - * - * Note that we allow the timeline to be active simultaneously in - * the rbtree and the last_active cache. We do this to avoid having - * to search and replace the rbtree element for a new timeline, with - * the cost being that we must be aware that the vma may be retired - * twice for the same timeline (as the older rbtree element will be - * retired before the new request added to last_active). - */ - old = i915_gem_active_raw(&vma->last_active, - &vma->vm->i915->drm.struct_mutex); - if (!old || old->fence.context == idx) - goto out; - - /* Move the currently active fence into the rbtree */ - idx = old->fence.context; - - parent = NULL; - p = &vma->active.rb_node; - while (*p) { - parent = *p; - - active = rb_entry(parent, struct i915_vma_active, node); - if (active->timeline == idx) - goto replace; - - if (active->timeline < idx) - p = &parent->rb_right; - else - p = &parent->rb_left; - } - - active = kmalloc(sizeof(*active), GFP_KERNEL); - - /* kmalloc may retire the vma->last_active request (thanks shrinker)! */ - if (unlikely(!i915_gem_active_raw(&vma->last_active, - &vma->vm->i915->drm.struct_mutex))) { - kfree(active); - goto out; - } - - if (unlikely(!active)) - return ERR_PTR(-ENOMEM); - - init_request_active(&active->base, i915_vma_retire); - active->vma = vma; - active->timeline = idx; - - rb_link_node(&active->node, parent, p); - rb_insert_color(&active->node, &vma->active); - -replace: - /* - * Overwrite the previous active slot in the rbtree with last_active, - * leaving last_active zeroed. If the previous slot is still active, - * we must be careful as we now only expect to receive one retire - * callback not two, and so much undo the active counting for the - * overwritten slot. - */ - if (i915_gem_active_isset(&active->base)) { - /* Retire ourselves from the old rq->active_list */ - __list_del_entry(&active->base.link); - vma->active_count--; - GEM_BUG_ON(!vma->active_count); - } - GEM_BUG_ON(list_empty(&vma->last_active.link)); - list_replace_init(&vma->last_active.link, &active->base.link); - active->base.request = fetch_and_zero(&vma->last_active.request); - -out: - return &vma->last_active; -} - int i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, unsigned int flags) { struct drm_i915_gem_object *obj = vma->obj; - struct i915_gem_active *active; lockdep_assert_held(&rq->i915->drm.struct_mutex); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - active = active_instance(vma, rq->fence.context); - if (IS_ERR(active)) - return PTR_ERR(active); - /* * Add a reference if we're newly entering the active list. * The order in which we add operations to the retirement queue is @@ -1003,11 +939,15 @@ int i915_vma_move_to_active(struct i915_vma *vma, * add the active reference first and queue for it to be dropped * *last*. 
*/ - if (!i915_gem_active_isset(active) && !vma->active_count++) { - list_move_tail(&vma->vm_link, &vma->vm->active_list); + if (!vma->active.count) obj->active_count++; + + if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) { + if (!vma->active.count) + obj->active_count--; + return -ENOMEM; } - i915_gem_active_set(active, rq); + GEM_BUG_ON(!i915_vma_is_active(vma)); GEM_BUG_ON(!obj->active_count); @@ -1016,14 +956,14 @@ int i915_vma_move_to_active(struct i915_vma *vma, obj->write_domain = I915_GEM_DOMAIN_RENDER; if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) - i915_gem_active_set(&obj->frontbuffer_write, rq); + __i915_active_request_set(&obj->frontbuffer_write, rq); obj->read_domains = 0; } obj->read_domains |= I915_GEM_GPU_DOMAINS; if (flags & EXEC_OBJECT_NEEDS_FENCE) - i915_gem_active_set(&vma->last_fence, rq); + __i915_active_request_set(&vma->last_fence, rq); export_fence(vma, rq, flags); return 0; @@ -1041,8 +981,6 @@ int i915_vma_unbind(struct i915_vma *vma) */ might_sleep(); if (i915_vma_is_active(vma)) { - struct i915_vma_active *active, *n; - /* * When a closed VMA is retired, it is unbound - eek. * In order to prevent it from being recursively closed, @@ -1058,21 +996,12 @@ int i915_vma_unbind(struct i915_vma *vma) */ __i915_vma_pin(vma); - ret = i915_gem_active_retire(&vma->last_active, - &vma->vm->i915->drm.struct_mutex); + ret = i915_active_wait(&vma->active); if (ret) goto unpin; - rbtree_postorder_for_each_entry_safe(active, n, - &vma->active, node) { - ret = i915_gem_active_retire(&active->base, - &vma->vm->i915->drm.struct_mutex); - if (ret) - goto unpin; - } - - ret = i915_gem_active_retire(&vma->last_fence, - &vma->vm->i915->drm.struct_mutex); + ret = i915_active_request_retire(&vma->last_fence, + &vma->vm->i915->drm.struct_mutex); unpin: __i915_vma_unpin(vma); if (ret) @@ -1126,3 +1055,28 @@ unpin: #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/i915_vma.c" #endif + +static void i915_global_vma_shrink(void) +{ + kmem_cache_shrink(global.slab_vmas); +} + +static void i915_global_vma_exit(void) +{ + kmem_cache_destroy(global.slab_vmas); +} + +static struct i915_global_vma global = { { + .shrink = i915_global_vma_shrink, + .exit = i915_global_vma_exit, +} }; + +int __init i915_global_vma_init(void) +{ + global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); + if (!global.slab_vmas) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 4f7c1c7599f4..6eab70953a57 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -34,6 +34,7 @@ #include "i915_gem_fence_reg.h" #include "i915_gem_object.h" +#include "i915_active.h" #include "i915_request.h" enum i915_cache_level; @@ -71,34 +72,45 @@ struct i915_vma { unsigned int open_count; unsigned long flags; /** - * How many users have pinned this object in GTT space. The following - * users can each hold at most one reference: pwrite/pread, execbuffer - * (objects are not allowed multiple times for the same batchbuffer), - * and the framebuffer code. When switching/pageflipping, the - * framebuffer code has at most two buffers pinned per crtc. + * How many users have pinned this object in GTT space. * - * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 - * bits with absolutely no headroom. So use 4 bits. 
+ * This is a tightly bound, fairly small number of users, so we + * stuff inside the flags field so that we can both check for overflow + * and detect a no-op i915_vma_pin() in a single check, while also + * pinning the vma. + * + * The worst case display setup would have the same vma pinned for + * use on each plane on each crtc, while also building the next atomic + * state and holding a pin for the length of the cleanup queue. In the + * future, the flip queue may be increased from 1. + * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84 + * + * For GEM, the number of concurrent users for pwrite/pread is + * unbounded. For execbuffer, it is currently one but will in future + * be extended to allow multiple clients to pin vma concurrently. + * + * We also use suballocated pages, with each suballocation claiming + * its own pin on the shared vma. At present, this is limited to + * exclusive cachelines of a single page, so a maximum of 64 possible + * users. */ -#define I915_VMA_PIN_MASK 0xf -#define I915_VMA_PIN_OVERFLOW BIT(5) +#define I915_VMA_PIN_MASK 0xff +#define I915_VMA_PIN_OVERFLOW BIT(8) /** Flags and address space this VMA is bound to */ -#define I915_VMA_GLOBAL_BIND BIT(6) -#define I915_VMA_LOCAL_BIND BIT(7) +#define I915_VMA_GLOBAL_BIND BIT(9) +#define I915_VMA_LOCAL_BIND BIT(10) #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW) -#define I915_VMA_GGTT BIT(8) -#define I915_VMA_CAN_FENCE BIT(9) -#define I915_VMA_CLOSED BIT(10) -#define I915_VMA_USERFAULT_BIT 11 +#define I915_VMA_GGTT BIT(11) +#define I915_VMA_CAN_FENCE BIT(12) +#define I915_VMA_CLOSED BIT(13) +#define I915_VMA_USERFAULT_BIT 14 #define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT) -#define I915_VMA_GGTT_WRITE BIT(12) +#define I915_VMA_GGTT_WRITE BIT(15) - unsigned int active_count; - struct rb_root active; - struct i915_gem_active last_active; - struct i915_gem_active last_fence; + struct i915_active active; + struct i915_active_request last_fence; /** * Support different GGTT views into the same object. @@ -141,9 +153,9 @@ i915_vma_instance(struct drm_i915_gem_object *obj, void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags); #define I915_VMA_RELEASE_MAP BIT(0) -static inline bool i915_vma_is_active(struct i915_vma *vma) +static inline bool i915_vma_is_active(const struct i915_vma *vma) { - return vma->active_count; + return !i915_active_is_idle(&vma->active); } int __must_check i915_vma_move_to_active(struct i915_vma *vma, @@ -425,7 +437,10 @@ void i915_vma_parked(struct drm_i915_private *i915); * or the list is empty ofc. 
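The rewritten pin-count comment above is worth a second read: the count now occupies the low eight bits of vma->flags (mask 0xff), with BIT(8) reserved directly above it as an overflow trap, which is why every other VMA flag moves up by three bit positions in this hunk. A single increment both takes the pin and, via the overflow bit, detects saturation. Below is a stripped-down sketch of that idiom using invented names; it is not the driver's actual i915_vma_pin()/unpin().

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/errno.h>

#define EX_PIN_MASK	0xffUL		/* bits 0-7 hold the pin count */
#define EX_PIN_OVERFLOW	BIT(8)		/* trips when the count wraps */

static int ex_pin(unsigned long *flags)
{
	*flags += 1;				/* pin: bump the low-bit counter */
	if (unlikely(*flags & EX_PIN_OVERFLOW)) {
		*flags -= 1;			/* undo; 255 pins already held */
		return -EBUSY;
	}
	return 0;
}

static void ex_unpin(unsigned long *flags)
{
	WARN_ON(!(*flags & EX_PIN_MASK));	/* unbalanced unpin */
	*flags -= 1;
}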
*/ #define for_each_ggtt_vma(V, OBJ) \ - list_for_each_entry(V, &(OBJ)->vma_list, obj_link) \ + list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \ for_each_until(!i915_vma_is_ggtt(V)) +struct i915_vma *i915_vma_alloc(void); +void i915_vma_free(struct i915_vma *vma); + #endif diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 4dd793b78996..b67ffaa283dc 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -246,13 +246,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) for (lane = 0; lane <= 3; lane++) { /* Bspec: must not use GRP register for write */ - tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane)); + tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port)); tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); tmp |= POST_CURSOR_1(0x0); tmp |= POST_CURSOR_2(0x0); tmp |= CURSOR_COEFF(0x3f); - I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp); + I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp); } } } @@ -337,9 +337,11 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) } for_each_dsi_port(port, intel_dsi->ports) { - intel_display_power_get(dev_priv, port == PORT_A ? - POWER_DOMAIN_PORT_DDI_A_IO : - POWER_DOMAIN_PORT_DDI_B_IO); + intel_dsi->io_wakeref[port] = + intel_display_power_get(dev_priv, + port == PORT_A ? + POWER_DOMAIN_PORT_DDI_A_IO : + POWER_DOMAIN_PORT_DDI_B_IO); } } @@ -388,11 +390,11 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) tmp &= ~LOADGEN_SELECT; I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); for (lane = 0; lane <= 3; lane++) { - tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane)); + tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port)); tmp &= ~LOADGEN_SELECT; if (lane != 2) tmp |= LOADGEN_SELECT; - I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp); + I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp); } } @@ -859,7 +861,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) I915_WRITE(PIPECONF(dsi_trans), tmp); /* wait for transcoder to be enabled */ - if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans), + if (intel_wait_for_register(&dev_priv->uncore, + PIPECONF(dsi_trans), I965_PIPECONF_ACTIVE, I965_PIPECONF_ACTIVE, 10)) DRM_ERROR("DSI transcoder not enabled\n"); @@ -1037,7 +1040,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) I915_WRITE(PIPECONF(dsi_trans), tmp); /* wait for transcoder to be disabled */ - if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans), + if (intel_wait_for_register(&dev_priv->uncore, + PIPECONF(dsi_trans), I965_PIPECONF_ACTIVE, 0, 50)) DRM_ERROR("DSI trancoder not disabled\n"); } @@ -1125,10 +1129,18 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) enum port port; u32 tmp; - intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO); - - if (intel_dsi->dual_link) - intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO); + for_each_dsi_port(port, intel_dsi->ports) { + intel_wakeref_t wakeref; + + wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]); + if (wakeref) { + intel_display_power_put(dev_priv, + port == PORT_A ? 
+ POWER_DOMAIN_PORT_DDI_A_IO : + POWER_DOMAIN_PORT_DDI_B_IO, + wakeref); + } + } /* set mode to DDI */ for_each_dsi_port(port, intel_dsi->ports) { @@ -1169,18 +1181,17 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - u32 pll_id; /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */ - pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); - pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id); + pipe_config->port_clock = + cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state); pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk; pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); } -static bool gen11_dsi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int gen11_dsi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); @@ -1205,7 +1216,7 @@ static bool gen11_dsi_compute_config(struct intel_encoder *encoder, pipe_config->clock_set = true; pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5; - return true; + return 0; } static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder, @@ -1229,13 +1240,15 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - u32 tmp; - enum port port; enum transcoder dsi_trans; + intel_wakeref_t wakeref; + enum port port; bool ret = false; + u32 tmp; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; for_each_dsi_port(port, intel_dsi->ports) { @@ -1260,7 +1273,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, ret = tmp & PIPECONF_ENABLE; } out: - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } @@ -1349,7 +1362,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) struct intel_encoder *encoder; struct intel_connector *intel_connector; struct drm_connector *connector; - struct drm_display_mode *scan, *fixed_mode = NULL; + struct drm_display_mode *fixed_mode; enum port port; if (!intel_bios_is_dsi_present(dev_priv, &port)) @@ -1378,6 +1391,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->disable = gen11_dsi_disable; encoder->port = port; encoder->get_config = gen11_dsi_get_config; + encoder->update_pipe = intel_panel_update_backlight; encoder->compute_config = gen11_dsi_compute_config; encoder->get_hw_state = gen11_dsi_get_hw_state; encoder->type = INTEL_OUTPUT_DSI; @@ -1398,15 +1412,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) /* attach connector to encoder */ intel_connector_attach_encoder(intel_connector, encoder); - /* fill mode info from VBT */ mutex_lock(&dev->mode_config.mutex); - intel_dsi_vbt_get_modes(intel_dsi); - list_for_each_entry(scan, &connector->probed_modes, head) { - if (scan->type & DRM_MODE_TYPE_PREFERRED) { - fixed_mode = drm_mode_duplicate(dev, scan); - break; - } - } + fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); mutex_unlock(&dev->mode_config.mutex); if 
(!fixed_mode) { @@ -1414,12 +1421,9 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) goto err; } - connector->display_info.width_mm = fixed_mode->width_mm; - connector->display_info.height_mm = fixed_mode->height_mm; intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_panel_setup_backlight(connector, INVALID_PIPE); - if (dev_priv->vbt.dsi.config->dual_link) intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B); else diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 6ba478e57b9b..9d142d038a7d 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -6,7 +6,6 @@ */ #include <linux/pci.h> #include <linux/acpi.h> -#include <drm/drmP.h> #include "i915_drv.h" #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 8cb02f28d30c..b844e8840c6f 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -29,10 +29,11 @@ * See intel_atomic_plane.c for the plane-specific atomic functionality. */ -#include <drm/drmP.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> + #include "intel_drv.h" /** @@ -47,7 +48,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, - uint64_t *val) + u64 *val) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -79,7 +80,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, int intel_digital_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, - uint64_t val) + u64 val) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -125,6 +126,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, */ if (new_conn_state->force_audio != old_conn_state->force_audio || new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb || + new_conn_state->base.colorspace != old_conn_state->base.colorspace || new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio || new_conn_state->base.content_type != old_conn_state->base.content_type || new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode) @@ -233,10 +235,11 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta if (plane_state && plane_state->base.fb && plane_state->base.fb->format->is_yuv && plane_state->base.fb->format->num_planes > 1) { - if (IS_GEN9(dev_priv) && + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) { mode = SKL_PS_SCALER_MODE_NV12; - } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) { + } else if (icl_is_hdr_plane(dev_priv, plane->id)) { /* * On gen11+'s HDR planes we only use the scaler for * scaling. They have a dedicated chroma upsampler, so diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index 0a73e6e65c20..9d32a6fcf840 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -31,9 +31,10 @@ * prepare/check/commit/cleanup steps. 
*/ -#include <drm/drmP.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> + #include "intel_drv.h" struct intel_plane *intel_plane_alloc(void) @@ -111,41 +112,44 @@ intel_plane_destroy_state(struct drm_plane *plane, } int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *crtc_state, + struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, - struct intel_plane_state *intel_state) + struct intel_plane_state *new_plane_state) { - struct drm_plane *plane = intel_state->base.plane; - struct drm_plane_state *state = &intel_state->base; - struct intel_plane *intel_plane = to_intel_plane(plane); + struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane); int ret; - crtc_state->active_planes &= ~BIT(intel_plane->id); - crtc_state->nv12_planes &= ~BIT(intel_plane->id); - intel_state->base.visible = false; + new_crtc_state->active_planes &= ~BIT(plane->id); + new_crtc_state->nv12_planes &= ~BIT(plane->id); + new_crtc_state->c8_planes &= ~BIT(plane->id); + new_plane_state->base.visible = false; - /* If this is a cursor plane, no further checks are needed. */ - if (!intel_state->base.crtc && !old_plane_state->base.crtc) + if (!new_plane_state->base.crtc && !old_plane_state->base.crtc) return 0; - ret = intel_plane->check_plane(crtc_state, intel_state); + ret = plane->check_plane(new_crtc_state, new_plane_state); if (ret) return ret; /* FIXME pre-g4x don't work like this */ - if (state->visible) - crtc_state->active_planes |= BIT(intel_plane->id); + if (new_plane_state->base.visible) + new_crtc_state->active_planes |= BIT(plane->id); + + if (new_plane_state->base.visible && + is_planar_yuv_format(new_plane_state->base.fb->format->format)) + new_crtc_state->nv12_planes |= BIT(plane->id); - if (state->visible && state->fb->format->format == DRM_FORMAT_NV12) - crtc_state->nv12_planes |= BIT(intel_plane->id); + if (new_plane_state->base.visible && + new_plane_state->base.fb->format->format == DRM_FORMAT_C8) + new_crtc_state->c8_planes |= BIT(plane->id); - if (state->visible || old_plane_state->base.visible) - crtc_state->update_planes |= BIT(intel_plane->id); + if (new_plane_state->base.visible || old_plane_state->base.visible) + new_crtc_state->update_planes |= BIT(plane->id); return intel_plane_atomic_calc_changes(old_crtc_state, - &crtc_state->base, + &new_crtc_state->base, old_plane_state, - state); + &new_plane_state->base); } static int intel_plane_atomic_check(struct drm_plane *plane, @@ -215,6 +219,35 @@ skl_next_plane_to_commit(struct intel_atomic_state *state, return NULL; } +void intel_update_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + trace_intel_update_plane(&plane->base, crtc); + plane->update_plane(plane, crtc_state, plane_state); +} + +void intel_update_slave(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + trace_intel_update_plane(&plane->base, crtc); + plane->update_slave(plane, crtc_state, plane_state); +} + +void intel_disable_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + trace_intel_disable_plane(&plane->base, crtc); + 
plane->disable_plane(plane, crtc_state); +} + void skl_update_planes_on_crtc(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -239,8 +272,7 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state, intel_atomic_get_new_plane_state(state, plane); if (new_plane_state->base.visible) { - trace_intel_update_plane(&plane->base, crtc); - plane->update_plane(plane, new_crtc_state, new_plane_state); + intel_update_plane(plane, new_crtc_state, new_plane_state); } else if (new_plane_state->slave) { struct intel_plane *master = new_plane_state->linked_plane; @@ -257,11 +289,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state, new_plane_state = intel_atomic_get_new_plane_state(state, master); - trace_intel_update_plane(&plane->base, crtc); - plane->update_slave(plane, new_crtc_state, new_plane_state); + intel_update_slave(plane, new_crtc_state, new_plane_state); } else { - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, new_crtc_state); + intel_disable_plane(plane, new_crtc_state); } } } @@ -281,13 +311,10 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - if (new_plane_state->base.visible) { - trace_intel_update_plane(&plane->base, crtc); - plane->update_plane(plane, new_crtc_state, new_plane_state); - } else { - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, new_crtc_state); - } + if (new_plane_state->base.visible) + intel_update_plane(plane, new_crtc_state, new_plane_state); + else + intel_disable_plane(plane, new_crtc_state); } } @@ -312,7 +339,7 @@ int intel_plane_atomic_get_property(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, - uint64_t *val) + u64 *val) { DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n", property->base.id, property->name); @@ -335,7 +362,7 @@ int intel_plane_atomic_set_property(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, - uint64_t val) + u64 val) { DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n", property->base.id, property->name); diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index ae55a6865d5c..20324b0d34c7 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -27,7 +27,6 @@ #include <drm/intel_lpe_audio.h> #include "intel_drv.h" -#include <drm/drmP.h> #include <drm/drm_edid.h> #include "i915_drv.h" @@ -742,26 +741,91 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv) } } -static void i915_audio_component_get_power(struct device *kdev) +static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv, + bool enable) { - intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO); + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_state *state; + int ret; + + drm_modeset_acquire_init(&ctx, 0); + state = drm_atomic_state_alloc(&dev_priv->drm); + if (WARN_ON(!state)) + return; + + state->acquire_ctx = &ctx; + +retry: + to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true; + to_intel_atomic_state(state)->cdclk.force_min_cdclk = + enable ? 2 * 96000 : 0; + + /* + * Protects dev_priv->cdclk.force_min_cdclk + * Need to lock this here in case we have no active pipes + * and thus wouldn't lock it during the commit otherwise. 
+ */ + ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, + &ctx); + if (!ret) + ret = drm_atomic_commit(state); + + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry; + } + + WARN_ON(ret); + + drm_atomic_state_put(state); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } -static void i915_audio_component_put_power(struct device *kdev) +static unsigned long i915_audio_component_get_power(struct device *kdev) { - intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO); + struct drm_i915_private *dev_priv = kdev_to_i915(kdev); + intel_wakeref_t ret; + + /* Catch potential impedance mismatches before they occur! */ + BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long)); + + ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); + + /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */ + if (dev_priv->audio_power_refcount++ == 0) + if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + glk_force_audio_cdclk(dev_priv, true); + + return ret; +} + +static void i915_audio_component_put_power(struct device *kdev, + unsigned long cookie) +{ + struct drm_i915_private *dev_priv = kdev_to_i915(kdev); + + /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ + if (--dev_priv->audio_power_refcount == 0) + if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + glk_force_audio_cdclk(dev_priv, false); + + intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie); } static void i915_audio_component_codec_wake_override(struct device *kdev, bool enable) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); + unsigned long cookie; u32 tmp; - if (!IS_GEN9(dev_priv)) + if (!IS_GEN(dev_priv, 9)) return; - i915_audio_component_get_power(kdev); + cookie = i915_audio_component_get_power(kdev); /* * Enable/disable generating the codec wake signal, overriding the @@ -779,7 +843,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev, usleep_range(1000, 1500); } - i915_audio_component_put_power(kdev); + i915_audio_component_put_power(kdev, cookie); } /* Get CDCLK in kHz */ @@ -850,12 +914,13 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, struct i915_audio_component *acomp = dev_priv->audio_component; struct intel_encoder *encoder; struct intel_crtc *crtc; + unsigned long cookie; int err = 0; if (!HAS_DDI(dev_priv)) return 0; - i915_audio_component_get_power(kdev); + cookie = i915_audio_component_get_power(kdev); mutex_lock(&dev_priv->av_mutex); /* 1. 
get the pipe */ @@ -875,7 +940,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, unlock: mutex_unlock(&dev_priv->av_mutex); - i915_audio_component_put_power(kdev); + i915_audio_component_put_power(kdev, cookie); return err; } @@ -984,7 +1049,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) { int ret; - ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); + ret = component_add_typed(dev_priv->drm.dev, + &i915_audio_component_bind_ops, + I915_COMPONENT_AUDIO); if (ret < 0) { DRM_ERROR("failed to add audio component (%d)\n", ret); /* continue with reduced functionality */ diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 6d3e0260d49c..1dc8d03ff127 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -26,7 +26,6 @@ */ #include <drm/drm_dp_helper.h> -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -453,7 +452,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version) * Only parse SDVO mappings on gens that could have SDVO. This isn't * accurate and doesn't have to be, as long as it's not too strict. */ - if (!IS_GEN(dev_priv, 3, 7)) { + if (!IS_GEN_RANGE(dev_priv, 3, 7)) { DRM_DEBUG_KMS("Skipping SDVO device mapping\n"); return; } @@ -761,6 +760,31 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100; dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100; } + + if (bdb->version >= 226) { + u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time; + + wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3; + switch (wakeup_time) { + case 0: + wakeup_time = 500; + break; + case 1: + wakeup_time = 100; + break; + case 3: + wakeup_time = 50; + break; + default: + case 2: + wakeup_time = 2500; + break; + } + dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time; + } else { + /* Reusing PSR1 wakeup time for PSR2 in older VBTs */ + dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = dev_priv->vbt.psr.tp2_tp3_wakeup_time_us; + } } static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv, @@ -1223,10 +1247,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, if (!info->alternate_ddc_pin) return; - for_each_port_masked(p, (1 << port) - 1) { + for (p = PORT_A; p < I915_MAX_PORTS; p++) { struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; - if (info->alternate_ddc_pin != i->alternate_ddc_pin) + if (p == port || !i->present || + info->alternate_ddc_pin != i->alternate_ddc_pin) continue; DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, " @@ -1240,8 +1265,8 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, * port. Otherwise they share the same ddc bin and * system couldn't communicate with them separately. * - * Due to parsing the ports in alphabetical order, - * a higher port will always clobber a lower one. + * Due to parsing the ports in child device order, + * a later device will always clobber an earlier one. 
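For reference, the new PSR2 parsing above (bdb->version >= 226) packs one 2-bit code per panel into a single VBT field, and each code selects a fixed TP2/TP3 wakeup time in microseconds. The decode reduces to a small switch, shown here as a hypothetical helper; the patch itself open-codes this inside parse_psr().

#include <linux/types.h>

/* Invented helper name; mirrors the version >= 226 decode above. */
static u32 psr2_tp2_tp3_wakeup_us(u32 packed, int panel_type)
{
	switch ((packed >> (2 * panel_type)) & 0x3) {
	case 0:
		return 500;
	case 1:
		return 100;
	case 3:
		return 50;
	case 2:
	default:
		return 2500;
	}
}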
*/ i->supports_dvi = false; i->supports_hdmi = false; @@ -1259,10 +1284,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, if (!info->alternate_aux_channel) return; - for_each_port_masked(p, (1 << port) - 1) { + for (p = PORT_A; p < I915_MAX_PORTS; p++) { struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; - if (info->alternate_aux_channel != i->alternate_aux_channel) + if (p == port || !i->present || + info->alternate_aux_channel != i->alternate_aux_channel) continue; DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, " @@ -1276,8 +1302,8 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, * port. Otherwise they share the same aux channel * and system couldn't communicate with them separately. * - * Due to parsing the ports in alphabetical order, - * a higher port will always clobber a lower one. + * Due to parsing the ports in child device order, + * a later device will always clobber an earlier one. */ i->supports_dp = false; i->alternate_aux_channel = 0; @@ -1325,48 +1351,57 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) return 0; } -static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, - u8 bdb_version) +static enum port dvo_port_to_port(u8 dvo_port) { - struct child_device_config *it, *child = NULL; - struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; - int i, j; - bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; - /* Each DDI port can have more than one value on the "DVO Port" field, + /* + * Each DDI port can have more than one value on the "DVO Port" field, * so look for all the possible values for each port. */ - int dvo_ports[][3] = { - {DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, - {DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, - {DVO_PORT_HDMIC, DVO_PORT_DPC, -1}, - {DVO_PORT_HDMID, DVO_PORT_DPD, -1}, - {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, - {DVO_PORT_HDMIF, DVO_PORT_DPF, -1}, + static const int dvo_ports[][3] = { + [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, + [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, + [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1}, + [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1}, + [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, + [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1}, }; + enum port port; + int i; - /* - * Find the first child device to reference the port, report if more - * than one found. 
- */ - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - it = dev_priv->vbt.child_dev + i; - - for (j = 0; j < 3; j++) { - if (dvo_ports[port][j] == -1) + for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) { + for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) { + if (dvo_ports[port][i] == -1) break; - if (it->dvo_port == dvo_ports[port][j]) { - if (child) { - DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", - port_name(port)); - } else { - child = it; - } - } + if (dvo_port == dvo_ports[port][i]) + return port; } } - if (!child) + + return PORT_NONE; +} + +static void parse_ddi_port(struct drm_i915_private *dev_priv, + const struct child_device_config *child, + u8 bdb_version) +{ + struct ddi_vbt_port_info *info; + bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; + enum port port; + + port = dvo_port_to_port(child->dvo_port); + if (port == PORT_NONE) + return; + + info = &dev_priv->vbt.ddi_port_info[port]; + + if (info->present) { + DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", + port_name(port)); return; + } + + info->present = true; is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; @@ -1386,8 +1421,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, info->supports_dp = is_dp; info->supports_edp = is_edp; - DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n", - port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt); + if (bdb_version >= 195) + info->supports_typec_usb = child->dp_usb_type_c; + + if (bdb_version >= 209) + info->supports_tbt = child->tbt; + + DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n", + port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt, + info->supports_typec_usb, info->supports_tbt); if (is_edp && is_dvi) DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n", @@ -1492,19 +1534,20 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version) { - enum port port; + const struct child_device_config *child; + int i; if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv)) return; - if (!dev_priv->vbt.child_dev_num) - return; - if (bdb_version < 155) return; - for (port = PORT_A; port < I915_MAX_PORTS; port++) - parse_ddi_port(dev_priv, port, bdb_version); + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { + child = dev_priv->vbt.child_dev + i; + + parse_ddi_port(dev_priv, child, bdb_version); + } } static void @@ -1657,9 +1700,17 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv) struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; + /* + * VBT has the TypeC mode (native,TBT/USB) and we don't want + * to detect it. + */ + if (intel_port_is_tc(dev_priv, port)) + continue; + info->supports_dvi = (port != PORT_A && port != PORT_E); info->supports_hdmi = info->supports_dvi; info->supports_dp = (port != PORT_E); + info->supports_edp = (port == PORT_A); } } @@ -1940,6 +1991,15 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por }; int i; + if (HAS_DDI(dev_priv)) { + const struct ddi_vbt_port_info *port_info = + &dev_priv->vbt.ddi_port_info[port]; + + return port_info->supports_dp || + port_info->supports_dvi || + port_info->supports_hdmi; + } + /* FIXME maybe deal with port A as well? 
*/ if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) return false; @@ -2071,8 +2131,8 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, dvo_port = child->dvo_port; if (dvo_port == DVO_PORT_MIPIA || - (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) || - (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) { + (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) || + (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) { if (port) *port = dvo_port - DVO_PORT_MIPIA; return true; diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 447c5256f63a..09ed90c0ba00 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -29,180 +29,144 @@ #define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq) -static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b) +static void irq_enable(struct intel_engine_cs *engine) { - struct intel_wait *wait; - unsigned int result = 0; - - lockdep_assert_held(&b->irq_lock); - - wait = b->irq_wait; - if (wait) { - /* - * N.B. Since task_asleep() and ttwu are not atomic, the - * waiter may actually go to sleep after the check, causing - * us to suppress a valid wakeup. We prefer to reduce the - * number of false positive missed_breadcrumb() warnings - * at the expense of a few false negatives, as it it easy - * to trigger a false positive under heavy load. Enough - * signal should remain from genuine missed_breadcrumb() - * for us to detect in CI. - */ - bool was_asleep = task_asleep(wait->tsk); - - result = ENGINE_WAKEUP_WAITER; - if (wake_up_process(wait->tsk) && was_asleep) - result |= ENGINE_WAKEUP_ASLEEP; - } + if (!engine->irq_enable) + return; - return result; + /* Caller disables interrupts */ + spin_lock(&engine->i915->irq_lock); + engine->irq_enable(engine); + spin_unlock(&engine->i915->irq_lock); } -unsigned int intel_engine_wakeup(struct intel_engine_cs *engine) +static void irq_disable(struct intel_engine_cs *engine) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; - unsigned long flags; - unsigned int result; - - spin_lock_irqsave(&b->irq_lock, flags); - result = __intel_breadcrumbs_wakeup(b); - spin_unlock_irqrestore(&b->irq_lock, flags); - - return result; -} + if (!engine->irq_disable) + return; -static unsigned long wait_timeout(void) -{ - return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES); + /* Caller disables interrupts */ + spin_lock(&engine->i915->irq_lock); + engine->irq_disable(engine); + spin_unlock(&engine->i915->irq_lock); } -static noinline void missed_breadcrumb(struct intel_engine_cs *engine) +static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) { - if (GEM_SHOW_DEBUG()) { - struct drm_printer p = drm_debug_printer(__func__); + lockdep_assert_held(&b->irq_lock); - intel_engine_dump(engine, &p, - "%s missed breadcrumb at %pS\n", - engine->name, __builtin_return_address(0)); - } + GEM_BUG_ON(!b->irq_enabled); + if (!--b->irq_enabled) + irq_disable(container_of(b, + struct intel_engine_cs, + breadcrumbs)); - set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); + b->irq_armed = false; } -static void intel_breadcrumbs_hangcheck(struct timer_list *t) +void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) { - struct intel_engine_cs *engine = - from_timer(engine, t, breadcrumbs.hangcheck); struct intel_breadcrumbs *b = &engine->breadcrumbs; - unsigned int irq_count; if (!b->irq_armed) return; - irq_count = 
READ_ONCE(b->irq_count); - if (b->hangcheck_interrupts != irq_count) { - b->hangcheck_interrupts = irq_count; - mod_timer(&b->hangcheck, wait_timeout()); - return; - } + spin_lock_irq(&b->irq_lock); + if (b->irq_armed) + __intel_breadcrumbs_disarm_irq(b); + spin_unlock_irq(&b->irq_lock); +} - /* We keep the hangcheck timer alive until we disarm the irq, even - * if there are no waiters at present. - * - * If the waiter was currently running, assume it hasn't had a chance - * to process the pending interrupt (e.g, low priority task on a loaded - * system) and wait until it sleeps before declaring a missed interrupt. - * - * If the waiter was asleep (and not even pending a wakeup), then we - * must have missed an interrupt as the GPU has stopped advancing - * but we still have a waiter. Assuming all batches complete within - * DRM_I915_HANGCHECK_JIFFIES [1.5s]! - */ - if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) { - missed_breadcrumb(engine); - mod_timer(&b->fake_irq, jiffies + 1); - } else { - mod_timer(&b->hangcheck, wait_timeout()); - } +static inline bool __request_completed(const struct i915_request *rq) +{ + return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); } -static void intel_breadcrumbs_fake_irq(struct timer_list *t) +bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) { - struct intel_engine_cs *engine = - from_timer(engine, t, breadcrumbs.fake_irq); struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct intel_context *ce, *cn; + struct list_head *pos, *next; + LIST_HEAD(signal); - /* - * The timer persists in case we cannot enable interrupts, - * or if we have previously seen seqno/interrupt incoherency - * ("missed interrupt" syndrome, better known as a "missed breadcrumb"). - * Here the worker will wake up every jiffie in order to kick the - * oldest waiter to do the coherent seqno check. - */ + spin_lock(&b->irq_lock); - spin_lock_irq(&b->irq_lock); - if (b->irq_armed && !__intel_breadcrumbs_wakeup(b)) - __intel_engine_disarm_breadcrumbs(engine); - spin_unlock_irq(&b->irq_lock); - if (!b->irq_armed) - return; + if (b->irq_armed && list_empty(&b->signalers)) + __intel_breadcrumbs_disarm_irq(b); - /* If the user has disabled the fake-irq, restore the hangchecking */ - if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) { - mod_timer(&b->hangcheck, wait_timeout()); - return; - } + list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) { + GEM_BUG_ON(list_empty(&ce->signals)); - mod_timer(&b->fake_irq, jiffies + 1); -} + list_for_each_safe(pos, next, &ce->signals) { + struct i915_request *rq = + list_entry(pos, typeof(*rq), signal_link); -static void irq_enable(struct intel_engine_cs *engine) -{ - /* - * FIXME: Ideally we want this on the API boundary, but for the - * sake of testing with mock breadcrumbs (no HW so unable to - * enable irqs) we place it deep within the bowels, at the point - * of no return. - */ - GEM_BUG_ON(!intel_irqs_enabled(engine->i915)); + if (!__request_completed(rq)) + break; - /* Enabling the IRQ may miss the generation of the interrupt, but - * we still need to force the barrier before reading the seqno, - * just in case. 
- */ - set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); + GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, + &rq->fence.flags)); - /* Caller disables interrupts */ - if (engine->irq_enable) { - spin_lock(&engine->i915->irq_lock); - engine->irq_enable(engine); - spin_unlock(&engine->i915->irq_lock); + /* + * Queue for execution after dropping the signaling + * spinlock as the callback chain may end up adding + * more signalers to the same context or engine. + */ + i915_request_get(rq); + + /* + * We may race with direct invocation of + * dma_fence_signal(), e.g. i915_request_retire(), + * so we need to acquire our reference to the request + * before we cancel the breadcrumb. + */ + clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + list_add_tail(&rq->signal_link, &signal); + } + + /* + * We process the list deletion in bulk, only using a list_add + * (not list_move) above but keeping the status of + * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit. + */ + if (!list_is_first(pos, &ce->signals)) { + /* Advance the list to the first incomplete request */ + __list_del_many(&ce->signals, pos); + if (&ce->signals == pos) /* now empty */ + list_del_init(&ce->signal_link); + } } -} -static void irq_disable(struct intel_engine_cs *engine) -{ - /* Caller disables interrupts */ - if (engine->irq_disable) { - spin_lock(&engine->i915->irq_lock); - engine->irq_disable(engine); - spin_unlock(&engine->i915->irq_lock); + spin_unlock(&b->irq_lock); + + list_for_each_safe(pos, next, &signal) { + struct i915_request *rq = + list_entry(pos, typeof(*rq), signal_link); + + dma_fence_signal(&rq->fence); + i915_request_put(rq); } + + return !list_empty(&signal); } -void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) +bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; + bool result; - lockdep_assert_held(&b->irq_lock); - GEM_BUG_ON(b->irq_wait); - GEM_BUG_ON(!b->irq_armed); + local_irq_disable(); + result = intel_engine_breadcrumbs_irq(engine); + local_irq_enable(); - GEM_BUG_ON(!b->irq_enabled); - if (!--b->irq_enabled) - irq_disable(engine); + return result; +} - b->irq_armed = false; +static void signal_irq_work(struct irq_work *work) +{ + struct intel_engine_cs *engine = + container_of(work, typeof(*engine), breadcrumbs.irq_work); + + intel_engine_breadcrumbs_irq(engine); } void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine) @@ -227,666 +191,155 @@ void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine) spin_unlock_irq(&b->irq_lock); } -void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct intel_wait *wait, *n; - - if (!b->irq_armed) - return; - - /* - * We only disarm the irq when we are idle (all requests completed), - * so if the bottom-half remains asleep, it missed the request - * completion. 
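For reference, __request_completed() above leans on the driver's wraparound-safe seqno comparison (i915_seqno_passed(), whose body is not part of this diff). A minimal userspace sketch of that check, with an illustrative name:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* "a has passed b" for monotonically increasing 32-bit seqnos: treat the
 * difference as signed so the test survives wraparound. */
static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(0x00000002u, 0xfffffffeu)); /* 1: passed, across the wrap */
	printf("%d\n", seqno_passed(0xfffffffeu, 0x00000002u)); /* 0: not yet passed */
	return 0;
}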
- */ - if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) - missed_breadcrumb(engine); - - spin_lock_irq(&b->rb_lock); - - spin_lock(&b->irq_lock); - b->irq_wait = NULL; - if (b->irq_armed) - __intel_engine_disarm_breadcrumbs(engine); - spin_unlock(&b->irq_lock); - - rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) { - GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno)); - RB_CLEAR_NODE(&wait->node); - wake_up_process(wait->tsk); - } - b->waiters = RB_ROOT; - - spin_unlock_irq(&b->rb_lock); -} - -static bool use_fake_irq(const struct intel_breadcrumbs *b) -{ - const struct intel_engine_cs *engine = - container_of(b, struct intel_engine_cs, breadcrumbs); - - if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) - return false; - - /* - * Only start with the heavy weight fake irq timer if we have not - * seen any interrupts since enabling it the first time. If the - * interrupts are still arriving, it means we made a mistake in our - * engine->seqno_barrier(), a timing error that should be transient - * and unlikely to reoccur. - */ - return READ_ONCE(b->irq_count) == b->hangcheck_interrupts; -} - -static void enable_fake_irq(struct intel_breadcrumbs *b) -{ - /* Ensure we never sleep indefinitely */ - if (!b->irq_enabled || use_fake_irq(b)) - mod_timer(&b->fake_irq, jiffies + 1); - else - mod_timer(&b->hangcheck, wait_timeout()); -} - -static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) +static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) { struct intel_engine_cs *engine = container_of(b, struct intel_engine_cs, breadcrumbs); - struct drm_i915_private *i915 = engine->i915; - bool enabled; lockdep_assert_held(&b->irq_lock); if (b->irq_armed) - return false; + return; - /* The breadcrumb irq will be disarmed on the interrupt after the + /* + * The breadcrumb irq will be disarmed on the interrupt after the * waiters are signaled. This gives us a single interrupt window in * which we can add a new waiter and avoid the cost of re-enabling * the irq. */ b->irq_armed = true; - if (I915_SELFTEST_ONLY(b->mock)) { - /* For our mock objects we want to avoid interaction - * with the real hardware (which is not set up). So - * we simply pretend we have enabled the powerwell - * and the irq, and leave it up to the mock - * implementation to call intel_engine_wakeup() - * itself when it wants to simulate a user interrupt, - */ - return true; - } - - /* Since we are waiting on a request, the GPU should be busy + /* + * Since we are waiting on a request, the GPU should be busy * and should have its own rpm reference. This is tracked * by i915->gt.awake, we can forgo holding our own wakref * for the interrupt as before i915->gt.awake is released (when * the driver is idle) we disarm the breadcrumbs. */ - /* No interrupts? Kick the waiter every jiffie! */ - enabled = false; - if (!b->irq_enabled++ && - !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) { + if (!b->irq_enabled++) irq_enable(engine); - enabled = true; - } - - enable_fake_irq(b); - return enabled; -} - -static inline struct intel_wait *to_wait(struct rb_node *node) -{ - return rb_entry(node, struct intel_wait, node); -} - -static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b, - struct intel_wait *wait) -{ - lockdep_assert_held(&b->rb_lock); - GEM_BUG_ON(b->irq_wait == wait); - - /* - * This request is completed, so remove it from the tree, mark it as - * complete, and *then* wake up the associated task. N.B. 
when the - * task wakes up, it will find the empty rb_node, discern that it - * has already been removed from the tree and skip the serialisation - * of the b->rb_lock and b->irq_lock. This means that the destruction - * of the intel_wait is not serialised with the interrupt handler - * by the waiter - it must instead be serialised by the caller. - */ - rb_erase(&wait->node, &b->waiters); - RB_CLEAR_NODE(&wait->node); - - if (wait->tsk->state != TASK_RUNNING) - wake_up_process(wait->tsk); /* implicit smp_wmb() */ -} - -static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine, - struct rb_node *next) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - spin_lock(&b->irq_lock); - GEM_BUG_ON(!b->irq_armed); - GEM_BUG_ON(!b->irq_wait); - b->irq_wait = to_wait(next); - spin_unlock(&b->irq_lock); - - /* We always wake up the next waiter that takes over as the bottom-half - * as we may delegate not only the irq-seqno barrier to the next waiter - * but also the task of waking up concurrent waiters. - */ - if (next) - wake_up_process(to_wait(next)->tsk); } -static bool __intel_engine_add_wait(struct intel_engine_cs *engine, - struct intel_wait *wait) +void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) { struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct rb_node **p, *parent, *completed; - bool first, armed; - u32 seqno; - - GEM_BUG_ON(!wait->seqno); - - /* Insert the request into the retirement ordered list - * of waiters by walking the rbtree. If we are the oldest - * seqno in the tree (the first to be retired), then - * set ourselves as the bottom-half. - * - * As we descend the tree, prune completed branches since we hold the - * spinlock we know that the first_waiter must be delayed and can - * reduce some of the sequential wake up latency if we take action - * ourselves and wake up the completed tasks in parallel. Also, by - * removing stale elements in the tree, we may be able to reduce the - * ping-pong between the old bottom-half and ourselves as first-waiter. - */ - armed = false; - first = true; - parent = NULL; - completed = NULL; - seqno = intel_engine_get_seqno(engine); - - /* If the request completed before we managed to grab the spinlock, - * return now before adding ourselves to the rbtree. We let the - * current bottom-half handle any pending wakeups and instead - * try and get out of the way quickly. - */ - if (i915_seqno_passed(seqno, wait->seqno)) { - RB_CLEAR_NODE(&wait->node); - return first; - } - - p = &b->waiters.rb_node; - while (*p) { - parent = *p; - if (wait->seqno == to_wait(parent)->seqno) { - /* We have multiple waiters on the same seqno, select - * the highest priority task (that with the smallest - * task->prio) to serve as the bottom-half for this - * group. - */ - if (wait->tsk->prio > to_wait(parent)->tsk->prio) { - p = &parent->rb_right; - first = false; - } else { - p = &parent->rb_left; - } - } else if (i915_seqno_passed(wait->seqno, - to_wait(parent)->seqno)) { - p = &parent->rb_right; - if (i915_seqno_passed(seqno, to_wait(parent)->seqno)) - completed = parent; - else - first = false; - } else { - p = &parent->rb_left; - } - } - rb_link_node(&wait->node, parent, p); - rb_insert_color(&wait->node, &b->waiters); - - if (first) { - spin_lock(&b->irq_lock); - b->irq_wait = wait; - /* After assigning ourselves as the new bottom-half, we must - * perform a cursory check to prevent a missed interrupt. 
- * Either we miss the interrupt whilst programming the hardware, - * or if there was a previous waiter (for a later seqno) they - * may be woken instead of us (due to the inherent race - * in the unlocked read of b->irq_seqno_bh in the irq handler) - * and so we miss the wake up. - */ - armed = __intel_breadcrumbs_enable_irq(b); - spin_unlock(&b->irq_lock); - } - if (completed) { - /* Advance the bottom-half (b->irq_wait) before we wake up - * the waiters who may scribble over their intel_wait - * just as the interrupt handler is dereferencing it via - * b->irq_wait. - */ - if (!first) { - struct rb_node *next = rb_next(completed); - GEM_BUG_ON(next == &wait->node); - __intel_breadcrumbs_next(engine, next); - } - - do { - struct intel_wait *crumb = to_wait(completed); - completed = rb_prev(completed); - __intel_breadcrumbs_finish(b, crumb); - } while (completed); - } - - GEM_BUG_ON(!b->irq_wait); - GEM_BUG_ON(!b->irq_armed); - GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node); + spin_lock_init(&b->irq_lock); + INIT_LIST_HEAD(&b->signalers); - return armed; + init_irq_work(&b->irq_work, signal_irq_work); } -bool intel_engine_add_wait(struct intel_engine_cs *engine, - struct intel_wait *wait) +void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) { struct intel_breadcrumbs *b = &engine->breadcrumbs; - bool armed; - - spin_lock_irq(&b->rb_lock); - armed = __intel_engine_add_wait(engine, wait); - spin_unlock_irq(&b->rb_lock); - if (armed) - return armed; - - /* Make the caller recheck if its request has already started. */ - return intel_engine_has_started(engine, wait->seqno); -} + unsigned long flags; -static inline bool chain_wakeup(struct rb_node *rb, int priority) -{ - return rb && to_wait(rb)->tsk->prio <= priority; -} + spin_lock_irqsave(&b->irq_lock, flags); -static inline int wakeup_priority(struct intel_breadcrumbs *b, - struct task_struct *tsk) -{ - if (tsk == b->signaler) - return INT_MIN; + if (b->irq_enabled) + irq_enable(engine); else - return tsk->prio; -} - -static void __intel_engine_remove_wait(struct intel_engine_cs *engine, - struct intel_wait *wait) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - lockdep_assert_held(&b->rb_lock); - - if (RB_EMPTY_NODE(&wait->node)) - goto out; - - if (b->irq_wait == wait) { - const int priority = wakeup_priority(b, wait->tsk); - struct rb_node *next; - - /* We are the current bottom-half. Find the next candidate, - * the first waiter in the queue on the remaining oldest - * request. As multiple seqnos may complete in the time it - * takes us to wake up and find the next waiter, we have to - * wake up that waiter for it to perform its own coherent - * completion check. - */ - next = rb_next(&wait->node); - if (chain_wakeup(next, priority)) { - /* If the next waiter is already complete, - * wake it up and continue onto the next waiter. So - * if have a small herd, they will wake up in parallel - * rather than sequentially, which should reduce - * the overall latency in waking all the completed - * clients. - * - * However, waking up a chain adds extra latency to - * the first_waiter. This is undesirable if that - * waiter is a high priority task. 
- */ - u32 seqno = intel_engine_get_seqno(engine); - - while (i915_seqno_passed(seqno, to_wait(next)->seqno)) { - struct rb_node *n = rb_next(next); - - __intel_breadcrumbs_finish(b, to_wait(next)); - next = n; - if (!chain_wakeup(next, priority)) - break; - } - } - - __intel_breadcrumbs_next(engine, next); - } else { - GEM_BUG_ON(rb_first(&b->waiters) == &wait->node); - } - - GEM_BUG_ON(RB_EMPTY_NODE(&wait->node)); - rb_erase(&wait->node, &b->waiters); - RB_CLEAR_NODE(&wait->node); + irq_disable(engine); -out: - GEM_BUG_ON(b->irq_wait == wait); - GEM_BUG_ON(rb_first(&b->waiters) != - (b->irq_wait ? &b->irq_wait->node : NULL)); + spin_unlock_irqrestore(&b->irq_lock, flags); } -void intel_engine_remove_wait(struct intel_engine_cs *engine, - struct intel_wait *wait) +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - /* Quick check to see if this waiter was already decoupled from - * the tree by the bottom-half to avoid contention on the spinlock - * by the herd. - */ - if (RB_EMPTY_NODE(&wait->node)) { - GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait); - return; - } - - spin_lock_irq(&b->rb_lock); - __intel_engine_remove_wait(engine, wait); - spin_unlock_irq(&b->rb_lock); } -static void signaler_set_rtpriority(void) +bool i915_request_enable_breadcrumb(struct i915_request *rq) { - struct sched_param param = { .sched_priority = 1 }; - - sched_setscheduler_nocheck(current, SCHED_FIFO, ¶m); -} + struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; -static int intel_breadcrumbs_signaler(void *arg) -{ - struct intel_engine_cs *engine = arg; - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct i915_request *rq, *n; + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); - /* Install ourselves with high priority to reduce signalling latency */ - signaler_set_rtpriority(); + if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) + return true; - do { - bool do_schedule = true; - LIST_HEAD(list); - u32 seqno; + spin_lock(&b->irq_lock); + if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) && + !__request_completed(rq)) { + struct intel_context *ce = rq->hw_context; + struct list_head *pos; - set_current_state(TASK_INTERRUPTIBLE); - if (list_empty(&b->signals)) - goto sleep; + __intel_breadcrumbs_arm_irq(b); /* - * We are either woken up by the interrupt bottom-half, - * or by a client adding a new signaller. In both cases, - * the GPU seqno may have advanced beyond our oldest signal. - * If it has, propagate the signal, remove the waiter and - * check again with the next oldest signal. Otherwise we - * need to wait for a new interrupt from the GPU or for - * a new client. + * We keep the seqno in retirement order, so we can break + * inside intel_engine_breadcrumbs_irq as soon as we've passed + * the last completed request (or seen a request that hasn't + * event started). We could iterate the timeline->requests list, + * but keeping a separate signalers_list has the advantage of + * hopefully being much smaller than the full list and so + * provides faster iteration and detection when there are no + * more interrupts required for this context. + * + * We typically expect to add new signalers in order, so we + * start looking for our insertion point from the tail of + * the list. 
*/ - seqno = intel_engine_get_seqno(engine); - - spin_lock_irq(&b->rb_lock); - list_for_each_entry_safe(rq, n, &b->signals, signaling.link) { - u32 this = rq->signaling.wait.seqno; - - GEM_BUG_ON(!rq->signaling.wait.seqno); - - if (!i915_seqno_passed(seqno, this)) - break; - - if (likely(this == i915_request_global_seqno(rq))) { - __intel_engine_remove_wait(engine, - &rq->signaling.wait); + list_for_each_prev(pos, &ce->signals) { + struct i915_request *it = + list_entry(pos, typeof(*it), signal_link); - rq->signaling.wait.seqno = 0; - __list_del_entry(&rq->signaling.link); - - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &rq->fence.flags)) { - list_add_tail(&rq->signaling.link, - &list); - i915_request_get(rq); - } - } - } - spin_unlock_irq(&b->rb_lock); - - if (!list_empty(&list)) { - local_bh_disable(); - list_for_each_entry_safe(rq, n, &list, signaling.link) { - dma_fence_signal(&rq->fence); - GEM_BUG_ON(!i915_request_completed(rq)); - i915_request_put(rq); - } - local_bh_enable(); /* kick start the tasklets */ - - /* - * If the engine is saturated we may be continually - * processing completed requests. This angers the - * NMI watchdog if we never let anything else - * have access to the CPU. Let's pretend to be nice - * and relinquish the CPU if we burn through the - * entire RT timeslice! - */ - do_schedule = need_resched(); - } - - if (unlikely(do_schedule)) { - /* Before we sleep, check for a missed seqno */ - if (current->state & TASK_NORMAL && - !list_empty(&b->signals) && - engine->irq_seqno_barrier && - test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, - &engine->irq_posted)) { - engine->irq_seqno_barrier(engine); - intel_engine_wakeup(engine); - } - -sleep: - if (kthread_should_park()) - kthread_parkme(); - - if (unlikely(kthread_should_stop())) + if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno)) break; - - schedule(); } - } while (1); - __set_current_state(TASK_RUNNING); + list_add(&rq->signal_link, pos); + if (pos == &ce->signals) /* catch transitions from empty list */ + list_move_tail(&ce->signal_link, &b->signalers); - return 0; -} - -static void insert_signal(struct intel_breadcrumbs *b, - struct i915_request *request, - const u32 seqno) -{ - struct i915_request *iter; - - lockdep_assert_held(&b->rb_lock); - - /* - * A reasonable assumption is that we are called to add signals - * in sequence, as the requests are submitted for execution and - * assigned a global_seqno. This will be the case for the majority - * of internally generated signals (inter-engine signaling). - * - * Out of order waiters triggering random signaling enabling will - * be more problematic, but hopefully rare enough and the list - * small enough that the O(N) insertion sort is not an issue. - */ - - list_for_each_entry_reverse(iter, &b->signals, signaling.link) - if (i915_seqno_passed(seqno, iter->signaling.wait.seqno)) - break; - - list_add(&request->signaling.link, &iter->signaling.link); -} - -bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup) -{ - struct intel_engine_cs *engine = request->engine; - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct intel_wait *wait = &request->signaling.wait; - u32 seqno; - - /* - * Note that we may be called from an interrupt handler on another - * device (e.g. nouveau signaling a fence completion causing us - * to submit a request, and so enable signaling). As such, - * we need to make sure that all other users of b->rb_lock protect - * against interrupts, i.e. use spin_lock_irqsave. 
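The insertion above keeps each context's ce->signals list ordered by fence.seqno, probing backwards from the tail, so that intel_engine_breadcrumbs_irq() can stop at the first request the HWSP seqno has not yet reached. A self-contained sketch of that pattern with a plain doubly linked list (all names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct node {
	uint32_t seqno;
	struct node *prev, *next;
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void insert_after(struct node *pos, struct node *n)
{
	n->prev = pos;
	n->next = pos->next;
	pos->next->prev = n;
	pos->next = n;
}

static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

/* Insert in seqno order, walking backwards from the tail, since new
 * signalers usually arrive in submission order. */
static void add_signaler(struct node *head, struct node *rq)
{
	struct node *pos;

	for (pos = head->prev; pos != head; pos = pos->prev)
		if (seqno_passed(rq->seqno, pos->seqno))
			break;

	insert_after(pos, rq);
}

/* Walk oldest to newest and stop at the first seqno the hardware has not
 * written yet; everything before it is unlinked and "signalled". */
static void signal_completed(struct node *head, uint32_t hwsp_seqno)
{
	struct node *pos = head->next;

	while (pos != head && seqno_passed(hwsp_seqno, pos->seqno)) {
		struct node *next = pos->next;

		pos->prev->next = pos->next;
		pos->next->prev = pos->prev;
		printf("signal %u\n", (unsigned int)pos->seqno);
		pos = next;
	}
}

int main(void)
{
	struct node head;
	struct node a = { .seqno = 1 }, b = { .seqno = 2 }, c = { .seqno = 3 };

	list_init(&head);
	add_signaler(&head, &a);
	add_signaler(&head, &c);
	add_signaler(&head, &b);	/* out of order, still lands between 1 and 3 */

	signal_completed(&head, 2);	/* prints "signal 1", "signal 2"; 3 stays pending */
	return 0;
}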
- */ - - /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */ - GEM_BUG_ON(!irqs_disabled()); - lockdep_assert_held(&request->lock); - - seqno = i915_request_global_seqno(request); - if (!seqno) /* will be enabled later upon execution */ - return true; - - GEM_BUG_ON(wait->seqno); - wait->tsk = b->signaler; - wait->request = request; - wait->seqno = seqno; - - /* - * Add ourselves into the list of waiters, but registering our - * bottom-half as the signaller thread. As per usual, only the oldest - * waiter (not just signaller) is tasked as the bottom-half waking - * up all completed waiters after the user interrupt. - * - * If we are the oldest waiter, enable the irq (after which we - * must double check that the seqno did not complete). - */ - spin_lock(&b->rb_lock); - insert_signal(b, request, seqno); - wakeup &= __intel_engine_add_wait(engine, wait); - spin_unlock(&b->rb_lock); - - if (wakeup) { - wake_up_process(b->signaler); - return !intel_wait_complete(wait); + set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); } + spin_unlock(&b->irq_lock); - return true; + return !__request_completed(rq); } -void intel_engine_cancel_signaling(struct i915_request *request) +void i915_request_cancel_breadcrumb(struct i915_request *rq) { - struct intel_engine_cs *engine = request->engine; - struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; - GEM_BUG_ON(!irqs_disabled()); - lockdep_assert_held(&request->lock); - - if (!READ_ONCE(request->signaling.wait.seqno)) + if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) return; - spin_lock(&b->rb_lock); - __intel_engine_remove_wait(engine, &request->signaling.wait); - if (fetch_and_zero(&request->signaling.wait.seqno)) - __list_del_entry(&request->signaling.link); - spin_unlock(&b->rb_lock); -} - -int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct task_struct *tsk; - - spin_lock_init(&b->rb_lock); - spin_lock_init(&b->irq_lock); - - timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0); - timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0); - - INIT_LIST_HEAD(&b->signals); - - /* Spawn a thread to provide a common bottom-half for all signals. - * As this is an asynchronous interface we cannot steal the current - * task for handling the bottom-half to the user interrupt, therefore - * we create a thread to do the coherent seqno dance after the - * interrupt and then signal the waitqueue (via the dma-buf/fence). - */ - tsk = kthread_run(intel_breadcrumbs_signaler, engine, - "i915/signal:%d", engine->id); - if (IS_ERR(tsk)) - return PTR_ERR(tsk); - - b->signaler = tsk; - - return 0; -} - -static void cancel_fake_irq(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */ - del_timer_sync(&b->hangcheck); - clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); -} - -void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - unsigned long flags; - - spin_lock_irqsave(&b->irq_lock, flags); - - /* - * Leave the fake_irq timer enabled (if it is running), but clear the - * bit so that it turns itself off on its next wake up and goes back - * to the long hangcheck interval if still required. 
- */ - clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); - - if (b->irq_enabled) - irq_enable(engine); - else - irq_disable(engine); + spin_lock(&b->irq_lock); + if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { + struct intel_context *ce = rq->hw_context; - /* - * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the - * GPU is active and may have already executed the MI_USER_INTERRUPT - * before the CPU is ready to receive. However, the engine is currently - * idle (we haven't started it yet), there is no possibility for a - * missed interrupt as we enabled the irq and so we can clear the - * immediate wakeup (until a real interrupt arrives for the waiter). - */ - clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); + list_del(&rq->signal_link); + if (list_empty(&ce->signals)) + list_del_init(&ce->signal_link); - spin_unlock_irqrestore(&b->irq_lock, flags); + clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + } + spin_unlock(&b->irq_lock); } -void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) +void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, + struct drm_printer *p) { struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct intel_context *ce; + struct i915_request *rq; - /* The engines should be idle and all requests accounted for! */ - WARN_ON(READ_ONCE(b->irq_wait)); - WARN_ON(!RB_EMPTY_ROOT(&b->waiters)); - WARN_ON(!list_empty(&b->signals)); + if (list_empty(&b->signalers)) + return; - if (!IS_ERR_OR_NULL(b->signaler)) - kthread_stop(b->signaler); + drm_printf(p, "Signals:\n"); - cancel_fake_irq(engine); + spin_lock_irq(&b->irq_lock); + list_for_each_entry(ce, &b->signalers, signal_link) { + list_for_each_entry(rq, &ce->signals, signal_link) { + drm_printf(p, "\t[%llx:%llx%s] @ %dms\n", + rq->fence.context, rq->fence.seqno, + i915_request_completed(rq) ? "!" : + i915_request_started(rq) ? "*" : + "", + jiffies_to_msecs(jiffies - rq->emitted_jiffies)); + } + } + spin_unlock_irq(&b->irq_lock); } - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/intel_breadcrumbs.c" -#endif diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 25e3aba9cded..b8cd481f5e33 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -218,7 +218,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) }; const unsigned int *vco_table; unsigned int vco; - uint8_t tmp = 0; + u8 tmp = 0; /* FIXME other chipsets? */ if (IS_GM45(dev_priv)) @@ -234,7 +234,8 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) else return 0; - tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO); + tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? 
+ HPLLVCO_MOBILE : HPLLVCO); vco = vco_table[tmp & 0x7]; if (vco == 0) @@ -249,13 +250,13 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state) { struct pci_dev *pdev = dev_priv->drm.pdev; - static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 }; - static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 }; - static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 }; - static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 }; - const uint8_t *div_table; + static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 }; + static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 }; + static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 }; + static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 }; + const u8 *div_table; unsigned int cdclk_sel; - uint16_t tmp = 0; + u16 tmp = 0; cdclk_state->vco = intel_hpll_vco(dev_priv); @@ -330,12 +331,12 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state) { struct pci_dev *pdev = dev_priv->drm.pdev; - static const uint8_t div_3200[] = { 16, 10, 8 }; - static const uint8_t div_4000[] = { 20, 12, 10 }; - static const uint8_t div_5333[] = { 24, 16, 14 }; - const uint8_t *div_table; + static const u8 div_3200[] = { 16, 10, 8 }; + static const u8 div_4000[] = { 20, 12, 10 }; + static const u8 div_5333[] = { 24, 16, 14 }; + const u8 *div_table; unsigned int cdclk_sel; - uint16_t tmp = 0; + u16 tmp = 0; cdclk_state->vco = intel_hpll_vco(dev_priv); @@ -375,7 +376,7 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv, { struct pci_dev *pdev = dev_priv->drm.pdev; unsigned int cdclk_sel; - uint16_t tmp = 0; + u16 tmp = 0; cdclk_state->vco = intel_hpll_vco(dev_priv); @@ -403,8 +404,8 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv, static void hsw_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state) { - uint32_t lcpll = I915_READ(LCPLL_CTL); - uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; + u32 lcpll = I915_READ(LCPLL_CTL); + u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) cdclk_state->cdclk = 800000; @@ -468,7 +469,7 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv, cdclk_state->vco); mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); mutex_unlock(&dev_priv->pcu_lock); if (IS_VALLEYVIEW(dev_priv)) @@ -516,10 +517,12 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) } static void vlv_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state) + const struct intel_cdclk_state *cdclk_state, + enum pipe pipe) { int cdclk = cdclk_state->cdclk; u32 val, cmd = cdclk_state->voltage_level; + intel_wakeref_t wakeref; switch (cdclk) { case 400000: @@ -539,14 +542,14 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, * a system suspend. So grab the PIPE-A domain, which covers * the HW blocks needed for the following programming. 
*/ - intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); val &= ~DSPFREQGUAR_MASK; val |= (cmd << DSPFREQGUAR_SHIFT); - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), 50)) { DRM_ERROR("timed out waiting for CDclk change\n"); @@ -593,14 +596,16 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, vlv_program_pfi_credits(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref); } static void chv_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state) + const struct intel_cdclk_state *cdclk_state, + enum pipe pipe) { int cdclk = cdclk_state->cdclk; u32 val, cmd = cdclk_state->voltage_level; + intel_wakeref_t wakeref; switch (cdclk) { case 333333: @@ -619,14 +624,14 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, * a system suspend. So grab the PIPE-A domain, which covers * the HW blocks needed for the following programming. */ - intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); val &= ~DSPFREQGUAR_MASK_CHV; val |= (cmd << DSPFREQGUAR_SHIFT_CHV); - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 50)) { DRM_ERROR("timed out waiting for CDclk change\n"); @@ -637,7 +642,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, vlv_program_pfi_credits(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref); } static int bdw_calc_cdclk(int min_cdclk) @@ -670,8 +675,8 @@ static u8 bdw_calc_voltage_level(int cdclk) static void bdw_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state) { - uint32_t lcpll = I915_READ(LCPLL_CTL); - uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; + u32 lcpll = I915_READ(LCPLL_CTL); + u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) cdclk_state->cdclk = 800000; @@ -695,10 +700,11 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv, } static void bdw_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state) + const struct intel_cdclk_state *cdclk_state, + enum pipe pipe) { int cdclk = cdclk_state->cdclk; - uint32_t val; + u32 val; int ret; if (WARN((I915_READ(LCPLL_CTL) & @@ -962,7 +968,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5)) DRM_ERROR("DPLL0 not locked\n"); @@ -976,16 +982,17 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, 
int vco) static void skl_dpll0_disable(struct drm_i915_private *dev_priv) { I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); - if (intel_wait_for_register(dev_priv, - LCPLL1_CTL, LCPLL_PLL_LOCK, 0, - 1)) + if (intel_wait_for_register(&dev_priv->uncore, + LCPLL1_CTL, LCPLL_PLL_LOCK, 0, + 1)) DRM_ERROR("Couldn't disable DPLL0\n"); dev_priv->cdclk.hw.vco = 0; } static void skl_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state) + const struct intel_cdclk_state *cdclk_state, + enum pipe pipe) { int cdclk = cdclk_state->cdclk; int vco = cdclk_state->vco; @@ -1081,7 +1088,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) { - uint32_t cdctl, expected; + u32 cdctl, expected; /* * check if the pre-os initialized the display @@ -1156,7 +1163,7 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv) cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco); cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk); - skl_set_cdclk(dev_priv, &cdclk_state); + skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } /** @@ -1174,7 +1181,7 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.vco = 0; cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk); - skl_set_cdclk(dev_priv, &cdclk_state); + skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } static int bxt_calc_cdclk(int min_cdclk) @@ -1321,7 +1328,7 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) I915_WRITE(BXT_DE_PLL_ENABLE, 0); /* Timeout 200us */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0, 1)) DRM_ERROR("timeout waiting for DE PLL unlock\n"); @@ -1342,7 +1349,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); /* Timeout 200us */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, BXT_DE_PLL_LOCK, @@ -1353,7 +1360,8 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) } static void bxt_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state) + const struct intel_cdclk_state *cdclk_state, + enum pipe pipe) { int cdclk = cdclk_state->cdclk; int vco = cdclk_state->vco; @@ -1406,11 +1414,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, bxt_de_pll_enable(dev_priv, vco); val = divider | skl_cdclk_decimal(cdclk); - /* - * FIXME if only the cd2x divider needs changing, it could be done - * without shutting off the pipe (if only one pipe is active). - */ - val |= BXT_CDCLK_CD2X_PIPE_NONE; + if (pipe == INVALID_PIPE) + val |= BXT_CDCLK_CD2X_PIPE_NONE; + else + val |= BXT_CDCLK_CD2X_PIPE(pipe); /* * Disable SSA Precharge when CD clock frequency < 500 MHz, * enable otherwise. 
@@ -1419,6 +1426,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; I915_WRITE(CDCLK_CTL, val); + if (pipe != INVALID_PIPE) + intel_wait_for_vblank(dev_priv, pipe); + mutex_lock(&dev_priv->pcu_lock); /* * The timeout isn't specified, the 2ms used here is based on @@ -1523,7 +1533,7 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv) } cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk); - bxt_set_cdclk(dev_priv, &cdclk_state); + bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } /** @@ -1541,7 +1551,7 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.vco = 0; cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk); - bxt_set_cdclk(dev_priv, &cdclk_state); + bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } static int cnl_calc_cdclk(int min_cdclk) @@ -1661,7 +1671,8 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) } static void cnl_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state) + const struct intel_cdclk_state *cdclk_state, + enum pipe pipe) { int cdclk = cdclk_state->cdclk; int vco = cdclk_state->vco; @@ -1702,13 +1713,15 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, cnl_cdclk_pll_enable(dev_priv, vco); val = divider | skl_cdclk_decimal(cdclk); - /* - * FIXME if only the cd2x divider needs changing, it could be done - * without shutting off the pipe (if only one pipe is active). - */ - val |= BXT_CDCLK_CD2X_PIPE_NONE; + if (pipe == INVALID_PIPE) + val |= BXT_CDCLK_CD2X_PIPE_NONE; + else + val |= BXT_CDCLK_CD2X_PIPE(pipe); I915_WRITE(CDCLK_CTL, val); + if (pipe != INVALID_PIPE) + intel_wait_for_vblank(dev_priv, pipe); + /* inform PCU of the change */ mutex_lock(&dev_priv->pcu_lock); sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, @@ -1845,7 +1858,8 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) } static void icl_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state) + const struct intel_cdclk_state *cdclk_state, + enum pipe pipe) { unsigned int cdclk = cdclk_state->cdclk; unsigned int vco = cdclk_state->vco; @@ -1870,6 +1884,11 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv, if (dev_priv->cdclk.hw.vco != vco) cnl_cdclk_pll_enable(dev_priv, vco); + /* + * On ICL CD2X_DIV can only be 1, so we'll never end up changing the + * divider here synchronized to a pipe while CDCLK is on, nor will we + * need the corresponding vblank wait. 
+ */ I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE | skl_cdclk_decimal(cdclk)); @@ -2000,7 +2019,7 @@ sanitize: sanitized_state.voltage_level = icl_calc_voltage_level(sanitized_state.cdclk); - icl_set_cdclk(dev_priv, &sanitized_state); + icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); } /** @@ -2018,7 +2037,7 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.vco = 0; cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk); - icl_set_cdclk(dev_priv, &cdclk_state); + icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } /** @@ -2046,7 +2065,7 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv) cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk); - cnl_set_cdclk(dev_priv, &cdclk_state); + cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } /** @@ -2064,7 +2083,7 @@ void cnl_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.vco = 0; cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk); - cnl_set_cdclk(dev_priv, &cdclk_state); + cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } /** @@ -2084,6 +2103,27 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a, } /** + * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update + * @a: first CDCLK state + * @b: second CDCLK state + * + * Returns: + * True if the CDCLK states require just a cd2x divider update, false if not. + */ +bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, + const struct intel_cdclk_state *a, + const struct intel_cdclk_state *b) +{ + /* Older hw doesn't have the capability */ + if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv)) + return false; + + return a->cdclk != b->cdclk && + a->vco == b->vco && + a->ref == b->ref; +} + +/** * intel_cdclk_changed - Determine if two CDCLK states are different * @a: first CDCLK state * @b: second CDCLK state @@ -2098,6 +2138,26 @@ bool intel_cdclk_changed(const struct intel_cdclk_state *a, a->voltage_level != b->voltage_level; } +/** + * intel_cdclk_swap_state - make atomic CDCLK configuration effective + * @state: atomic state + * + * This is the CDCLK version of drm_atomic_helper_swap_state() since the + * helper does not handle driver-specific global state. + * + * Similarly to the atomic helpers this function does a complete swap, + * i.e. it also puts the old state into @state. This is used by the commit + * code to determine how CDCLK has changed (for instance did it increase or + * decrease). + */ +void intel_cdclk_swap_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + + swap(state->cdclk.logical, dev_priv->cdclk.logical); + swap(state->cdclk.actual, dev_priv->cdclk.actual); +} + void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state, const char *context) { @@ -2111,12 +2171,14 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state, * intel_set_cdclk - Push the CDCLK state to the hardware * @dev_priv: i915 device * @cdclk_state: new CDCLK state + * @pipe: pipe with which to synchronize the update * * Program the hardware based on the passed in CDCLK state, * if necessary. 
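The pipe argument threaded through the set_cdclk() hooks makes a cd2x-divider-only change possible while the display is live, synchronized to one pipe's vblank; the pre/post plane-update helpers added just below then order the reprogramming so the pipe never runs with less CDCLK than its planes need. A compile-only sketch of those two decisions (struct and function names are made up; the tests mirror intel_cdclk_needs_cd2x_update() and the new helpers):

#include <stdbool.h>

struct cdclk_sketch {
	int cdclk, vco, ref;
};

/* A cd2x-divider-only change leaves the PLL (vco/ref) alone, so it can be
 * done without a full modeset, synchronized to an active pipe's vblank. */
bool cd2x_only_update(const struct cdclk_sketch *a, const struct cdclk_sketch *b)
{
	return a->cdclk != b->cdclk && a->vco == b->vco && a->ref == b->ref;
}

/* Raise CDCLK before the plane update and lower it afterwards, so the
 * pipe always has at least the bandwidth either configuration requires.
 * Without a pipe to synchronize against, program it up front. */
bool program_before_planes(const struct cdclk_sketch *old_state,
			   const struct cdclk_sketch *new_state,
			   bool have_pipe)
{
	return !have_pipe || old_state->cdclk <= new_state->cdclk;
}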
*/ -void intel_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state) +static void intel_set_cdclk(struct drm_i915_private *dev_priv, + const struct intel_cdclk_state *cdclk_state, + enum pipe pipe) { if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state)) return; @@ -2126,7 +2188,7 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to"); - dev_priv->display.set_cdclk(dev_priv, cdclk_state); + dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe); if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state), "cdclk state doesn't match!\n")) { @@ -2135,12 +2197,52 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv, } } +/** + * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware + * @dev_priv: i915 device + * @old_state: old CDCLK state + * @new_state: new CDCLK state + * @pipe: pipe with which to synchronize the update + * + * Program the hardware before updating the HW plane state based on the passed + * in CDCLK state, if necessary. + */ +void +intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv, + const struct intel_cdclk_state *old_state, + const struct intel_cdclk_state *new_state, + enum pipe pipe) +{ + if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk) + intel_set_cdclk(dev_priv, new_state, pipe); +} + +/** + * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware + * @dev_priv: i915 device + * @old_state: old CDCLK state + * @new_state: new CDCLK state + * @pipe: pipe with which to synchronize the update + * + * Program the hardware after updating the HW plane state based on the passed + * in CDCLK state, if necessary. + */ +void +intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv, + const struct intel_cdclk_state *old_state, + const struct intel_cdclk_state *new_state, + enum pipe pipe) +{ + if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk) + intel_set_cdclk(dev_priv, new_state, pipe); +} + static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv, int pixel_rate) { if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return DIV_ROUND_UP(pixel_rate, 2); - else if (IS_GEN9(dev_priv) || + else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return pixel_rate; else if (IS_CHERRYVIEW(dev_priv)) @@ -2176,7 +2278,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { /* Display WA #1145: glk,cnl */ min_cdclk = max(316800, min_cdclk); - } else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) { + } else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) { /* Display WA #1144: skl,bxt */ min_cdclk = max(432000, min_cdclk); } @@ -2185,19 +2287,8 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) /* * According to BSpec, "The CD clock frequency must be at least twice * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. - * - * FIXME: Check the actual, not default, BCLK being used. - * - * FIXME: This does not depend on ->has_audio because the higher CDCLK - * is required for audio probe, also when there are no audio capable - * displays connected at probe time. This leads to unnecessarily high - * CDCLK when audio is not required. - * - * FIXME: This limit is only applied when there are displays connected - * at probe time. 
If we probe without displays, we'll still end up using - * the platform minimum CDCLK, failing audio probe. */ - if (INTEL_GEN(dev_priv) >= 9) + if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) min_cdclk = max(2 * 96000, min_cdclk); /* @@ -2237,7 +2328,7 @@ static int intel_compute_min_cdclk(struct drm_atomic_state *state) intel_state->min_cdclk[i] = min_cdclk; } - min_cdclk = 0; + min_cdclk = intel_state->cdclk.force_min_cdclk; for_each_pipe(dev_priv, pipe) min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk); @@ -2298,7 +2389,8 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state) vlv_calc_voltage_level(dev_priv, cdclk); if (!intel_state->active_crtcs) { - cdclk = vlv_calc_cdclk(dev_priv, 0); + cdclk = vlv_calc_cdclk(dev_priv, + intel_state->cdclk.force_min_cdclk); intel_state->cdclk.actual.cdclk = cdclk; intel_state->cdclk.actual.voltage_level = @@ -2331,7 +2423,7 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state) bdw_calc_voltage_level(cdclk); if (!intel_state->active_crtcs) { - cdclk = bdw_calc_cdclk(0); + cdclk = bdw_calc_cdclk(intel_state->cdclk.force_min_cdclk); intel_state->cdclk.actual.cdclk = cdclk; intel_state->cdclk.actual.voltage_level = @@ -2403,7 +2495,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) skl_calc_voltage_level(cdclk); if (!intel_state->active_crtcs) { - cdclk = skl_calc_cdclk(0, vco); + cdclk = skl_calc_cdclk(intel_state->cdclk.force_min_cdclk, vco); intel_state->cdclk.actual.vco = vco; intel_state->cdclk.actual.cdclk = cdclk; @@ -2442,10 +2534,10 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) if (!intel_state->active_crtcs) { if (IS_GEMINILAKE(dev_priv)) { - cdclk = glk_calc_cdclk(0); + cdclk = glk_calc_cdclk(intel_state->cdclk.force_min_cdclk); vco = glk_de_pll_vco(dev_priv, cdclk); } else { - cdclk = bxt_calc_cdclk(0); + cdclk = bxt_calc_cdclk(intel_state->cdclk.force_min_cdclk); vco = bxt_de_pll_vco(dev_priv, cdclk); } @@ -2481,7 +2573,7 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state) cnl_compute_min_voltage_level(intel_state)); if (!intel_state->active_crtcs) { - cdclk = cnl_calc_cdclk(0); + cdclk = cnl_calc_cdclk(intel_state->cdclk.force_min_cdclk); vco = cnl_cdclk_pll_vco(dev_priv, cdclk); intel_state->cdclk.actual.vco = vco; @@ -2517,7 +2609,7 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state) cnl_compute_min_voltage_level(intel_state)); if (!intel_state->active_crtcs) { - cdclk = icl_calc_cdclk(0, ref); + cdclk = icl_calc_cdclk(intel_state->cdclk.force_min_cdclk, ref); vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); intel_state->cdclk.actual.vco = vco; @@ -2537,7 +2629,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return 2 * max_cdclk_freq; - else if (IS_GEN9(dev_priv) || + else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return max_cdclk_freq; else if (IS_CHERRYVIEW(dev_priv)) @@ -2558,7 +2650,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) */ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) { - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { if (dev_priv->cdclk.hw.ref == 24000) dev_priv->max_cdclk_freq = 648000; else @@ -2666,7 +2758,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv) rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000, fraction) - 1); - if (HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) rawclk |= 
ICP_RAWCLK_NUM(numerator); } @@ -2688,7 +2780,7 @@ static int vlv_hrawclk(struct drm_i915_private *dev_priv) static int g4x_hrawclk(struct drm_i915_private *dev_priv) { - uint32_t clkcfg; + u32 clkcfg; /* hrawclock is 1/4 the FSB frequency */ clkcfg = I915_READ(CLKCFG); @@ -2721,7 +2813,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv) */ void intel_update_rawclk(struct drm_i915_private *dev_priv) { - if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) dev_priv->rawclk_freq = cnp_rawclk(dev_priv); else if (HAS_PCH_SPLIT(dev_priv)) dev_priv->rawclk_freq = pch_rawclk(dev_priv); @@ -2742,18 +2834,13 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv) */ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { - if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.set_cdclk = chv_set_cdclk; - dev_priv->display.modeset_calc_cdclk = - vlv_modeset_calc_cdclk; - } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->display.set_cdclk = vlv_set_cdclk; - dev_priv->display.modeset_calc_cdclk = - vlv_modeset_calc_cdclk; - } else if (IS_BROADWELL(dev_priv)) { - dev_priv->display.set_cdclk = bdw_set_cdclk; + if (INTEL_GEN(dev_priv) >= 11) { + dev_priv->display.set_cdclk = icl_set_cdclk; + dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; + } else if (IS_CANNONLAKE(dev_priv)) { + dev_priv->display.set_cdclk = cnl_set_cdclk; dev_priv->display.modeset_calc_cdclk = - bdw_modeset_calc_cdclk; + cnl_modeset_calc_cdclk; } else if (IS_GEN9_LP(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = @@ -2762,32 +2849,37 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) dev_priv->display.set_cdclk = skl_set_cdclk; dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk; - } else if (IS_CANNONLAKE(dev_priv)) { - dev_priv->display.set_cdclk = cnl_set_cdclk; + } else if (IS_BROADWELL(dev_priv)) { + dev_priv->display.set_cdclk = bdw_set_cdclk; dev_priv->display.modeset_calc_cdclk = - cnl_modeset_calc_cdclk; - } else if (IS_ICELAKE(dev_priv)) { - dev_priv->display.set_cdclk = icl_set_cdclk; - dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; + bdw_modeset_calc_cdclk; + } else if (IS_CHERRYVIEW(dev_priv)) { + dev_priv->display.set_cdclk = chv_set_cdclk; + dev_priv->display.modeset_calc_cdclk = + vlv_modeset_calc_cdclk; + } else if (IS_VALLEYVIEW(dev_priv)) { + dev_priv->display.set_cdclk = vlv_set_cdclk; + dev_priv->display.modeset_calc_cdclk = + vlv_modeset_calc_cdclk; } - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) dev_priv->display.get_cdclk = icl_get_cdclk; else if (IS_CANNONLAKE(dev_priv)) dev_priv->display.get_cdclk = cnl_get_cdclk; - else if (IS_GEN9_BC(dev_priv)) - dev_priv->display.get_cdclk = skl_get_cdclk; else if (IS_GEN9_LP(dev_priv)) dev_priv->display.get_cdclk = bxt_get_cdclk; + else if (IS_GEN9_BC(dev_priv)) + dev_priv->display.get_cdclk = skl_get_cdclk; else if (IS_BROADWELL(dev_priv)) dev_priv->display.get_cdclk = bdw_get_cdclk; else if (IS_HASWELL(dev_priv)) dev_priv->display.get_cdclk = hsw_get_cdclk; else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) dev_priv->display.get_cdclk = vlv_get_cdclk; - else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) + else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk; - else if (IS_GEN5(dev_priv)) + else if (IS_GEN(dev_priv, 5)) dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk; else if (IS_GM45(dev_priv)) 
dev_priv->display.get_cdclk = gm45_get_cdclk; diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 5127da286a2b..60f21a1fdbbe 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -40,23 +40,6 @@ #define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1)) #define LEGACY_LUT_LENGTH 256 - -/* Post offset values for RGB->YCBCR conversion */ -#define POSTOFF_RGB_TO_YUV_HI 0x800 -#define POSTOFF_RGB_TO_YUV_ME 0x100 -#define POSTOFF_RGB_TO_YUV_LO 0x800 - -/* - * These values are direct register values specified in the Bspec, - * for RGB->YUV conversion matrix (colorspace BT709) - */ -#define CSC_RGB_TO_YUV_RU_GU 0x2ba809d8 -#define CSC_RGB_TO_YUV_BU 0x37e80000 -#define CSC_RGB_TO_YUV_RY_GY 0x1e089cc0 -#define CSC_RGB_TO_YUV_BY 0xb5280000 -#define CSC_RGB_TO_YUV_RV_GV 0xbce89ad8 -#define CSC_RGB_TO_YUV_BV 0x1e080000 - /* * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point * format). This macro takes the coefficient we want transformed and the @@ -69,17 +52,57 @@ #define ILK_CSC_COEFF_FP(coeff, fbits) \ (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8) -#define ILK_CSC_COEFF_LIMITED_RANGE \ - ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9) -#define ILK_CSC_COEFF_1_0 \ - ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) +#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0 +#define ILK_CSC_COEFF_1_0 0x7800 + +#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255) + +static const u16 ilk_csc_off_zero[3] = {}; + +static const u16 ilk_csc_coeff_identity[9] = { + ILK_CSC_COEFF_1_0, 0, 0, + 0, ILK_CSC_COEFF_1_0, 0, + 0, 0, ILK_CSC_COEFF_1_0, +}; + +static const u16 ilk_csc_postoff_limited_range[3] = { + ILK_CSC_POSTOFF_LIMITED_RANGE, + ILK_CSC_POSTOFF_LIMITED_RANGE, + ILK_CSC_POSTOFF_LIMITED_RANGE, +}; + +static const u16 ilk_csc_coeff_limited_range[9] = { + ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, + 0, ILK_CSC_COEFF_LIMITED_RANGE, 0, + 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, +}; + +/* + * These values are direct register values specified in the Bspec, + * for RGB->YUV conversion matrix (colorspace BT709) + */ +static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = { + 0x1e08, 0x9cc0, 0xb528, + 0x2ba8, 0x09d8, 0x37e8, + 0xbce8, 0x9ad8, 0x1e08, +}; + +/* Post offset values for RGB->YCBCR conversion */ +static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = { + 0x0800, 0x0100, 0x0800, +}; -static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state) +static bool lut_is_legacy(const struct drm_property_blob *lut) { - return !state->degamma_lut && - !state->ctm && - state->gamma_lut && - drm_color_lut_size(state->gamma_lut) == LEGACY_LUT_LENGTH; + return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH; +} + +static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state) +{ + return !crtc_state->base.degamma_lut && + !crtc_state->base.ctm && + crtc_state->base.gamma_lut && + lut_is_legacy(crtc_state->base.gamma_lut); } /* @@ -108,166 +131,206 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input) return result; } -static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc) +static void ilk_update_pipe_csc(struct intel_crtc *crtc, + const u16 preoff[3], + const u16 coeff[9], + const u16 postoff[3]) { - int pipe = intel_crtc->pipe; - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; - I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); - I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 
0); - I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); + I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]); + I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]); + I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]); - I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), CSC_RGB_TO_YUV_RU_GU); - I915_WRITE(PIPE_CSC_COEFF_BU(pipe), CSC_RGB_TO_YUV_BU); + I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]); + I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16); - I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), CSC_RGB_TO_YUV_RY_GY); - I915_WRITE(PIPE_CSC_COEFF_BY(pipe), CSC_RGB_TO_YUV_BY); + I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]); + I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16); - I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), CSC_RGB_TO_YUV_RV_GV); - I915_WRITE(PIPE_CSC_COEFF_BV(pipe), CSC_RGB_TO_YUV_BV); + I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]); + I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16); - I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), POSTOFF_RGB_TO_YUV_HI); - I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), POSTOFF_RGB_TO_YUV_ME); - I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), POSTOFF_RGB_TO_YUV_LO); - I915_WRITE(PIPE_CSC_MODE(pipe), 0); + if (INTEL_GEN(dev_priv) >= 7) { + I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]); + I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]); + I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]); + } } -static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state) +static void icl_update_output_csc(struct intel_crtc *crtc, + const u16 preoff[3], + const u16 coeff[9], + const u16 postoff[3]) { - struct drm_crtc *crtc = crtc_state->crtc; - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int i, pipe = intel_crtc->pipe; - uint16_t coeffs[9] = { 0, }; - struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state); - bool limited_color_range = false; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]); + I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]); + I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]); + + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]); + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2]); + + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]); + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5]); + + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]); + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8]); + + I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]); + I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]); + I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]); +} + +static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); /* * FIXME if there's a gamma LUT after the CSC, we should * do the range compression using the gamma LUT instead. 
*/ - if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) - limited_color_range = intel_crtc_state->limited_color_range; + return crtc_state->limited_color_range && + (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || + IS_GEN_RANGE(dev_priv, 9, 10)); +} - if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || - intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) { - ilk_load_ycbcr_conversion_matrix(intel_crtc); - return; - } else if (crtc_state->ctm) { - struct drm_color_ctm *ctm = crtc_state->ctm->data; - const u64 *input; - u64 temp[9]; +static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, + u16 coeffs[9]) +{ + const struct drm_color_ctm *ctm = crtc_state->base.ctm->data; + const u64 *input; + u64 temp[9]; + int i; - if (limited_color_range) - input = ctm_mult_by_limited(temp, ctm->matrix); - else - input = ctm->matrix; + if (ilk_csc_limited_range(crtc_state)) + input = ctm_mult_by_limited(temp, ctm->matrix); + else + input = ctm->matrix; + + /* + * Convert fixed point S31.32 input to format supported by the + * hardware. + */ + for (i = 0; i < 9; i++) { + u64 abs_coeff = ((1ULL << 63) - 1) & input[i]; /* - * Convert fixed point S31.32 input to format supported by the + * Clamp input value to min/max supported by * hardware. */ - for (i = 0; i < ARRAY_SIZE(coeffs); i++) { - uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i]; - - /* - * Clamp input value to min/max supported by - * hardware. - */ - abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1); - - /* sign bit */ - if (CTM_COEFF_NEGATIVE(input[i])) - coeffs[i] |= 1 << 15; - - if (abs_coeff < CTM_COEFF_0_125) - coeffs[i] |= (3 << 12) | - ILK_CSC_COEFF_FP(abs_coeff, 12); - else if (abs_coeff < CTM_COEFF_0_25) - coeffs[i] |= (2 << 12) | - ILK_CSC_COEFF_FP(abs_coeff, 11); - else if (abs_coeff < CTM_COEFF_0_5) - coeffs[i] |= (1 << 12) | - ILK_CSC_COEFF_FP(abs_coeff, 10); - else if (abs_coeff < CTM_COEFF_1_0) - coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9); - else if (abs_coeff < CTM_COEFF_2_0) - coeffs[i] |= (7 << 12) | - ILK_CSC_COEFF_FP(abs_coeff, 8); - else - coeffs[i] |= (6 << 12) | - ILK_CSC_COEFF_FP(abs_coeff, 7); - } - } else { - /* - * Load an identity matrix if no coefficients are provided. - * - * TODO: Check what kind of values actually come out of the - * pipe with these coeff/postoff values and adjust to get the - * best accuracy. Perhaps we even need to take the bpc value - * into consideration. 
- */ - for (i = 0; i < 3; i++) { - if (limited_color_range) - coeffs[i * 3 + i] = - ILK_CSC_COEFF_LIMITED_RANGE; - else - coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0; - } + abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1); + + coeffs[i] = 0; + + /* sign bit */ + if (CTM_COEFF_NEGATIVE(input[i])) + coeffs[i] |= 1 << 15; + + if (abs_coeff < CTM_COEFF_0_125) + coeffs[i] |= (3 << 12) | + ILK_CSC_COEFF_FP(abs_coeff, 12); + else if (abs_coeff < CTM_COEFF_0_25) + coeffs[i] |= (2 << 12) | + ILK_CSC_COEFF_FP(abs_coeff, 11); + else if (abs_coeff < CTM_COEFF_0_5) + coeffs[i] |= (1 << 12) | + ILK_CSC_COEFF_FP(abs_coeff, 10); + else if (abs_coeff < CTM_COEFF_1_0) + coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9); + else if (abs_coeff < CTM_COEFF_2_0) + coeffs[i] |= (7 << 12) | + ILK_CSC_COEFF_FP(abs_coeff, 8); + else + coeffs[i] |= (6 << 12) | + ILK_CSC_COEFF_FP(abs_coeff, 7); } +} - I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]); - I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16); - - I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]); - I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16); - - I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]); - I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16); - - I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); - I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); - I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); +static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + bool limited_color_range = ilk_csc_limited_range(crtc_state); + + if (crtc_state->base.ctm) { + u16 coeff[9]; + + ilk_csc_convert_ctm(crtc_state, coeff); + ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff, + limited_color_range ? + ilk_csc_postoff_limited_range : + ilk_csc_off_zero); + } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { + ilk_update_pipe_csc(crtc, ilk_csc_off_zero, + ilk_csc_coeff_rgb_to_ycbcr, + ilk_csc_postoff_rgb_to_ycbcr); + } else if (limited_color_range) { + ilk_update_pipe_csc(crtc, ilk_csc_off_zero, + ilk_csc_coeff_limited_range, + ilk_csc_postoff_limited_range); + } else if (crtc_state->csc_enable) { + /* + * On GLK+ both pipe CSC and degamma LUT are controlled + * by csc_enable. Hence for the cases where the degama + * LUT is needed but CSC is not we need to load an + * identity matrix. 
+ */ + WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv)); - if (INTEL_GEN(dev_priv) > 6) { - uint16_t postoff = 0; + ilk_update_pipe_csc(crtc, ilk_csc_off_zero, + ilk_csc_coeff_identity, + ilk_csc_off_zero); + } - if (limited_color_range) - postoff = (16 * (1 << 12) / 255) & 0x1fff; + I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); +} - I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); +static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - I915_WRITE(PIPE_CSC_MODE(pipe), 0); - } else { - uint32_t mode = CSC_MODE_YUV_TO_RGB; + if (crtc_state->base.ctm) { + u16 coeff[9]; - if (limited_color_range) - mode |= CSC_BLACK_SCREEN_OFFSET; + ilk_csc_convert_ctm(crtc_state, coeff); + ilk_update_pipe_csc(crtc, ilk_csc_off_zero, + coeff, ilk_csc_off_zero); + } - I915_WRITE(PIPE_CSC_MODE(pipe), mode); + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { + icl_update_output_csc(crtc, ilk_csc_off_zero, + ilk_csc_coeff_rgb_to_ycbcr, + ilk_csc_postoff_rgb_to_ycbcr); + } else if (crtc_state->limited_color_range) { + icl_update_output_csc(crtc, ilk_csc_off_zero, + ilk_csc_coeff_limited_range, + ilk_csc_postoff_limited_range); } + + I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } /* * Set up the pipe CSC unit on CherryView. */ -static void cherryview_load_csc_matrix(struct drm_crtc_state *state) +static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state) { - struct drm_crtc *crtc = state->crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = to_intel_crtc(crtc)->pipe; - uint32_t mode; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; - if (state->ctm) { - struct drm_color_ctm *ctm = state->ctm->data; - uint16_t coeffs[9] = { 0, }; + if (crtc_state->base.ctm) { + const struct drm_color_ctm *ctm = crtc_state->base.ctm->data; + u16 coeffs[9] = {}; int i; for (i = 0; i < ARRAY_SIZE(coeffs); i++) { - uint64_t abs_coeff = + u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i]; /* Round coefficient. */ @@ -293,35 +356,42 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]); } - mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); - if (!crtc_state_is_legacy_gamma(state)) { - mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | - (state->gamma_lut ? 
CGM_PIPE_MODE_GAMMA : 0); - } - I915_WRITE(CGM_PIPE_MODE(pipe), mode); + I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode); } -void intel_color_set_csc(struct drm_crtc_state *crtc_state) +/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */ +static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color) { - struct drm_device *dev = crtc_state->crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + return (color->red & 0xff) << 16 | + (color->green & 0xff) << 8 | + (color->blue & 0xff); +} - if (dev_priv->display.load_csc_matrix) - dev_priv->display.load_csc_matrix(crtc_state); +/* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */ +static u32 i965_lut_10p6_udw(const struct drm_color_lut *color) +{ + return (color->red >> 8) << 16 | + (color->green >> 8) << 8 | + (color->blue >> 8); +} + +static u32 ilk_lut_10(const struct drm_color_lut *color) +{ + return drm_color_lut_extract(color->red, 10) << 20 | + drm_color_lut_extract(color->green, 10) << 10 | + drm_color_lut_extract(color->blue, 10); } /* Loads the legacy palette/gamma unit for the CRTC. */ -static void i9xx_load_luts_internal(struct drm_crtc *crtc, - struct drm_property_blob *blob, - struct intel_crtc_state *crtc_state) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; +static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state, + const struct drm_property_blob *blob) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; int i; - if (HAS_GMCH_DISPLAY(dev_priv)) { + if (HAS_GMCH(dev_priv)) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else @@ -329,23 +399,15 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc, } if (blob) { - struct drm_color_lut *lut = blob->data; + const struct drm_color_lut *lut = blob->data; + for (i = 0; i < 256; i++) { - uint32_t word = + u32 word = (drm_color_lut_extract(lut[i].red, 8) << 16) | (drm_color_lut_extract(lut[i].green, 8) << 8) | drm_color_lut_extract(lut[i].blue, 8); - if (HAS_GMCH_DISPLAY(dev_priv)) - I915_WRITE(PALETTE(pipe, i), word); - else - I915_WRITE(LGC_PALETTE(pipe, i), word); - } - } else { - for (i = 0; i < 256; i++) { - uint32_t word = (i << 16) | (i << 8) | i; - - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) I915_WRITE(PALETTE(pipe, i), word); else I915_WRITE(LGC_PALETTE(pipe, i), word); @@ -353,138 +415,189 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc, } } -static void i9xx_load_luts(struct drm_crtc_state *crtc_state) +static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) { - i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut, - to_intel_crtc_state(crtc_state)); + i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut); } -/* Loads the legacy palette/gamma unit for the CRTC on Haswell. 
*/ -static void haswell_load_luts(struct drm_crtc_state *crtc_state) +static void i9xx_color_commit(const struct intel_crtc_state *crtc_state) { - struct drm_crtc *crtc = crtc_state->crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *intel_crtc_state = - to_intel_crtc_state(crtc_state); - bool reenable_ips = false; - - /* - * Workaround : Do not read or write the pipe palette/gamma data while - * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. - */ - if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled && - (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) { - hsw_disable_ips(intel_crtc_state); - reenable_ips = true; - } + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 val; + + val = I915_READ(PIPECONF(pipe)); + val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX; + val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + I915_WRITE(PIPECONF(pipe), val); +} - intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT; - I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); +static void ilk_color_commit(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 val; - i9xx_load_luts(crtc_state); + val = I915_READ(PIPECONF(pipe)); + val &= ~PIPECONF_GAMMA_MODE_MASK_ILK; + val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + I915_WRITE(PIPECONF(pipe), val); - if (reenable_ips) - hsw_enable_ips(intel_crtc_state); + ilk_load_csc_matrix(crtc_state); } -static void bdw_load_degamma_lut(struct drm_crtc_state *state) +static void hsw_color_commit(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(state->crtc->dev); - enum pipe pipe = to_intel_crtc(state->crtc)->pipe; - uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - I915_WRITE(PREC_PAL_INDEX(pipe), - PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT); + I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); - if (state->degamma_lut) { - struct drm_color_lut *lut = state->degamma_lut->data; + ilk_load_csc_matrix(crtc_state); +} - for (i = 0; i < lut_size; i++) { - uint32_t word = - drm_color_lut_extract(lut[i].red, 10) << 20 | - drm_color_lut_extract(lut[i].green, 10) << 10 | - drm_color_lut_extract(lut[i].blue, 10); +static void skl_color_commit(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 val = 0; - I915_WRITE(PREC_PAL_DATA(pipe), word); - } - } else { - for (i = 0; i < lut_size; i++) { - uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1); + /* + * We don't (yet) allow userspace to control the pipe background color, + * so force it to black, but apply pipe gamma and CSC appropriately + * so that its handling will match how we program our planes. 
+ */ + if (crtc_state->gamma_enable) + val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE; + if (crtc_state->csc_enable) + val |= SKL_BOTTOM_COLOR_CSC_ENABLE; + I915_WRITE(SKL_BOTTOM_COLOR(pipe), val); + + I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); + + if (INTEL_GEN(dev_priv) >= 11) + icl_load_csc_matrix(crtc_state); + else + ilk_load_csc_matrix(crtc_state); +} - I915_WRITE(PREC_PAL_DATA(pipe), - (v << 20) | (v << 10) | v); - } +static void i965_load_lut_10p6(struct intel_crtc *crtc, + const struct drm_property_blob *blob) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_color_lut *lut = blob->data; + int i, lut_size = drm_color_lut_size(blob); + enum pipe pipe = crtc->pipe; + + for (i = 0; i < lut_size - 1; i++) { + I915_WRITE(PALETTE(pipe, 2 * i + 0), + i965_lut_10p6_ldw(&lut[i])); + I915_WRITE(PALETTE(pipe, 2 * i + 1), + i965_lut_10p6_udw(&lut[i])); } + + I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red); + I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green); + I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue); } -static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset) +static void i965_load_luts(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(state->crtc->dev); - enum pipe pipe = to_intel_crtc(state->crtc)->pipe; - uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; - - WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; - I915_WRITE(PREC_PAL_INDEX(pipe), - (offset ? PAL_PREC_SPLIT_MODE : 0) | - PAL_PREC_AUTO_INCREMENT | - offset); + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + i9xx_load_luts(crtc_state); + else + i965_load_lut_10p6(crtc, gamma_lut); +} - if (state->gamma_lut) { - struct drm_color_lut *lut = state->gamma_lut->data; +static void ilk_load_lut_10(struct intel_crtc *crtc, + const struct drm_property_blob *blob) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_color_lut *lut = blob->data; + int i, lut_size = drm_color_lut_size(blob); + enum pipe pipe = crtc->pipe; - for (i = 0; i < lut_size; i++) { - uint32_t word = - (drm_color_lut_extract(lut[i].red, 10) << 20) | - (drm_color_lut_extract(lut[i].green, 10) << 10) | - drm_color_lut_extract(lut[i].blue, 10); + for (i = 0; i < lut_size; i++) + I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i])); +} - I915_WRITE(PREC_PAL_DATA(pipe), word); - } +static void ilk_load_luts(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; - /* Program the max register to clamp values > 1.0. 
*/ - i = lut_size - 1; - I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), - drm_color_lut_extract(lut[i].red, 16)); - I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), - drm_color_lut_extract(lut[i].green, 16)); - I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), - drm_color_lut_extract(lut[i].blue, 16)); - } else { - for (i = 0; i < lut_size; i++) { - uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1); + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + i9xx_load_luts(crtc_state); + else + ilk_load_lut_10(crtc, gamma_lut); +} - I915_WRITE(PREC_PAL_DATA(pipe), - (v << 20) | (v << 10) | v); - } +static int ivb_lut_10_size(u32 prec_index) +{ + if (prec_index & PAL_PREC_SPLIT_MODE) + return 512; + else + return 1024; +} - I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), (1 << 16) - 1); - I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1); - I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1); +/* + * IVB/HSW Bspec / PAL_PREC_INDEX: + * "Restriction : Index auto increment mode is not + * supported and must not be enabled." + */ +static void ivb_load_lut_10(struct intel_crtc *crtc, + const struct drm_property_blob *blob, + u32 prec_index) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + int hw_lut_size = ivb_lut_10_size(prec_index); + const struct drm_color_lut *lut = blob->data; + int i, lut_size = drm_color_lut_size(blob); + enum pipe pipe = crtc->pipe; + + for (i = 0; i < hw_lut_size; i++) { + /* We discard half the user entries in split gamma mode */ + const struct drm_color_lut *entry = + &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; + + I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++); + I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry)); } + + /* + * Reset the index, otherwise it prevents the legacy palette to be + * written properly. + */ + I915_WRITE(PREC_PAL_INDEX(pipe), 0); } -/* Loads the palette/gamma unit for the CRTC on Broadwell+. 
*/ -static void broadwell_load_luts(struct drm_crtc_state *state) +/* On BDW+ the index auto increment mode actually works */ +static void bdw_load_lut_10(struct intel_crtc *crtc, + const struct drm_property_blob *blob, + u32 prec_index) { - struct drm_i915_private *dev_priv = to_i915(state->crtc->dev); - struct intel_crtc_state *intel_state = to_intel_crtc_state(state); - enum pipe pipe = to_intel_crtc(state->crtc)->pipe; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + int hw_lut_size = ivb_lut_10_size(prec_index); + const struct drm_color_lut *lut = blob->data; + int i, lut_size = drm_color_lut_size(blob); + enum pipe pipe = crtc->pipe; - if (crtc_state_is_legacy_gamma(state)) { - haswell_load_luts(state); - return; - } + I915_WRITE(PREC_PAL_INDEX(pipe), prec_index | + PAL_PREC_AUTO_INCREMENT); - bdw_load_degamma_lut(state); - bdw_load_gamma_lut(state, - INTEL_INFO(dev_priv)->color.degamma_lut_size); + for (i = 0; i < hw_lut_size; i++) { + /* We discard half the user entries in split gamma mode */ + const struct drm_color_lut *entry = + &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; - intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT; - I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT); - POSTING_READ(GAMMA_MODE(pipe)); + I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry)); + } /* * Reset the index, otherwise it prevents the legacy palette to be @@ -493,12 +606,82 @@ static void broadwell_load_luts(struct drm_crtc_state *state) I915_WRITE(PREC_PAL_INDEX(pipe), 0); } -static void glk_load_degamma_lut(struct drm_crtc_state *state) +static void ivb_load_lut_10_max(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(state->crtc->dev); - enum pipe pipe = to_intel_crtc(state->crtc)->pipe; - const uint32_t lut_size = 33; - uint32_t i; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + /* Program the max register to clamp values > 1.0. */ + I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16); + I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16); + I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16); + + /* + * Program the gc max 2 register to clamp values > 1.0. 
+ * ToDo: Extend the ABI to be able to program values + * from 3.0 to 7.0 + */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16); + I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16); + I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16); + } +} + +static void ivb_load_luts(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; + const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut; + + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { + i9xx_load_luts(crtc_state); + } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) { + ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | + PAL_PREC_INDEX_VALUE(0)); + ivb_load_lut_10_max(crtc); + ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE | + PAL_PREC_INDEX_VALUE(512)); + } else { + const struct drm_property_blob *blob = gamma_lut ?: degamma_lut; + + ivb_load_lut_10(crtc, blob, + PAL_PREC_INDEX_VALUE(0)); + ivb_load_lut_10_max(crtc); + } +} + +static void bdw_load_luts(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; + const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut; + + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { + i9xx_load_luts(crtc_state); + } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) { + bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | + PAL_PREC_INDEX_VALUE(0)); + ivb_load_lut_10_max(crtc); + bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE | + PAL_PREC_INDEX_VALUE(512)); + } else { + const struct drm_property_blob *blob = gamma_lut ?: degamma_lut; + + bdw_load_lut_10(crtc, blob, + PAL_PREC_INDEX_VALUE(0)); + ivb_load_lut_10_max(crtc); + } +} + +static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; + const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data; + u32 i; /* * When setting the auto-increment bit, the hardware seems to @@ -508,66 +691,120 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state) I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0); I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT); + for (i = 0; i < lut_size; i++) { + /* + * First 33 entries represent range from 0 to 1.0 + * 34th and 35th entry will represent extended range + * inputs 3.0 and 7.0 respectively, currently clamped + * at 1.0. Since the precision is 16bit, the user + * value can be directly filled to register. + * The pipe degamma table in GLK+ onwards doesn't + * support different values per channel, so this just + * programs green value which will be equal to Red and + * Blue into the lut registers. + * ToDo: Extend to max 7.0. Enable 32 bit input value + * as compared to just 16 to achieve this. + */ + I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green); + } + + /* Clamp values > 1.0. 
*/ + while (i++ < 35) + I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16); +} + +static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; + u32 i; + /* - * FIXME: The pipe degamma table in geminilake doesn't support - * different values per channel, so this just loads a linear table. + * When setting the auto-increment bit, the hardware seems to + * ignore the index bits, so we need to reset it to index 0 + * separately. */ + I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0); + I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT); + for (i = 0; i < lut_size; i++) { - uint32_t v = (i * (1 << 16)) / (lut_size - 1); + u32 v = (i << 16) / (lut_size - 1); I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v); } /* Clamp values > 1.0. */ while (i++ < 35) - I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16)); + I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16); } -static void glk_load_luts(struct drm_crtc_state *state) +static void glk_load_luts(const struct intel_crtc_state *crtc_state) { - struct drm_crtc *crtc = state->crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc_state *intel_state = to_intel_crtc_state(state); - enum pipe pipe = to_intel_crtc(crtc)->pipe; + const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - glk_load_degamma_lut(state); + /* + * On GLK+ both pipe CSC and degamma LUT are controlled + * by csc_enable. Hence for the cases where the CSC is + * needed but degamma LUT is not we need to load a + * linear degamma LUT. In fact we'll just always load + * the degama LUT so that we don't have to reload + * it every time the pipe CSC is being enabled. + */ + if (crtc_state->base.degamma_lut) + glk_load_degamma_lut(crtc_state); + else + glk_load_degamma_lut_linear(crtc_state); - if (crtc_state_is_legacy_gamma(state)) { - haswell_load_luts(state); - return; + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { + i9xx_load_luts(crtc_state); + } else { + bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); + ivb_load_lut_10_max(crtc); } +} + +static void icl_load_luts(const struct intel_crtc_state *crtc_state) +{ + const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - bdw_load_gamma_lut(state, 0); + if (crtc_state->base.degamma_lut) + glk_load_degamma_lut(crtc_state); - intel_state->gamma_mode = GAMMA_MODE_MODE_10BIT; - I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_10BIT); - POSTING_READ(GAMMA_MODE(pipe)); + if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) == + GAMMA_MODE_MODE_8BIT) { + i9xx_load_luts(crtc_state); + } else { + bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); + ivb_load_lut_10_max(crtc); + } } -/* Loads the palette/gamma unit for the CRTC on CherryView. 
*/ -static void cherryview_load_luts(struct drm_crtc_state *state) +static void cherryview_load_luts(const struct intel_crtc_state *crtc_state) { - struct drm_crtc *crtc = state->crtc; - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - struct drm_color_lut *lut; - uint32_t i, lut_size; - uint32_t word0, word1; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; + const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut; + enum pipe pipe = crtc->pipe; - if (crtc_state_is_legacy_gamma(state)) { - /* Turn off degamma/gamma on CGM block. */ - I915_WRITE(CGM_PIPE_MODE(pipe), - (state->ctm ? CGM_PIPE_MODE_CSC : 0)); - i9xx_load_luts_internal(crtc, state->gamma_lut, - to_intel_crtc_state(state)); + cherryview_load_csc_matrix(crtc_state); + + if (crtc_state_is_legacy_gamma(crtc_state)) { + i9xx_load_luts(crtc_state); return; } - if (state->degamma_lut) { - lut = state->degamma_lut->data; - lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; + if (degamma_lut) { + const struct drm_color_lut *lut = degamma_lut->data; + int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; + for (i = 0; i < lut_size; i++) { + u32 word0, word1; + /* Write LUT in U0.14 format. */ word0 = (drm_color_lut_extract(lut[i].green, 14) << 16) | @@ -579,10 +816,13 @@ static void cherryview_load_luts(struct drm_crtc_state *state) } } - if (state->gamma_lut) { - lut = state->gamma_lut->data; - lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + if (gamma_lut) { + const struct drm_color_lut *lut = gamma_lut->data; + int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + for (i = 0; i < lut_size; i++) { + u32 word0, word1; + /* Write LUT in U0.10 format. */ word0 = (drm_color_lut_extract(lut[i].green, 10) << 16) | @@ -593,84 +833,445 @@ static void cherryview_load_luts(struct drm_crtc_state *state) I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1); } } +} - I915_WRITE(CGM_PIPE_MODE(pipe), - (state->ctm ? CGM_PIPE_MODE_CSC : 0) | - (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | - (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0)); +void intel_color_load_luts(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + dev_priv->display.load_luts(crtc_state); +} + +void intel_color_commit(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + dev_priv->display.color_commit(crtc_state); +} + +int intel_color_check(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + return dev_priv->display.color_check(crtc_state); +} + +static bool need_plane_update(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); /* - * Also program a linear LUT in the legacy block (behind the - * CGM block). + * On pre-SKL the pipe gamma enable and pipe csc enable for + * the pipe bottom color are configured via the primary plane. + * We have to reconfigure that even if the plane is inactive. 
*/ - i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state)); + return crtc_state->active_planes & BIT(plane->id) || + (INTEL_GEN(dev_priv) < 9 && + plane->id == PLANE_PRIMARY); } -void intel_color_load_luts(struct drm_crtc_state *crtc_state) +static int +intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state) { - struct drm_device *dev = crtc_state->crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_atomic_state *state = + to_intel_atomic_state(new_crtc_state->base.state); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_plane *plane; + + if (!new_crtc_state->base.active || + drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) + return 0; - dev_priv->display.load_luts(crtc_state); + if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable && + new_crtc_state->csc_enable == old_crtc_state->csc_enable) + return 0; + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + struct intel_plane_state *plane_state; + + if (!need_plane_update(plane, new_crtc_state)) + continue; + + plane_state = intel_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + + new_crtc_state->update_planes |= BIT(plane->id); + } + + return 0; +} + +static int check_lut_size(const struct drm_property_blob *lut, int expected) +{ + int len; + + if (!lut) + return 0; + + len = drm_color_lut_size(lut); + if (len != expected) { + DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n", + len, expected); + return -EINVAL; + } + + return 0; } -int intel_color_check(struct drm_crtc *crtc, - struct drm_crtc_state *crtc_state) +static int check_luts(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - size_t gamma_length, degamma_length; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; + const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut; + int gamma_length, degamma_length; + u32 gamma_tests, degamma_tests; + + /* Always allow legacy gamma LUT with no further checking. 
*/ + if (crtc_state_is_legacy_gamma(crtc_state)) + return 0; + + /* C8 relies on its palette being stored in the legacy LUT */ + if (crtc_state->c8_planes) + return -EINVAL; degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size; gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size; + degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests; + gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests; + + if (check_lut_size(degamma_lut, degamma_length) || + check_lut_size(gamma_lut, gamma_length)) + return -EINVAL; + + if (drm_color_lut_check(degamma_lut, degamma_tests) || + drm_color_lut_check(gamma_lut, gamma_tests)) + return -EINVAL; + + return 0; +} + +static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable || + crtc_state_is_legacy_gamma(crtc_state)) + return GAMMA_MODE_MODE_8BIT; + else + return GAMMA_MODE_MODE_10BIT; /* i965+ only */ +} + +static int i9xx_color_check(struct intel_crtc_state *crtc_state) +{ + int ret; + + ret = check_luts(crtc_state); + if (ret) + return ret; + + crtc_state->gamma_enable = + crtc_state->base.gamma_lut && + !crtc_state->c8_planes; + + crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state); + + ret = intel_color_add_affected_planes(crtc_state); + if (ret) + return ret; + + return 0; +} + +static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state) +{ + u32 cgm_mode = 0; + + if (crtc_state_is_legacy_gamma(crtc_state)) + return 0; + + if (crtc_state->base.degamma_lut) + cgm_mode |= CGM_PIPE_MODE_DEGAMMA; + if (crtc_state->base.ctm) + cgm_mode |= CGM_PIPE_MODE_CSC; + if (crtc_state->base.gamma_lut) + cgm_mode |= CGM_PIPE_MODE_GAMMA; + + return cgm_mode; +} + +/* + * CHV color pipeline: + * u0.10 -> CGM degamma -> u0.14 -> CGM csc -> u0.14 -> CGM gamma -> + * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10 + * + * We always bypass the WGC csc and use the CGM csc + * instead since it has degamma and better precision. + */ +static int chv_color_check(struct intel_crtc_state *crtc_state) +{ + int ret; + + ret = check_luts(crtc_state); + if (ret) + return ret; /* - * We allow both degamma & gamma luts at the right size or - * NULL. + * Pipe gamma will be used only for the legacy LUT. + * Otherwise we bypass it and use the CGM gamma instead. */ - if ((!crtc_state->degamma_lut || - drm_color_lut_size(crtc_state->degamma_lut) == degamma_length) && - (!crtc_state->gamma_lut || - drm_color_lut_size(crtc_state->gamma_lut) == gamma_length)) - return 0; + crtc_state->gamma_enable = + crtc_state_is_legacy_gamma(crtc_state) && + !crtc_state->c8_planes; + + crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT; + + crtc_state->cgm_mode = chv_cgm_mode(crtc_state); + + ret = intel_color_add_affected_planes(crtc_state); + if (ret) + return ret; + + return 0; +} + +static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable || + crtc_state_is_legacy_gamma(crtc_state)) + return GAMMA_MODE_MODE_8BIT; + else + return GAMMA_MODE_MODE_10BIT; +} + +static int ilk_color_check(struct intel_crtc_state *crtc_state) +{ + int ret; + + ret = check_luts(crtc_state); + if (ret) + return ret; + + crtc_state->gamma_enable = + crtc_state->base.gamma_lut && + !crtc_state->c8_planes; /* - * We also allow no degamma lut/ctm and a gamma lut at the legacy - * size (256 entries). + * We don't expose the ctm on ilk/snb currently, + * nor do we enable YCbCr output. Also RGB limited + * range output is handled by the hw automagically. 
*/ - if (crtc_state_is_legacy_gamma(crtc_state)) + crtc_state->csc_enable = false; + + crtc_state->gamma_mode = ilk_gamma_mode(crtc_state); + + crtc_state->csc_mode = 0; + + ret = intel_color_add_affected_planes(crtc_state); + if (ret) + return ret; + + return 0; +} + +static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable || + crtc_state_is_legacy_gamma(crtc_state)) + return GAMMA_MODE_MODE_8BIT; + else if (crtc_state->base.gamma_lut && + crtc_state->base.degamma_lut) + return GAMMA_MODE_MODE_SPLIT; + else + return GAMMA_MODE_MODE_10BIT; +} + +static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state) +{ + bool limited_color_range = ilk_csc_limited_range(crtc_state); + + /* + * CSC comes after the LUT in degamma, RGB->YCbCr, + * and RGB full->limited range mode. + */ + if (crtc_state->base.degamma_lut || + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || + limited_color_range) return 0; - return -EINVAL; + return CSC_POSITION_BEFORE_GAMMA; +} + +static int ivb_color_check(struct intel_crtc_state *crtc_state) +{ + bool limited_color_range = ilk_csc_limited_range(crtc_state); + int ret; + + ret = check_luts(crtc_state); + if (ret) + return ret; + + crtc_state->gamma_enable = + (crtc_state->base.gamma_lut || + crtc_state->base.degamma_lut) && + !crtc_state->c8_planes; + + crtc_state->csc_enable = + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || + crtc_state->base.ctm || limited_color_range; + + crtc_state->gamma_mode = ivb_gamma_mode(crtc_state); + + crtc_state->csc_mode = ivb_csc_mode(crtc_state); + + ret = intel_color_add_affected_planes(crtc_state); + if (ret) + return ret; + + return 0; +} + +static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable || + crtc_state_is_legacy_gamma(crtc_state)) + return GAMMA_MODE_MODE_8BIT; + else + return GAMMA_MODE_MODE_10BIT; +} + +static int glk_color_check(struct intel_crtc_state *crtc_state) +{ + int ret; + + ret = check_luts(crtc_state); + if (ret) + return ret; + + crtc_state->gamma_enable = + crtc_state->base.gamma_lut && + !crtc_state->c8_planes; + + /* On GLK+ degamma LUT is controlled by csc_enable */ + crtc_state->csc_enable = + crtc_state->base.degamma_lut || + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || + crtc_state->base.ctm || crtc_state->limited_color_range; + + crtc_state->gamma_mode = glk_gamma_mode(crtc_state); + + crtc_state->csc_mode = 0; + + ret = intel_color_add_affected_planes(crtc_state); + if (ret) + return ret; + + return 0; } -void intel_color_init(struct drm_crtc *crtc) +static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); + u32 gamma_mode = 0; + + if (crtc_state->base.degamma_lut) + gamma_mode |= PRE_CSC_GAMMA_ENABLE; + + if (crtc_state->base.gamma_lut && + !crtc_state->c8_planes) + gamma_mode |= POST_CSC_GAMMA_ENABLE; - drm_mode_crtc_set_gamma_size(crtc, 256); + if (!crtc_state->base.gamma_lut || + crtc_state_is_legacy_gamma(crtc_state)) + gamma_mode |= GAMMA_MODE_MODE_8BIT; + else + gamma_mode |= GAMMA_MODE_MODE_10BIT; + + return gamma_mode; +} - if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix; - dev_priv->display.load_luts = cherryview_load_luts; - } else if (IS_HASWELL(dev_priv)) { - dev_priv->display.load_csc_matrix = ilk_load_csc_matrix; - dev_priv->display.load_luts = haswell_load_luts; - } else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) || - 
IS_BROXTON(dev_priv)) { - dev_priv->display.load_csc_matrix = ilk_load_csc_matrix; - dev_priv->display.load_luts = broadwell_load_luts; - } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { - dev_priv->display.load_csc_matrix = ilk_load_csc_matrix; - dev_priv->display.load_luts = glk_load_luts; +static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state) +{ + u32 csc_mode = 0; + + if (crtc_state->base.ctm) + csc_mode |= ICL_CSC_ENABLE; + + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || + crtc_state->limited_color_range) + csc_mode |= ICL_OUTPUT_CSC_ENABLE; + + return csc_mode; +} + +static int icl_color_check(struct intel_crtc_state *crtc_state) +{ + int ret; + + ret = check_luts(crtc_state); + if (ret) + return ret; + + crtc_state->gamma_mode = icl_gamma_mode(crtc_state); + + crtc_state->csc_mode = icl_csc_mode(crtc_state); + + return 0; +} + +void intel_color_init(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0; + + drm_mode_crtc_set_gamma_size(&crtc->base, 256); + + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv)) { + dev_priv->display.color_check = chv_color_check; + dev_priv->display.color_commit = i9xx_color_commit; + dev_priv->display.load_luts = cherryview_load_luts; + } else if (INTEL_GEN(dev_priv) >= 4) { + dev_priv->display.color_check = i9xx_color_check; + dev_priv->display.color_commit = i9xx_color_commit; + dev_priv->display.load_luts = i965_load_luts; + } else { + dev_priv->display.color_check = i9xx_color_check; + dev_priv->display.color_commit = i9xx_color_commit; + dev_priv->display.load_luts = i9xx_load_luts; + } } else { - dev_priv->display.load_luts = i9xx_load_luts; + if (INTEL_GEN(dev_priv) >= 11) + dev_priv->display.color_check = icl_color_check; + else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + dev_priv->display.color_check = glk_color_check; + else if (INTEL_GEN(dev_priv) >= 7) + dev_priv->display.color_check = ivb_color_check; + else + dev_priv->display.color_check = ilk_color_check; + + if (INTEL_GEN(dev_priv) >= 9) + dev_priv->display.color_commit = skl_color_commit; + else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + dev_priv->display.color_commit = hsw_color_commit; + else + dev_priv->display.color_commit = ilk_color_commit; + + if (INTEL_GEN(dev_priv) >= 11) + dev_priv->display.load_luts = icl_load_luts; + else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + dev_priv->display.load_luts = glk_load_luts; + else if (INTEL_GEN(dev_priv) >= 8) + dev_priv->display.load_luts = bdw_load_luts; + else if (INTEL_GEN(dev_priv) >= 7) + dev_priv->display.load_luts = ivb_load_luts; + else + dev_priv->display.load_luts = ilk_load_luts; } - /* Enable color management support when we have degamma & gamma LUTs. 
*/ - if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 && - INTEL_INFO(dev_priv)->color.gamma_lut_size != 0) - drm_crtc_enable_color_mgmt(crtc, - INTEL_INFO(dev_priv)->color.degamma_lut_size, - true, - INTEL_INFO(dev_priv)->color.gamma_lut_size); + drm_crtc_enable_color_mgmt(&crtc->base, + INTEL_INFO(dev_priv)->color.degamma_lut_size, + has_ctm, + INTEL_INFO(dev_priv)->color.gamma_lut_size); } diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c index 18e370f607bc..848dd9e728d8 100644 --- a/drivers/gpu/drm/i915/intel_connector.c +++ b/drivers/gpu/drm/i915/intel_connector.c @@ -27,7 +27,6 @@ #include <linux/i2c.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> -#include <drm/drmP.h> #include "intel_drv.h" #include "i915_drv.h" @@ -89,12 +88,18 @@ void intel_connector_destroy(struct drm_connector *connector) kfree(intel_connector->detect_edid); + intel_hdcp_cleanup(intel_connector); + if (!IS_ERR_OR_NULL(intel_connector->edid)) kfree(intel_connector->edid); intel_panel_fini(&intel_connector->panel); drm_connector_cleanup(connector); + + if (intel_connector->port) + drm_dp_mst_put_port_malloc(intel_connector->port); + kfree(connector); } @@ -262,3 +267,11 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector) connector->dev->mode_config.aspect_ratio_property, DRM_MODE_PICTURE_ASPECT_NONE); } + +void +intel_attach_colorspace_property(struct drm_connector *connector) +{ + if (!drm_mode_create_colorspace_property(connector)) + drm_object_attach_property(&connector->base, + connector->colorspace_property, 0); +} diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c new file mode 100644 index 000000000000..8931e0fee873 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_context.c @@ -0,0 +1,269 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_gem_context.h" +#include "i915_globals.h" +#include "intel_context.h" +#include "intel_ringbuffer.h" + +static struct i915_global_context { + struct i915_global base; + struct kmem_cache *slab_ce; +} global; + +struct intel_context *intel_context_alloc(void) +{ + return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL); +} + +void intel_context_free(struct intel_context *ce) +{ + kmem_cache_free(global.slab_ce, ce); +} + +struct intel_context * +intel_context_lookup(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce = NULL; + struct rb_node *p; + + spin_lock(&ctx->hw_contexts_lock); + p = ctx->hw_contexts.rb_node; + while (p) { + struct intel_context *this = + rb_entry(p, struct intel_context, node); + + if (this->engine == engine) { + GEM_BUG_ON(this->gem_context != ctx); + ce = this; + break; + } + + if (this->engine < engine) + p = p->rb_right; + else + p = p->rb_left; + } + spin_unlock(&ctx->hw_contexts_lock); + + return ce; +} + +struct intel_context * +__intel_context_insert(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct intel_context *ce) +{ + struct rb_node **p, *parent; + int err = 0; + + spin_lock(&ctx->hw_contexts_lock); + + parent = NULL; + p = &ctx->hw_contexts.rb_node; + while (*p) { + struct intel_context *this; + + parent = *p; + this = rb_entry(parent, struct intel_context, node); + + if (this->engine == engine) { + err = -EEXIST; + ce = this; + break; + } + + if (this->engine < engine) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + if (!err) { + rb_link_node(&ce->node, parent, p); + 
rb_insert_color(&ce->node, &ctx->hw_contexts); + } + + spin_unlock(&ctx->hw_contexts_lock); + + return ce; +} + +void __intel_context_remove(struct intel_context *ce) +{ + struct i915_gem_context *ctx = ce->gem_context; + + spin_lock(&ctx->hw_contexts_lock); + rb_erase(&ce->node, &ctx->hw_contexts); + spin_unlock(&ctx->hw_contexts_lock); +} + +static struct intel_context * +intel_context_instance(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce, *pos; + + ce = intel_context_lookup(ctx, engine); + if (likely(ce)) + return ce; + + ce = intel_context_alloc(); + if (!ce) + return ERR_PTR(-ENOMEM); + + intel_context_init(ce, ctx, engine); + + pos = __intel_context_insert(ctx, engine, ce); + if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */ + intel_context_free(ce); + + GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos); + return pos; +} + +struct intel_context * +intel_context_pin_lock(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) + __acquires(ce->pin_mutex) +{ + struct intel_context *ce; + + ce = intel_context_instance(ctx, engine); + if (IS_ERR(ce)) + return ce; + + if (mutex_lock_interruptible(&ce->pin_mutex)) + return ERR_PTR(-EINTR); + + return ce; +} + +struct intel_context * +intel_context_pin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce; + int err; + + ce = intel_context_instance(ctx, engine); + if (IS_ERR(ce)) + return ce; + + if (likely(atomic_inc_not_zero(&ce->pin_count))) + return ce; + + if (mutex_lock_interruptible(&ce->pin_mutex)) + return ERR_PTR(-EINTR); + + if (likely(!atomic_read(&ce->pin_count))) { + err = ce->ops->pin(ce); + if (err) + goto err; + + i915_gem_context_get(ctx); + GEM_BUG_ON(ce->gem_context != ctx); + + mutex_lock(&ctx->mutex); + list_add(&ce->active_link, &ctx->active_engines); + mutex_unlock(&ctx->mutex); + + intel_context_get(ce); + smp_mb__before_atomic(); /* flush pin before it is visible */ + } + + atomic_inc(&ce->pin_count); + GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! 
*/ + + mutex_unlock(&ce->pin_mutex); + return ce; + +err: + mutex_unlock(&ce->pin_mutex); + return ERR_PTR(err); +} + +void intel_context_unpin(struct intel_context *ce) +{ + if (likely(atomic_add_unless(&ce->pin_count, -1, 1))) + return; + + /* We may be called from inside intel_context_pin() to evict another */ + intel_context_get(ce); + mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING); + + if (likely(atomic_dec_and_test(&ce->pin_count))) { + ce->ops->unpin(ce); + + mutex_lock(&ce->gem_context->mutex); + list_del(&ce->active_link); + mutex_unlock(&ce->gem_context->mutex); + + i915_gem_context_put(ce->gem_context); + intel_context_put(ce); + } + + mutex_unlock(&ce->pin_mutex); + intel_context_put(ce); +} + +static void intel_context_retire(struct i915_active_request *active, + struct i915_request *rq) +{ + struct intel_context *ce = + container_of(active, typeof(*ce), active_tracker); + + intel_context_unpin(ce); +} + +void +intel_context_init(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + kref_init(&ce->ref); + + ce->gem_context = ctx; + ce->engine = engine; + ce->ops = engine->cops; + + INIT_LIST_HEAD(&ce->signal_link); + INIT_LIST_HEAD(&ce->signals); + + mutex_init(&ce->pin_mutex); + + /* Use the whole device by default */ + ce->sseu = intel_device_default_sseu(ctx->i915); + + i915_active_request_init(&ce->active_tracker, + NULL, intel_context_retire); +} + +static void i915_global_context_shrink(void) +{ + kmem_cache_shrink(global.slab_ce); +} + +static void i915_global_context_exit(void) +{ + kmem_cache_destroy(global.slab_ce); +} + +static struct i915_global_context global = { { + .shrink = i915_global_context_shrink, + .exit = i915_global_context_exit, +} }; + +int __init i915_global_context_init(void) +{ + global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN); + if (!global.slab_ce) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h new file mode 100644 index 000000000000..ebc861b1a49e --- /dev/null +++ b/drivers/gpu/drm/i915/intel_context.h @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_CONTEXT_H__ +#define __INTEL_CONTEXT_H__ + +#include <linux/lockdep.h> + +#include "intel_context_types.h" +#include "intel_engine_types.h" + +struct intel_context *intel_context_alloc(void); +void intel_context_free(struct intel_context *ce); + +void intel_context_init(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + +/** + * intel_context_lookup - Find the matching HW context for this (ctx, engine) + * @ctx - the parent GEM context + * @engine - the target HW engine + * + * May return NULL if the HW context hasn't been instantiated (i.e. unused). + */ +struct intel_context * +intel_context_lookup(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + +/** + * intel_context_pin_lock - Stablises the 'pinned' status of the HW context + * @ctx - the parent GEM context + * @engine - the target HW engine + * + * Acquire a lock on the pinned status of the HW context, such that the context + * can neither be bound to the GPU or unbound whilst the lock is held, i.e. + * intel_context_is_pinned() remains stable. 
+ */ +struct intel_context * +intel_context_pin_lock(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + +static inline bool +intel_context_is_pinned(struct intel_context *ce) +{ + return atomic_read(&ce->pin_count); +} + +static inline void intel_context_pin_unlock(struct intel_context *ce) +__releases(ce->pin_mutex) +{ + mutex_unlock(&ce->pin_mutex); +} + +struct intel_context * +__intel_context_insert(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct intel_context *ce); +void +__intel_context_remove(struct intel_context *ce); + +struct intel_context * +intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine); + +static inline void __intel_context_pin(struct intel_context *ce) +{ + GEM_BUG_ON(!intel_context_is_pinned(ce)); + atomic_inc(&ce->pin_count); +} + +void intel_context_unpin(struct intel_context *ce); + +static inline struct intel_context *intel_context_get(struct intel_context *ce) +{ + kref_get(&ce->ref); + return ce; +} + +static inline void intel_context_put(struct intel_context *ce) +{ + kref_put(&ce->ref, ce->ops->destroy); +} + +#endif /* __INTEL_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h new file mode 100644 index 000000000000..624729a35875 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_context_types.h @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_CONTEXT_TYPES__ +#define __INTEL_CONTEXT_TYPES__ + +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/types.h> + +#include "i915_active_types.h" + +struct i915_gem_context; +struct i915_vma; +struct intel_context; +struct intel_ring; + +struct intel_context_ops { + int (*pin)(struct intel_context *ce); + void (*unpin)(struct intel_context *ce); + + void (*destroy)(struct kref *kref); +}; + +/* + * Powergating configuration for a particular (context,engine). + */ +struct intel_sseu { + u8 slice_mask; + u8 subslice_mask; + u8 min_eus_per_subslice; + u8 max_eus_per_subslice; +}; + +struct intel_context { + struct kref ref; + + struct i915_gem_context *gem_context; + struct intel_engine_cs *engine; + struct intel_engine_cs *active; + + struct list_head active_link; + struct list_head signal_link; + struct list_head signals; + + struct i915_vma *state; + struct intel_ring *ring; + + u32 *lrc_reg_state; + u64 lrc_desc; + + atomic_t pin_count; + struct mutex pin_mutex; /* guards pinning and associated on-gpuing */ + + /** + * active_tracker: Active tracker for the external rq activity + * on this intel_context object. 
+ */ + struct i915_active_request active_tracker; + + const struct intel_context_ops *ops; + struct rb_node node; + + /** sseu: Control eu/slice partitioning */ + struct intel_sseu sseu; +}; + +#endif /* __INTEL_CONTEXT_TYPES__ */ diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 68f2fb89ece3..50530e49982c 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -27,11 +27,10 @@ #include <linux/dmi.h> #include <linux/i2c.h> #include <linux/slab.h> -#include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> #include "intel_drv.h" #include <drm/i915_drm.h> #include "i915_drv.h" @@ -84,15 +83,17 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); + intel_wakeref_t wakeref; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe); - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } @@ -322,7 +323,7 @@ intel_crt_mode_valid(struct drm_connector *connector, * DAC limit supposedly 355 MHz. */ max_clock = 270000; - else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) + else if (IS_GEN_RANGE(dev_priv, 3, 4)) max_clock = 400000; else max_clock = 350000; @@ -344,51 +345,52 @@ intel_crt_mode_valid(struct drm_connector *connector, return MODE_OK; } -static bool intel_crt_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_crt_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - return true; + + return 0; } -static bool pch_crt_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int pch_crt_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->has_pch_encoder = true; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - return true; + return 0; } -static bool hsw_crt_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int hsw_crt_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; /* HSW/BDW FDI limited to 4k */ if (adjusted_mode->crtc_hdisplay > 4096 || adjusted_mode->crtc_hblank_start > 4096) - return false; + 
return -EINVAL; pipe_config->has_pch_encoder = true; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -397,7 +399,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder, if (HAS_PCH_LPT(dev_priv)) { if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) { DRM_DEBUG_KMS("LPT only supports 24bpp\n"); - return false; + return -EINVAL; } pipe_config->pipe_bpp = 24; @@ -406,7 +408,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder, /* FDI must always be 2.7 GHz */ pipe_config->port_clock = 135000 * 2; - return true; + return 0; } static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) @@ -433,7 +435,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) I915_WRITE(crt->adpa_reg, adpa); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, crt->adpa_reg, ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, 1000)) @@ -487,7 +489,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) I915_WRITE(crt->adpa_reg, adpa); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, crt->adpa_reg, ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, 1000)) { @@ -540,7 +542,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) CRT_HOTPLUG_FORCE_DETECT, CRT_HOTPLUG_FORCE_DETECT); /* wait for FORCE_DETECT to go off */ - if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN, + if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN, CRT_HOTPLUG_FORCE_DETECT, 0, 1000)) DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); @@ -629,19 +631,19 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) } static enum drm_connector_status -intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) +intel_crt_load_detect(struct intel_crt *crt, u32 pipe) { struct drm_device *dev = crt->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t save_bclrpat; - uint32_t save_vtotal; - uint32_t vtotal, vactive; - uint32_t vsample; - uint32_t vblank, vblank_start, vblank_end; - uint32_t dsl; + u32 save_bclrpat; + u32 save_vtotal; + u32 vtotal, vactive; + u32 vsample; + u32 vblank, vblank_start, vblank_end; + u32 dsl; i915_reg_t bclrpat_reg, vtotal_reg, vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg; - uint8_t st00; + u8 st00; enum drm_connector_status status; DRM_DEBUG_KMS("starting load-detect on CRT\n"); @@ -666,8 +668,8 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) /* Set the border color to purple. 
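/*
 * Illustrative sketch (not part of this patch): encoder ->compute_config()
 * hooks now return 0 or a negative errno instead of a bool, so atomic_check
 * can propagate the real failure reason to userspace.  A minimal hook in
 * the new style (my_compute_config is a made-up name):
 */
static int my_compute_config(struct intel_encoder *encoder,
                             struct intel_crtc_state *pipe_config,
                             struct drm_connector_state *conn_state)
{
        struct drm_display_mode *adjusted_mode =
                &pipe_config->base.adjusted_mode;

        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return -EINVAL;         /* reject instead of returning false */

        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        return 0;                       /* success is now 0, not true */
}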
*/ I915_WRITE(bclrpat_reg, 0x500050); - if (!IS_GEN2(dev_priv)) { - uint32_t pipeconf = I915_READ(pipeconf_reg); + if (!IS_GEN(dev_priv, 2)) { + u32 pipeconf = I915_READ(pipeconf_reg); I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); POSTING_READ(pipeconf_reg); /* Wait for next Vblank to substitue @@ -688,8 +690,8 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) * Yes, this will flicker */ if (vblank_start <= vactive && vblank_end >= vtotal) { - uint32_t vsync = I915_READ(vsync_reg); - uint32_t vsync_start = (vsync & 0xffff) + 1; + u32 vsync = I915_READ(vsync_reg); + u32 vsync_start = (vsync & 0xffff) + 1; vblank_start = vsync_start; I915_WRITE(vblank_reg, @@ -777,6 +779,7 @@ intel_crt_detect(struct drm_connector *connector, struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_crt *crt = intel_attached_crt(connector); struct intel_encoder *intel_encoder = &crt->base; + intel_wakeref_t wakeref; int status, ret; struct intel_load_detect_pipe tmp; @@ -785,7 +788,8 @@ intel_crt_detect(struct drm_connector *connector, force); if (i915_modparams.load_detect_test) { - intel_display_power_get(dev_priv, intel_encoder->power_domain); + wakeref = intel_display_power_get(dev_priv, + intel_encoder->power_domain); goto load_detect; } @@ -793,7 +797,8 @@ intel_crt_detect(struct drm_connector *connector, if (dmi_check_system(intel_spurious_crt_detect)) return connector_status_disconnected; - intel_display_power_get(dev_priv, intel_encoder->power_domain); + wakeref = intel_display_power_get(dev_priv, + intel_encoder->power_domain); if (I915_HAS_HOTPLUG(dev_priv)) { /* We can not rely on the HPD pin always being correctly wired @@ -848,7 +853,7 @@ load_detect: } out: - intel_display_power_put(dev_priv, intel_encoder->power_domain); + intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return status; } @@ -858,10 +863,12 @@ static int intel_crt_get_modes(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_attached_crt(connector); struct intel_encoder *intel_encoder = &crt->base; - int ret; + intel_wakeref_t wakeref; struct i2c_adapter *i2c; + int ret; - intel_display_power_get(dev_priv, intel_encoder->power_domain); + wakeref = intel_display_power_get(dev_priv, + intel_encoder->power_domain); i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin); ret = intel_crt_ddc_get_modes(connector, i2c); @@ -873,7 +880,7 @@ static int intel_crt_get_modes(struct drm_connector *connector) ret = intel_crt_ddc_get_modes(connector, i2c); out: - intel_display_power_put(dev_priv, intel_encoder->power_domain); + intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; } @@ -981,7 +988,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv) else crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) connector->interlace_allowed = 0; else connector->interlace_allowed = 1; diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index a516697bf57d..862a8f686ef5 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -70,50 +70,50 @@ MODULE_FIRMWARE(BXT_CSR_PATH); struct intel_css_header { /* 0x09 for DMC */ - uint32_t module_type; + u32 module_type; /* Includes the DMC specific header in dwords */ - uint32_t header_len; + u32 header_len; /* always value would be 0x10000 */ - uint32_t header_ver; + u32 header_ver; /* Not used */ - uint32_t module_id; + u32 
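/*
 * Illustrative sketch (not part of this patch): the CSR/DMC headers above
 * are parsed straight out of a firmware blob, which is why the patch moves
 * them to the kernel's fixed-width u8/u16/u32 types (with __packed) rather
 * than the userspace uintNN_t spellings.  A hypothetical on-disk record in
 * the same style, with a build-time size check:
 */
struct example_fw_record {
        u32 signature;          /* magic value identifying the blob */
        u16 version_major;
        u16 version_minor;
        u32 payload_bytes;      /* size of the data that follows */
} __packed;

static inline void example_fw_record_check(void)
{
        /* catch accidental compiler padding at build time */
        BUILD_BUG_ON(sizeof(struct example_fw_record) != 12);
}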
module_id; /* Not used */ - uint32_t module_vendor; + u32 module_vendor; /* in YYYYMMDD format */ - uint32_t date; + u32 date; /* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */ - uint32_t size; + u32 size; /* Not used */ - uint32_t key_size; + u32 key_size; /* Not used */ - uint32_t modulus_size; + u32 modulus_size; /* Not used */ - uint32_t exponent_size; + u32 exponent_size; /* Not used */ - uint32_t reserved1[12]; + u32 reserved1[12]; /* Major Minor */ - uint32_t version; + u32 version; /* Not used */ - uint32_t reserved2[8]; + u32 reserved2[8]; /* Not used */ - uint32_t kernel_header_info; + u32 kernel_header_info; } __packed; struct intel_fw_info { - uint16_t reserved1; + u16 reserved1; /* Stepping (A, B, C, ..., *). * is a wildcard */ char stepping; @@ -121,8 +121,8 @@ struct intel_fw_info { /* Sub-stepping (0, 1, ..., *). * is a wildcard */ char substepping; - uint32_t offset; - uint32_t reserved2; + u32 offset; + u32 reserved2; } __packed; struct intel_package_header { @@ -135,14 +135,14 @@ struct intel_package_header { unsigned char reserved[10]; /* Number of valid entries in the FWInfo array below */ - uint32_t num_entries; + u32 num_entries; struct intel_fw_info fw_info[20]; } __packed; struct intel_dmc_header { /* always value would be 0x40403E3E */ - uint32_t signature; + u32 signature; /* DMC binary header length */ unsigned char header_len; @@ -151,30 +151,30 @@ struct intel_dmc_header { unsigned char header_ver; /* Reserved */ - uint16_t dmcc_ver; + u16 dmcc_ver; /* Major, Minor */ - uint32_t project; + u32 project; /* Firmware program size (excluding header) in dwords */ - uint32_t fw_size; + u32 fw_size; /* Major Minor version */ - uint32_t fw_version; + u32 fw_version; /* Number of valid MMIO cycles present. */ - uint32_t mmio_count; + u32 mmio_count; /* MMIO address */ - uint32_t mmioaddr[8]; + u32 mmioaddr[8]; /* MMIO data */ - uint32_t mmiodata[8]; + u32 mmiodata[8]; /* FW filename */ unsigned char dfile[32]; - uint32_t reserved1[2]; + u32 reserved1[2]; } __packed; struct stepping_info { @@ -230,7 +230,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv) static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) { - uint32_t val, mask; + u32 val, mask; mask = DC_STATE_DEBUG_MASK_MEMORY_UP; @@ -257,7 +257,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) void intel_csr_load_program(struct drm_i915_private *dev_priv) { u32 *payload = dev_priv->csr.dmc_payload; - uint32_t i, fw_size; + u32 i, fw_size; if (!HAS_CSR(dev_priv)) { DRM_ERROR("No CSR support available for this platform\n"); @@ -289,17 +289,17 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv) gen9_set_dc_state_debugmask(dev_priv); } -static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, - const struct firmware *fw) +static u32 *parse_csr_fw(struct drm_i915_private *dev_priv, + const struct firmware *fw) { struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header *dmc_header; struct intel_csr *csr = &dev_priv->csr; const struct stepping_info *si = intel_get_stepping_info(dev_priv); - uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; - uint32_t i; - uint32_t *dmc_payload; + u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; + u32 i; + u32 *dmc_payload; if (!fw) return NULL; @@ -409,6 +409,21 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, return memcpy(dmc_payload, &fw->data[readcount], nbytes); } +static void 
intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv) +{ + WARN_ON(dev_priv->csr.wakeref); + dev_priv->csr.wakeref = + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); +} + +static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv) +{ + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&dev_priv->csr.wakeref); + + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); +} + static void csr_load_work_fn(struct work_struct *work) { struct drm_i915_private *dev_priv; @@ -424,8 +439,7 @@ static void csr_load_work_fn(struct work_struct *work) if (dev_priv->csr.dmc_payload) { intel_csr_load_program(dev_priv); - - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_csr_runtime_pm_put(dev_priv); DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n", dev_priv->csr.fw_path, @@ -467,12 +481,12 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) * suspend as runtime suspend *requires* a working CSR for whatever * reason. */ - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + intel_csr_runtime_pm_get(dev_priv); if (INTEL_GEN(dev_priv) >= 12) { /* Allow to load fw via parameter using the last known size */ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; - } else if (IS_ICELAKE(dev_priv)) { + } else if (IS_GEN(dev_priv, 11)) { csr->fw_path = ICL_CSR_PATH; csr->required_version = ICL_CSR_VERSION_REQUIRED; csr->max_fw_size = ICL_CSR_MAX_FW_SIZE; @@ -538,7 +552,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv) /* Drop the reference held in case DMC isn't loaded. */ if (!dev_priv->csr.dmc_payload) - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_csr_runtime_pm_put(dev_priv); } /** @@ -558,7 +572,7 @@ void intel_csr_ucode_resume(struct drm_i915_private *dev_priv) * loaded. 
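/*
 * Illustrative sketch (not part of this patch): the pattern used by
 * intel_csr_runtime_pm_get/put() above — stash the tracked wakeref in the
 * owning structure, WARN on a double get, and use fetch_and_zero() so the
 * cookie can only be released once.  struct my_feature and the helper
 * names are made up for the example.
 */
struct my_feature {
        intel_wakeref_t wakeref;
};

static void my_feature_power_get(struct drm_i915_private *i915,
                                 struct my_feature *f)
{
        WARN_ON(f->wakeref);    /* must not already hold a reference */
        f->wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
}

static void my_feature_power_put(struct drm_i915_private *i915,
                                 struct my_feature *f)
{
        intel_wakeref_t wakeref = fetch_and_zero(&f->wakeref);

        if (wakeref)            /* tolerate a put without a prior get */
                intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
}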
*/ if (!dev_priv->csr.dmc_payload) - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + intel_csr_runtime_pm_get(dev_priv); } /** @@ -574,6 +588,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv) return; intel_csr_ucode_suspend(dev_priv); + WARN_ON(dev_priv->csr.wakeref); kfree(dev_priv->csr.dmc_payload); } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f3e1d6a0b7dd..3f1e491bd0c0 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ }; -struct icl_combo_phy_ddi_buf_trans { - u32 dw2_swing_select; - u32 dw2_swing_scalar; - u32 dw4_scaling; -}; - -/* Voltage Swing Programming for VccIO 0.85V for DP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { - /* Voltage mV db */ - { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ - { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ - { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ - { 0x2, 0x98, 0x900F }, /* 400 9.5 */ - { 0xB, 0x70, 0x0018 }, /* 600 0.0 */ - { 0xB, 0x70, 0x3015 }, /* 600 3.5 */ - { 0xB, 0x70, 0x6012 }, /* 600 6.0 */ - { 0x5, 0x00, 0x0018 }, /* 800 0.0 */ - { 0x5, 0x00, 0x3015 }, /* 800 3.5 */ - { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ -}; - -/* FIXME - After table is updated in Bspec */ -/* Voltage Swing Programming for VccIO 0.85V for eDP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = { - /* Voltage mV db */ - { 0x0, 0x00, 0x00 }, /* 200 0.0 */ - { 0x0, 0x00, 0x00 }, /* 200 1.5 */ - { 0x0, 0x00, 0x00 }, /* 200 4.0 */ - { 0x0, 0x00, 0x00 }, /* 200 6.0 */ - { 0x0, 0x00, 0x00 }, /* 250 0.0 */ - { 0x0, 0x00, 0x00 }, /* 250 1.5 */ - { 0x0, 0x00, 0x00 }, /* 250 4.0 */ - { 0x0, 0x00, 0x00 }, /* 300 0.0 */ - { 0x0, 0x00, 0x00 }, /* 300 1.5 */ - { 0x0, 0x00, 0x00 }, /* 350 0.0 */ -}; - -/* Voltage Swing Programming for VccIO 0.95V for DP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = { - /* Voltage mV db */ - { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ - { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ - { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ - { 0x2, 0x98, 0x900F }, /* 400 9.5 */ - { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ - { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ - { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ - { 0x5, 0x76, 0x0018 }, /* 800 0.0 */ - { 0x5, 0x76, 0x3015 }, /* 800 3.5 */ - { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ +/* icl_combo_phy_ddi_translations */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ }; -/* FIXME - After table is updated in Bspec */ -/* Voltage Swing Programming for VccIO 0.95V for eDP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { - /* Voltage mV db */ - { 0x0, 0x00, 0x00 }, /* 200 0.0 */ - { 0x0, 0x00, 0x00 }, /* 200 1.5 */ - { 0x0, 0x00, 0x00 }, /* 200 4.0 */ - { 
0x0, 0x00, 0x00 }, /* 200 6.0 */ - { 0x0, 0x00, 0x00 }, /* 250 0.0 */ - { 0x0, 0x00, 0x00 }, /* 250 1.5 */ - { 0x0, 0x00, 0x00 }, /* 250 4.0 */ - { 0x0, 0x00, 0x00 }, /* 300 0.0 */ - { 0x0, 0x00, 0x00 }, /* 300 1.5 */ - { 0x0, 0x00, 0x00 }, /* 350 0.0 */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ + { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ + { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ + { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ + { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ + { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ }; -/* Voltage Swing Programming for VccIO 1.05V for DP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { - /* Voltage mV db */ - { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ - { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ - { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ - { 0x2, 0x98, 0x900F }, /* 400 9.5 */ - { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ - { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ - { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ - { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ - { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ - { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ }; -/* FIXME - After table is updated in Bspec */ -/* Voltage Swing Programming for VccIO 1.05V for eDP */ -static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { - /* Voltage mV db */ - { 0x0, 0x00, 0x00 }, /* 200 0.0 */ - { 0x0, 0x00, 0x00 }, /* 200 1.5 */ - { 0x0, 0x00, 0x00 }, /* 200 4.0 */ - { 0x0, 0x00, 0x00 }, /* 200 6.0 */ - { 0x0, 0x00, 0x00 }, /* 250 0.0 */ - { 0x0, 0x00, 0x00 }, /* 250 1.5 */ - { 0x0, 0x00, 0x00 }, /* 250 4.0 */ - { 0x0, 0x00, 0x00 }, /* 300 0.0 */ - { 0x0, 0x00, 0x00 }, /* 300 1.5 */ - { 0x0, 0x00, 0x00 }, /* 350 0.0 */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { + /* NT mV Trans mV db */ + { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ + { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ + { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ + { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ }; struct icl_mg_phy_ddi_buf_trans { @@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) } } -static const struct icl_combo_phy_ddi_buf_trans * +static const struct cnl_ddi_buf_trans * icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, - int type, int *n_entries) + int type, int rate, int *n_entries) 
{ - u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK; - - if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { - switch (voltage) { - case VOLTAGE_INFO_0_85V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V); - return icl_combo_phy_ddi_translations_edp_0_85V; - case VOLTAGE_INFO_0_95V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V); - return icl_combo_phy_ddi_translations_edp_0_95V; - case VOLTAGE_INFO_1_05V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V); - return icl_combo_phy_ddi_translations_edp_1_05V; - default: - MISSING_CASE(voltage); - return NULL; - } - } else { - switch (voltage) { - case VOLTAGE_INFO_0_85V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V); - return icl_combo_phy_ddi_translations_dp_hdmi_0_85V; - case VOLTAGE_INFO_0_95V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V); - return icl_combo_phy_ddi_translations_dp_hdmi_0_95V; - case VOLTAGE_INFO_1_05V: - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V); - return icl_combo_phy_ddi_translations_dp_hdmi_1_05V; - default: - MISSING_CASE(voltage); - return NULL; - } + if (type == INTEL_OUTPUT_HDMI) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; + } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); + return icl_combo_phy_ddi_translations_edp_hbr3; + } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return icl_combo_phy_ddi_translations_edp_hbr2; } + + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); + return icl_combo_phy_ddi_translations_dp_hbr2; } static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) @@ -916,10 +851,10 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { if (intel_port_is_combophy(dev_priv, port)) - icl_get_combo_buf_trans(dev_priv, port, - INTEL_OUTPUT_HDMI, &n_entries); + icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, + 0, &n_entries); else n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); default_entry = n_entries - 1; @@ -1039,7 +974,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); } -static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) +static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) { switch (pll->info->id) { case DPLL_ID_WRPLL1: @@ -1060,8 +995,8 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) } } -static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { const struct intel_shared_dpll *pll = crtc_state->shared_dpll; int clock = crtc_state->port_clock; @@ -1069,10 +1004,11 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, switch (id) { default: + /* + * DPLL_ID_ICL_DPLL0 and DPLL_ID_ICL_DPLL1 should not be used + * here, so do warn if this get passed in + */ MISSING_CASE(id); - /* fall through */ - case DPLL_ID_ICL_DPLL0: - case 
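/*
 * Illustrative sketch (not part of this patch): the new ICL lookup picks a
 * cnl_ddi_buf_trans table from the output type and link rate instead of the
 * VccIO voltage, falling back to the DP HBR2 table.  The same decision
 * logic pulled out into a helper (example_pick_combo_table is a made-up
 * name; the tables are the ones added above):
 */
static const struct cnl_ddi_buf_trans *
example_pick_combo_table(struct drm_i915_private *dev_priv,
                         int type, int rate, int *n_entries)
{
        if (type == INTEL_OUTPUT_HDMI) {
                *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
                return icl_combo_phy_ddi_translations_hdmi;
        }
        if (type == INTEL_OUTPUT_EDP && rate > 540000) {
                /* HBR3 eDP gets its own swing/de-emphasis values */
                *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
                return icl_combo_phy_ddi_translations_edp_hbr3;
        }
        if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
                *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
                return icl_combo_phy_ddi_translations_edp_hbr2;
        }

        /* DP, and eDP panels that did not ask for low vswing */
        *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
        return icl_combo_phy_ddi_translations_dp_hbr2;
}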
DPLL_ID_ICL_DPLL1: return DDI_CLK_SEL_NONE; case DPLL_ID_ICL_TBTPLL: switch (clock) { @@ -1086,7 +1022,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, return DDI_CLK_SEL_TBT_810; default: MISSING_CASE(clock); - break; + return DDI_CLK_SEL_NONE; } case DPLL_ID_ICL_MGPLL1: case DPLL_ID_ICL_MGPLL2: @@ -1304,24 +1240,15 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, return (refclk * n * 100) / (p * r); } -static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, - enum intel_dpll_id pll_id) +static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state) { - i915_reg_t cfgcr1_reg, cfgcr2_reg; - uint32_t cfgcr1_val, cfgcr2_val; - uint32_t p0, p1, p2, dco_freq; - - cfgcr1_reg = DPLL_CFGCR1(pll_id); - cfgcr2_reg = DPLL_CFGCR2(pll_id); + u32 p0, p1, p2, dco_freq; - cfgcr1_val = I915_READ(cfgcr1_reg); - cfgcr2_val = I915_READ(cfgcr2_reg); + p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; + p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK; - p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK; - p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK; - - if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1)) - p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; + if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1)) + p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; else p1 = 1; @@ -1356,33 +1283,28 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, break; } - dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000; + dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) + * 24 * 1000; - dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 * - 1000) / 0x8000; + dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) + * 24 * 1000) / 0x8000; + + if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) + return 0; return dco_freq / (p0 * p1 * p2 * 5); } int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, - enum intel_dpll_id pll_id) + struct intel_dpll_hw_state *pll_state) { - uint32_t cfgcr0, cfgcr1; - uint32_t p0, p1, p2, dco_freq, ref_clock; - - if (INTEL_GEN(dev_priv) >= 11) { - cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id)); - cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id)); - } else { - cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id)); - cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id)); - } + u32 p0, p1, p2, dco_freq, ref_clock; - p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK; - p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK; + p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; + p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK; - if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) - p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> + if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) + p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> DPLL_CFGCR1_QDIV_RATIO_SHIFT; else p1 = 1; @@ -1410,16 +1332,17 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, case DPLL_CFGCR1_KDIV_2: p2 = 2; break; - case DPLL_CFGCR1_KDIV_4: - p2 = 4; + case DPLL_CFGCR1_KDIV_3: + p2 = 3; break; } ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); - dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; + dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) + * ref_clock; - dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> + dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000; if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) @@ -1451,24 +1374,21 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, } static int icl_calc_mg_pll_link(struct drm_i915_private 
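/*
 * Illustrative sketch (not part of this patch): the SKL/CNL WRPLL readout
 * above now works purely from the cached CFGCR values in intel_dpll_hw_state.
 * The DCO frequency is stored as an integer plus a 15-bit fraction of the
 * 24 MHz reference, and the link clock is the DCO divided by p0*p1*p2*5.
 * Standalone arithmetic only — the register decode is omitted and the
 * sample divider values are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_wrpll_link_khz(uint32_t dco_integer,
                                       uint32_t dco_fraction,
                                       uint32_t p0, uint32_t p1, uint32_t p2)
{
        /* DCO in kHz: integer part plus fraction/2^15 of the 24 MHz ref */
        uint32_t dco_khz = dco_integer * 24 * 1000 +
                           (dco_fraction * 24 * 1000) / 0x8000;

        return dco_khz / (p0 * p1 * p2 * 5);
}

int main(void)
{
        /* 8100 MHz DCO / (2 * 1 * 3 * 5) = 270000 kHz */
        printf("%u kHz\n",
               (unsigned)example_wrpll_link_khz(337, 16384, 2, 1, 3));
        return 0;
}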
*dev_priv, - enum port port) + const struct intel_dpll_hw_state *pll_state) { - u32 mg_pll_div0, mg_clktop_hsclkctl; - u32 m1, m2_int, m2_frac, div1, div2, refclk; + u32 m1, m2_int, m2_frac, div1, div2, ref_clock; u64 tmp; - refclk = dev_priv->cdclk.hw.ref; - - mg_pll_div0 = I915_READ(MG_PLL_DIV0(port)); - mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(port)); + ref_clock = dev_priv->cdclk.hw.ref; - m1 = I915_READ(MG_PLL_DIV1(port)) & MG_PLL_DIV1_FBPREDIV_MASK; - m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; - m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ? - (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >> - MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0; + m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; + m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; + m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ? + (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >> + MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0; - switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { + switch (pll_state->mg_clktop2_hsclkctl & + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: div1 = 2; break; @@ -1482,12 +1402,14 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, div1 = 7; break; default: - MISSING_CASE(mg_clktop_hsclkctl); + MISSING_CASE(pll_state->mg_clktop2_hsclkctl); return 0; } - div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> + div2 = (pll_state->mg_clktop2_hsclkctl & + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; + /* div2 value of 0 is same as 1 means no div */ if (div2 == 0) div2 = 1; @@ -1496,8 +1418,8 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, * Adjust the original formula to delay the division by 2^22 in order to * minimize possible rounding errors. 
*/ - tmp = (u64)m1 * m2_int * refclk + - (((u64)m1 * m2_frac * refclk) >> 22); + tmp = (u64)m1 * m2_int * ref_clock + + (((u64)m1 * m2_frac * ref_clock) >> 22); tmp = div_u64(tmp, 5 * div1 * div2); return tmp; @@ -1531,25 +1453,24 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; enum port port = encoder->port; - int link_clock = 0; - uint32_t pll_id; + int link_clock; - pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); if (intel_port_is_combophy(dev_priv, port)) { - if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) - link_clock = cnl_calc_wrpll_link(dev_priv, pll_id); - else - link_clock = icl_calc_dp_combo_pll_link(dev_priv, - pll_id); + link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); } else { + enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv, + pipe_config->shared_dpll); + if (pll_id == DPLL_ID_ICL_TBTPLL) link_clock = icl_calc_tbt_pll_link(dev_priv, port); else - link_clock = icl_calc_mg_pll_link(dev_priv, port); + link_clock = icl_calc_mg_pll_link(dev_priv, pll_state); } pipe_config->port_clock = link_clock; + ddi_dotclock_get(pipe_config); } @@ -1557,18 +1478,13 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int link_clock = 0; - uint32_t cfgcr0; - enum intel_dpll_id pll_id; - - pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); + struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; + int link_clock; - cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id)); - - if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { - link_clock = cnl_calc_wrpll_link(dev_priv, pll_id); + if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { + link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); } else { - link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK; + link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK; switch (link_clock) { case DPLL_CFGCR0_LINK_RATE_810: @@ -1608,22 +1524,20 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder, } static void skl_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) + struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int link_clock = 0; - uint32_t dpll_ctl1; - enum intel_dpll_id pll_id; + struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; + int link_clock; - pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); - - dpll_ctl1 = I915_READ(DPLL_CTRL1); - - if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(pll_id)) { - link_clock = skl_calc_wrpll_link(dev_priv, pll_id); + /* + * ctrl1 register is already shifted for each pll, just use 0 to get + * the internal shift for each field + */ + if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) { + link_clock = skl_calc_wrpll_link(pll_state); } else { - link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(pll_id); - link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(pll_id); + link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0); + link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0); switch (link_clock) { case DPLL_CTRL1_LINK_RATE_810: @@ -1703,24 +1617,17 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder, ddi_dotclock_get(pipe_config); } -static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state) +static int 
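/*
 * Illustrative sketch (not part of this patch): the MG PLL readout keeps the
 * fractional feedback divider (m2_frac, a fraction of 2^22) in 64-bit fixed
 * point and divides only once at the end, exactly to minimise rounding error
 * as the comment above says.  Standalone arithmetic with made-up divider
 * values; the kernel version uses div_u64() for the final division.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t example_mg_pll_link_khz(uint32_t m1, uint32_t m2_int,
                                        uint32_t m2_frac, uint32_t ref_khz,
                                        uint32_t div1, uint32_t div2)
{
        /* delay the >>22 of the fractional part as long as possible */
        uint64_t tmp = (uint64_t)m1 * m2_int * ref_khz +
                       (((uint64_t)m1 * m2_frac * ref_khz) >> 22);

        return tmp / (5ull * div1 * div2);
}

int main(void)
{
        /* e.g. 38.4 MHz ref, m1=2, m2=70+0/2^22, div1=2, div2=2 -> 268800 kHz */
        printf("%llu kHz\n", (unsigned long long)
               example_mg_pll_link_khz(2, 70, 0, 38400, 2, 2));
        return 0;
}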
bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state) { - struct intel_dpll_hw_state *state; struct dpll clock; - /* For DDI ports we always use a shared PLL. */ - if (WARN_ON(!crtc_state->shared_dpll)) - return 0; - - state = &crtc_state->dpll_hw_state; - clock.m1 = 2; - clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22; - if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE) - clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK; - clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; - clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; - clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; + clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22; + if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) + clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK; + clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; + clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; + clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; return chv_calc_dpll_params(100000, &clock); } @@ -1728,7 +1635,8 @@ static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state) static void bxt_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - pipe_config->port_clock = bxt_calc_pll_link(pipe_config); + pipe_config->port_clock = + bxt_calc_pll_link(&pipe_config->dpll_hw_state); ddi_dotclock_get(pipe_config); } @@ -1738,7 +1646,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_ddi_clock_get(encoder, pipe_config); else if (IS_CANNONLAKE(dev_priv)) cnl_ddi_clock_get(encoder, pipe_config); @@ -1801,7 +1709,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - uint32_t temp; + u32 temp; temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (state == true) @@ -1819,7 +1727,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; - uint32_t temp; + u32 temp; /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ temp = TRANS_DDI_FUNC_ENABLE; @@ -1880,7 +1788,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) temp |= TRANS_DDI_MODE_SELECT_DVI; if (crtc_state->hdmi_scrambling) - temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK; + temp |= TRANS_DDI_HDMI_SCRAMBLING; if (crtc_state->hdmi_high_tmds_clock_ratio) temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE; } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { @@ -1903,7 +1811,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); - uint32_t val = I915_READ(reg); + u32 val = I915_READ(reg); val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); val |= TRANS_DDI_PORT_NONE; @@ -1922,12 +1830,14 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, { struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + intel_wakeref_t 
wakeref; enum pipe pipe = 0; int ret = 0; - uint32_t tmp; + u32 tmp; - if (WARN_ON(!intel_display_power_get_if_enabled(dev_priv, - intel_encoder->power_domain))) + wakeref = intel_display_power_get_if_enabled(dev_priv, + intel_encoder->power_domain); + if (WARN_ON(!wakeref)) return -ENXIO; if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) { @@ -1942,7 +1852,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, tmp &= ~TRANS_DDI_HDCP_SIGNALLING; I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp); out: - intel_display_power_put(dev_priv, intel_encoder->power_domain); + intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; } @@ -1953,13 +1863,15 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) struct intel_encoder *encoder = intel_connector->encoder; int type = intel_connector->base.connector_type; enum port port = encoder->port; - enum pipe pipe = 0; enum transcoder cpu_transcoder; - uint32_t tmp; + intel_wakeref_t wakeref; + enum pipe pipe = 0; + u32 tmp; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; if (!encoder->get_hw_state(encoder, &pipe)) { @@ -1967,7 +1879,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) goto out; } - if (port == PORT_A) + if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) cpu_transcoder = TRANSCODER_EDP; else cpu_transcoder = (enum transcoder) pipe; @@ -2001,7 +1913,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) } out: - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } @@ -2012,6 +1924,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = encoder->port; + intel_wakeref_t wakeref; enum pipe p; u32 tmp; u8 mst_pipe_mask; @@ -2019,15 +1932,16 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, *pipe_mask = 0; *is_dp_mst = false; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return; tmp = I915_READ(DDI_BUF_CTL(port)); if (!(tmp & DDI_BUF_CTL_ENABLE)) goto out; - if (port == PORT_A) { + if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) { tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { @@ -2091,7 +2005,7 @@ out: "(PHY_CTL %08x)\n", port_name(port), tmp); } - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); } bool intel_ddi_get_hw_state(struct intel_encoder *encoder, @@ -2188,7 +2102,7 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) } static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, - enum port port, uint8_t iboost) + enum port port, u8 iboost) { u32 tmp; @@ -2207,7 +2121,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - uint8_t iboost; + u8 iboost; if (type == INTEL_OUTPUT_HDMI) iboost = 
dev_priv->vbt.ddi_port_info[port].hdmi_boost_level; @@ -2275,13 +2189,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = encoder->port; int n_entries; - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { if (intel_port_is_combophy(dev_priv, port)) icl_get_combo_buf_trans(dev_priv, port, encoder->type, - &n_entries); + intel_dp->link_rate, &n_entries); else n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); } else if (IS_CANNONLAKE(dev_priv)) { @@ -2370,13 +2285,13 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overrite individual loadgen */ for (ln = 0; ln < 4; ln++) { - val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); + val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); - I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); + I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val); } /* Program PORT_TX_DW5 */ @@ -2432,14 +2347,14 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); + val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port)); val &= ~LOADGEN_SELECT; if ((rate <= 600000 && width == 4 && ln >= 1) || (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } - I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); + I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ @@ -2462,14 +2377,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, } static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, - u32 level, enum port port, int type) + u32 level, enum port port, int type, + int rate) { - const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL; + const struct cnl_ddi_buf_trans *ddi_translations = NULL; u32 n_entries, val; int ln; ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, - &n_entries); + rate, &n_entries); if (!ddi_translations) return; @@ -2478,45 +2394,42 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, level = n_entries - 1; } - /* Set PORT_TX_DW5 Rterm Sel to 110b. 
*/ + /* Set PORT_TX_DW5 */ val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); - val &= ~RTERM_SELECT_MASK; + val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | + TAP2_DISABLE | TAP3_DISABLE); + val |= SCALING_MODE_SEL(0x2); val |= RTERM_SELECT(0x6); - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); - - /* Program PORT_TX_DW5 */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); - /* Set DisableTap2 and DisableTap3 if MIPI DSI - * Clear DisableTap2 and DisableTap3 for all other Ports - */ - if (type == INTEL_OUTPUT_DSI) { - val |= TAP2_DISABLE; - val |= TAP3_DISABLE; - } else { - val &= ~TAP2_DISABLE; - val &= ~TAP3_DISABLE; - } + val |= TAP3_DISABLE; I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); /* Program PORT_TX_DW2 */ val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); - val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select); - val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select); + val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); + val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); /* Program Rcomp scalar for every table entry */ - val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar); + val |= RCOMP_SCALAR(0x98); I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overwrite individual loadgen. */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); + val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); - val |= ddi_translations[level].dw4_scaling; - I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); + val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); + val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); + val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); + I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val); } + + /* Program PORT_TX_DW7 */ + val = I915_READ(ICL_PORT_TX_DW7_LN0(port)); + val &= ~N_SCALAR_MASK; + val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); + I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val); } static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, @@ -2560,14 +2473,14 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); + val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port)); val &= ~LOADGEN_SELECT; if ((rate <= 600000 && width == 4 && ln >= 1) || (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } - I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); + I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ @@ -2581,7 +2494,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); /* 5. Program swing and de-emphasis */ - icl_ddi_combo_vswing_program(dev_priv, level, port, type); + icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate); /* 6. Set training enable to trigger update */ val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); @@ -2610,33 +2523,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. 
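/*
 * Illustrative sketch (not part of this patch): PORT_TX_DW4 has to be
 * programmed lane by lane — the _GRP broadcast register would clobber the
 * per-lane loadgen bits — so the vswing code runs a read-modify-write loop
 * over the four lanes.  Note the new (lane, port) argument order of the
 * register macros.  "trans" stands in for one entry of the selected
 * translation table; the helper name is made up.
 */
static void example_program_dw4(struct drm_i915_private *dev_priv,
                                enum port port,
                                const struct cnl_ddi_buf_trans *trans)
{
        int ln;

        for (ln = 0; ln <= 3; ln++) {
                u32 val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));

                val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
                         CURSOR_COEFF_MASK);
                val |= POST_CURSOR_1(trans->dw4_post_cursor_1);
                val |= POST_CURSOR_2(trans->dw4_post_cursor_2);
                val |= CURSOR_COEFF(trans->dw4_cursor_coeff);

                I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
        }
}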
*/ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_LINK_PARAMS(port, ln)); + val = I915_READ(MG_TX1_LINK_PARAMS(ln, port)); val &= ~CRI_USE_FS32; - I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val); + I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val); - val = I915_READ(MG_TX2_LINK_PARAMS(port, ln)); + val = I915_READ(MG_TX2_LINK_PARAMS(ln, port)); val &= ~CRI_USE_FS32; - I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val); + I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val); } /* Program MG_TX_SWINGCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_SWINGCTRL(port, ln)); + val = I915_READ(MG_TX1_SWINGCTRL(ln, port)); val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; val |= CRI_TXDEEMPH_OVERRIDE_17_12( ddi_translations[level].cri_txdeemph_override_17_12); - I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val); + I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val); - val = I915_READ(MG_TX2_SWINGCTRL(port, ln)); + val = I915_READ(MG_TX2_SWINGCTRL(ln, port)); val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; val |= CRI_TXDEEMPH_OVERRIDE_17_12( ddi_translations[level].cri_txdeemph_override_17_12); - I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val); + I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val); } /* Program MG_TX_DRVCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_DRVCTRL(port, ln)); + val = I915_READ(MG_TX1_DRVCTRL(ln, port)); val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK); val |= CRI_TXDEEMPH_OVERRIDE_5_0( @@ -2644,9 +2557,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, CRI_TXDEEMPH_OVERRIDE_11_6( ddi_translations[level].cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_EN; - I915_WRITE(MG_TX1_DRVCTRL(port, ln), val); + I915_WRITE(MG_TX1_DRVCTRL(ln, port), val); - val = I915_READ(MG_TX2_DRVCTRL(port, ln)); + val = I915_READ(MG_TX2_DRVCTRL(ln, port)); val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK); val |= CRI_TXDEEMPH_OVERRIDE_5_0( @@ -2654,7 +2567,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, CRI_TXDEEMPH_OVERRIDE_11_6( ddi_translations[level].cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_EN; - I915_WRITE(MG_TX2_DRVCTRL(port, ln), val); + I915_WRITE(MG_TX2_DRVCTRL(ln, port), val); /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ } @@ -2665,17 +2578,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * values from table for which TX1 and TX2 enabled. 
*/ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_CLKHUB(port, ln)); + val = I915_READ(MG_CLKHUB(ln, port)); if (link_clock < 300000) val |= CFG_LOW_RATE_LKREN_EN; else val &= ~CFG_LOW_RATE_LKREN_EN; - I915_WRITE(MG_CLKHUB(port, ln), val); + I915_WRITE(MG_CLKHUB(ln, port), val); } /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_DCC(port, ln)); + val = I915_READ(MG_TX1_DCC(ln, port)); val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; if (link_clock <= 500000) { val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; @@ -2683,9 +2596,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val |= CFG_AMI_CK_DIV_OVERRIDE_EN | CFG_AMI_CK_DIV_OVERRIDE_VAL(1); } - I915_WRITE(MG_TX1_DCC(port, ln), val); + I915_WRITE(MG_TX1_DCC(ln, port), val); - val = I915_READ(MG_TX2_DCC(port, ln)); + val = I915_READ(MG_TX2_DCC(ln, port)); val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; if (link_clock <= 500000) { val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; @@ -2693,18 +2606,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val |= CFG_AMI_CK_DIV_OVERRIDE_EN | CFG_AMI_CK_DIV_OVERRIDE_VAL(1); } - I915_WRITE(MG_TX2_DCC(port, ln), val); + I915_WRITE(MG_TX2_DCC(ln, port), val); } /* Program MG_TX_PISO_READLOAD with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_PISO_READLOAD(port, ln)); + val = I915_READ(MG_TX1_PISO_READLOAD(ln, port)); val |= CRI_CALCINIT; - I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val); + I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val); - val = I915_READ(MG_TX2_PISO_READLOAD(port, ln)); + val = I915_READ(MG_TX2_PISO_READLOAD(ln, port)); val |= CRI_CALCINIT; - I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val); + I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val); } } @@ -2722,7 +2635,7 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level); } -static uint32_t translate_signal_level(int signal_levels) +static u32 translate_signal_level(int signal_levels) { int i; @@ -2737,9 +2650,9 @@ static uint32_t translate_signal_level(int signal_levels) return 0; } -static uint32_t intel_ddi_dp_level(struct intel_dp *intel_dp) +static u32 intel_ddi_dp_level(struct intel_dp *intel_dp) { - uint8_t train_set = intel_dp->train_set[0]; + u8 train_set = intel_dp->train_set[0]; int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); @@ -2753,7 +2666,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp) struct intel_encoder *encoder = &dport->base; int level = intel_ddi_dp_level(intel_dp); - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, intel_dp->link_rate, level, encoder->type); else if (IS_CANNONLAKE(dev_priv)) @@ -2764,7 +2677,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp) return 0; } -uint32_t ddi_signal_levels(struct intel_dp *intel_dp) +u32 ddi_signal_levels(struct intel_dp *intel_dp) { struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); @@ -2778,8 +2691,8 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) } static inline -uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, - enum port port) +u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, + enum port port) { if (intel_port_is_combophy(dev_priv, port)) { return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port); @@ -2914,7 +2827,7 @@ static void intel_ddi_clk_select(struct 
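/*
 * Illustrative sketch (not part of this patch): the MG_TX_DCC programming
 * above only overrides the AMI clock divider for link rates above
 * 500000 kHz; at or below that rate the override is cleared.  The same
 * decision pulled out into a pure helper (the helper name is made up):
 */
static u32 example_mg_dcc_value(u32 old_val, int link_clock)
{
        u32 val = old_val & ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;

        if (link_clock <= 500000)
                val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
        else
                val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
                       CFG_AMI_CK_DIV_OVERRIDE_VAL(1);

        return val;
}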
intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - uint32_t val; + u32 val; const struct intel_shared_dpll *pll = crtc_state->shared_dpll; if (WARN_ON(!pll)) @@ -2922,10 +2835,10 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, mutex_lock(&dev_priv->dpll_lock); - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { if (!intel_port_is_combophy(dev_priv, port)) I915_WRITE(DDI_CLK_SEL(port), - icl_pll_to_ddi_pll_sel(encoder, crtc_state)); + icl_pll_to_ddi_clk_sel(encoder, crtc_state)); } else if (IS_CANNONLAKE(dev_priv)) { /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ val = I915_READ(DPCLKA_CFGCR0); @@ -2964,7 +2877,7 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { if (!intel_port_is_combophy(dev_priv, port)) I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } else if (IS_CANNONLAKE(dev_priv)) { @@ -2983,7 +2896,7 @@ static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum port port = dig_port->base.port; enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; + i915_reg_t mg_regs[2] = { MG_DP_MODE(0, port), MG_DP_MODE(1, port) }; u32 val; int i; @@ -3054,8 +2967,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT) return; - ln0 = I915_READ(MG_DP_MODE(port, 0)); - ln1 = I915_READ(MG_DP_MODE(port, 1)); + ln0 = I915_READ(MG_DP_MODE(0, port)); + ln1 = I915_READ(MG_DP_MODE(1, port)); switch (intel_dig_port->tc_type) { case TC_PORT_TYPEC: @@ -3105,8 +3018,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) return; } - I915_WRITE(MG_DP_MODE(port, 0), ln0); - I915_WRITE(MG_DP_MODE(port, 1), ln1); + I915_WRITE(MG_DP_MODE(0, port), ln0); + I915_WRITE(MG_DP_MODE(1, port), ln1); } static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, @@ -3133,7 +3046,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, val |= DP_TP_CTL_FEC_ENABLE; I915_WRITE(DP_TP_CTL(port), val); - if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port), + if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port), DP_TP_STATUS_FEC_ENABLE_LIVE, DP_TP_STATUS_FEC_ENABLE_LIVE, 1)) @@ -3181,7 +3094,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, icl_program_mg_dp_mode(dig_port); icl_disable_phy_clock_gating(dig_port); - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, encoder->type); else if (IS_CANNONLAKE(dev_priv)) @@ -3230,7 +3143,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, icl_program_mg_dp_mode(dig_port); icl_disable_phy_clock_gating(dig_port); - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, INTEL_OUTPUT_HDMI); else if (IS_CANNONLAKE(dev_priv)) @@ -3349,7 +3262,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, intel_edp_panel_vdd_on(intel_dp); intel_edp_panel_off(intel_dp); - intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); + intel_display_power_put_unchecked(dev_priv, + 
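/*
 * Illustrative sketch (not part of this patch): intel_wait_for_register()
 * now takes the uncore pointer explicitly.  The FEC enable path above is a
 * typical caller — set the enable bit, then poll the status register until
 * the "live" bit is set, with a short timeout in milliseconds.  The helper
 * name and error string are made up for the example.
 */
static void example_enable_fec(struct drm_i915_private *dev_priv,
                               enum port port)
{
        u32 val = I915_READ(DP_TP_CTL(port));

        val |= DP_TP_CTL_FEC_ENABLE;
        I915_WRITE(DP_TP_CTL(port), val);

        if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
                                    DP_TP_STATUS_FEC_ENABLE_LIVE,
                                    DP_TP_STATUS_FEC_ENABLE_LIVE,
                                    1))
                DRM_ERROR("Timed out waiting for FEC enable to go live\n");
}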
dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); } @@ -3369,7 +3283,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, intel_disable_ddi_buf(encoder, old_crtc_state); - intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); + intel_display_power_put_unchecked(dev_priv, + dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); @@ -3411,7 +3326,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - uint32_t val; + u32 val; /* * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) @@ -3603,6 +3518,35 @@ static void intel_disable_ddi(struct intel_encoder *encoder, intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state); } +static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + intel_ddi_set_pipe_settings(crtc_state); + + intel_psr_update(intel_dp, crtc_state); + intel_edp_drrs_enable(intel_dp, crtc_state); + + intel_panel_update_backlight(encoder, crtc_state, conn_state); +} + +static void intel_ddi_update_pipe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state); + + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_DESIRED) + intel_hdcp_enable(to_intel_connector(conn_state->connector)); + else if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + intel_hdcp_disable(to_intel_connector(conn_state->connector)); +} + static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, enum port port) @@ -3671,8 +3615,8 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder, if (intel_crtc_has_dp_encoder(crtc_state) || intel_port_is_tc(dev_priv, encoder->port)) - intel_display_power_put(dev_priv, - intel_ddi_main_link_aux_domain(dig_port)); + intel_display_power_put_unchecked(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); } void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) @@ -3681,7 +3625,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum port port = intel_dig_port->base.port; - uint32_t val; + u32 val; bool wait = false; if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { @@ -3737,7 +3681,7 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state) { - if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000) + if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 1; else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 2; @@ -3790,11 +3734,13 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->has_hdmi_sink = true; intel_dig_port = enc_to_dig_port(&encoder->base); - if (intel_dig_port->infoframe_enabled(encoder, pipe_config)) + pipe_config->infoframes.enable |= + intel_hdmi_infoframes_enabled(encoder, pipe_config); + + if (pipe_config->infoframes.enable) 
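/*
 * Illustrative sketch (not part of this patch): the new ->update_pipe()
 * hook lets a fastset refresh encoder state (PSR, DRRS, backlight, HDCP)
 * without a full modeset.  The HDCP half keys off the connector's
 * content_protection property, mirroring intel_ddi_update_pipe() added
 * above; example_update_pipe is a made-up name.
 */
static void example_update_pipe(struct intel_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
                                const struct drm_connector_state *conn_state)
{
        /* DP-specific updates (PSR/DRRS/backlight) only for non-HDMI sinks */
        if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);

        if (conn_state->content_protection ==
            DRM_MODE_CONTENT_PROTECTION_DESIRED)
                intel_hdcp_enable(to_intel_connector(conn_state->connector));
        else if (conn_state->content_protection ==
                 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
                intel_hdcp_disable(to_intel_connector(conn_state->connector));
}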
pipe_config->has_infoframe = true; - if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) == - TRANS_DDI_HDMI_SCRAMBLING_MASK) + if (temp & TRANS_DDI_HDMI_SCRAMBLING) pipe_config->hdmi_scrambling = true; if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE) pipe_config->hdmi_high_tmds_clock_ratio = true; @@ -3855,6 +3801,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder, bxt_ddi_phy_get_lane_lat_optim_mask(encoder); intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); + + intel_hdmi_read_gcp_infoframe(encoder, pipe_config); + + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_AVI, + &pipe_config->infoframes.avi); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_SPD, + &pipe_config->infoframes.spd); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_VENDOR, + &pipe_config->infoframes.hdmi); } static enum intel_output_type @@ -3875,15 +3833,15 @@ intel_ddi_compute_output_type(struct intel_encoder *encoder, } } -static bool intel_ddi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_ddi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; int ret; - if (port == PORT_A) + if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) pipe_config->cpu_transcoder = TRANSCODER_EDP; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) @@ -3901,9 +3859,50 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, } +static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_dp_encoder_suspend(encoder); + + /* + * TODO: disconnect also from USB DP alternate mode once we have a + * way to handle the modeset restore in that mode during resume + * even if the sink has disappeared while being suspended. 
+ */ + if (dig_port->tc_legacy_port) + icl_tc_phy_disconnect(i915, dig_port); +} + +static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder); + struct drm_i915_private *i915 = to_i915(drm_encoder->dev); + + if (intel_port_is_tc(i915, dig_port->base.port)) + intel_digital_port_connected(&dig_port->base); + + intel_dp_encoder_reset(drm_encoder); +} + +static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct drm_i915_private *i915 = to_i915(encoder->dev); + + intel_dp_encoder_flush_work(encoder); + + if (intel_port_is_tc(i915, dig_port->base.port)) + icl_tc_phy_disconnect(i915, dig_port); + + drm_encoder_cleanup(encoder); + kfree(dig_port); +} + static const struct drm_encoder_funcs intel_ddi_funcs = { - .reset = intel_dp_encoder_reset, - .destroy = intel_dp_encoder_destroy, + .reset = intel_ddi_encoder_reset, + .destroy = intel_ddi_encoder_destroy, }; static struct intel_connector * @@ -3944,23 +3943,10 @@ static int modeset_pipe(struct drm_crtc *crtc, goto out; } - crtc_state->mode_changed = true; - - ret = drm_atomic_add_affected_connectors(state, crtc); - if (ret) - goto out; - - ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) - goto out; + crtc_state->connectors_changed = true; ret = drm_atomic_commit(state); - if (ret) - goto out; - - return 0; - - out: +out: drm_atomic_state_put(state); return ret; @@ -4147,16 +4133,16 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport) void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) { + struct ddi_vbt_port_info *port_info = + &dev_priv->vbt.ddi_port_info[port]; struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; bool init_hdmi, init_dp, init_lspcon = false; enum pipe pipe; - - init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || - dev_priv->vbt.ddi_port_info[port].supports_hdmi); - init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp; + init_hdmi = port_info->supports_dvi || port_info->supports_hdmi; + init_dp = port_info->supports_dp; if (intel_bios_is_lspcon_present(dev_priv, port)) { /* @@ -4195,9 +4181,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_encoder->pre_enable = intel_ddi_pre_enable; intel_encoder->disable = intel_disable_ddi; intel_encoder->post_disable = intel_ddi_post_disable; + intel_encoder->update_pipe = intel_ddi_update_pipe; intel_encoder->get_hw_state = intel_ddi_get_hw_state; intel_encoder->get_config = intel_ddi_get_config; - intel_encoder->suspend = intel_dp_encoder_suspend; + intel_encoder->suspend = intel_ddi_encoder_suspend; intel_encoder->get_power_domains = intel_ddi_get_power_domains; intel_encoder->type = INTEL_OUTPUT_DDI; intel_encoder->power_domain = intel_port_to_power_domain(port); @@ -4216,6 +4203,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) && + !port_info->supports_typec_usb && + !port_info->supports_tbt; + switch (port) { case PORT_A: intel_dig_port->ddi_io_power_domain = @@ -4274,6 +4265,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) } intel_infoframe_init(intel_dig_port); + + if (intel_port_is_tc(dev_priv, port)) + 
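
/*
 * Illustrative, standalone sketch of the encoder-hook layering added in the
 * intel_ddi.c hunks above: the DDI-specific suspend wrapper first runs the
 * generic DP handler and then tears down the Type-C PHY connection for
 * "legacy" TC ports (ports the VBT marks as neither USB-C DP-alt nor
 * Thunderbolt capable). Types and helpers here are simplified stand-ins for
 * the example, not the i915 structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct dig_port {
	const char *name;
	bool is_tc;            /* port lives on a Type-C PHY            */
	bool supports_typec;   /* VBT: USB-C DP alternate mode possible */
	bool supports_tbt;     /* VBT: Thunderbolt possible             */
	bool tc_legacy;        /* derived below                         */
};

static void dp_encoder_suspend(struct dig_port *p)
{
	printf("%s: generic DP suspend\n", p->name);
}

static void tc_phy_disconnect(struct dig_port *p)
{
	printf("%s: disconnect TC PHY\n", p->name);
}

/* Models the tc_legacy_port derivation done in intel_ddi_init(). */
static void init_port(struct dig_port *p)
{
	p->tc_legacy = p->is_tc && !p->supports_typec && !p->supports_tbt;
}

/* Models intel_ddi_encoder_suspend(): generic work first, PHY last. */
static void ddi_encoder_suspend(struct dig_port *p)
{
	dp_encoder_suspend(p);
	if (p->tc_legacy)
		tc_phy_disconnect(p);
}

int main(void)
{
	struct dig_port c = { "port C", true, false, false, false };

	init_port(&c);
	ddi_encoder_suspend(&c); /* legacy TC port: PHY is disconnected too */
	return 0;
}
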
intel_digital_port_connected(intel_encoder); + return; err: diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 1e56319334f3..6af480b95bc6 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -57,6 +57,7 @@ static const char * const platform_names[] = { PLATFORM_NAME(COFFEELAKE), PLATFORM_NAME(CANNONLAKE), PLATFORM_NAME(ICELAKE), + PLATFORM_NAME(ELKHARTLAKE), }; #undef PLATFORM_NAME @@ -104,7 +105,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg)); } -void intel_device_info_dump_runtime(const struct intel_device_info *info, +void intel_device_info_dump_runtime(const struct intel_runtime_info *info, struct drm_printer *p) { sseu_dump(&info->sseu, p); @@ -113,21 +114,6 @@ void intel_device_info_dump_runtime(const struct intel_device_info *info, info->cs_timestamp_frequency_khz); } -void intel_device_info_dump(const struct intel_device_info *info, - struct drm_printer *p) -{ - struct drm_i915_private *dev_priv = - container_of(info, struct drm_i915_private, info); - - drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n", - INTEL_DEVID(dev_priv), - INTEL_REVID(dev_priv), - intel_platform_name(info->platform), - info->gen); - - intel_device_info_dump_flags(info, p); -} - void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, struct drm_printer *p) { @@ -164,15 +150,21 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu) static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) { - struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u8 s_en; u32 ss_en, ss_en_mask; u8 eu_en; int s; - sseu->max_slices = 1; - sseu->max_subslices = 8; - sseu->max_eus_per_subslice = 8; + if (IS_ELKHARTLAKE(dev_priv)) { + sseu->max_slices = 1; + sseu->max_subslices = 4; + sseu->max_eus_per_subslice = 8; + } else { + sseu->max_slices = 1; + sseu->max_subslices = 8; + sseu->max_eus_per_subslice = 8; + } s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); @@ -203,7 +195,7 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) { - struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; const u32 fuse2 = I915_READ(GEN8_FUSE2); int s, ss; const int eu_mask = 0xff; @@ -280,7 +272,7 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) { - struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse; fuse = I915_READ(CHV_FUSE_GT); @@ -334,7 +326,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) { struct intel_device_info *info = mkwrite_device_info(dev_priv); - struct sseu_dev_info *sseu = &info->sseu; + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; int s, ss; u32 fuse2, eu_disable, subslice_mask; const u8 eu_mask = 0xff; @@ -437,7 +429,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) { - struct sseu_dev_info *sseu = 
&mkwrite_device_info(dev_priv)->sseu; + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; int s, ss; u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */ @@ -519,8 +511,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) { - struct intel_device_info *info = mkwrite_device_info(dev_priv); - struct sseu_dev_info *sseu = &info->sseu; + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse1; int s, ss; @@ -528,9 +519,9 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) * There isn't a register to tell us how many slices/subslices. We * work off the PCI-ids here. */ - switch (info->gt) { + switch (INTEL_INFO(dev_priv)->gt) { default: - MISSING_CASE(info->gt); + MISSING_CASE(INTEL_INFO(dev_priv)->gt); /* fall through */ case 1: sseu->slice_mask = BIT(0); @@ -723,9 +714,102 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) return 0; } +#undef INTEL_VGA_DEVICE +#define INTEL_VGA_DEVICE(id, info) (id) + +static const u16 subplatform_ult_ids[] = { + INTEL_HSW_ULT_GT1_IDS(0), + INTEL_HSW_ULT_GT2_IDS(0), + INTEL_HSW_ULT_GT3_IDS(0), + INTEL_BDW_ULT_GT1_IDS(0), + INTEL_BDW_ULT_GT2_IDS(0), + INTEL_BDW_ULT_GT3_IDS(0), + INTEL_BDW_ULT_RSVD_IDS(0), + INTEL_SKL_ULT_GT1_IDS(0), + INTEL_SKL_ULT_GT2_IDS(0), + INTEL_SKL_ULT_GT3_IDS(0), + INTEL_KBL_ULT_GT1_IDS(0), + INTEL_KBL_ULT_GT2_IDS(0), + INTEL_KBL_ULT_GT3_IDS(0), + INTEL_CFL_U_GT2_IDS(0), + INTEL_CFL_U_GT3_IDS(0), + INTEL_WHL_U_GT1_IDS(0), + INTEL_WHL_U_GT2_IDS(0), + INTEL_WHL_U_GT3_IDS(0) +}; + +static const u16 subplatform_ulx_ids[] = { + INTEL_HSW_ULX_GT1_IDS(0), + INTEL_HSW_ULX_GT2_IDS(0), + INTEL_BDW_ULX_GT1_IDS(0), + INTEL_BDW_ULX_GT2_IDS(0), + INTEL_BDW_ULX_GT3_IDS(0), + INTEL_BDW_ULX_RSVD_IDS(0), + INTEL_SKL_ULX_GT1_IDS(0), + INTEL_SKL_ULX_GT2_IDS(0), + INTEL_KBL_ULX_GT1_IDS(0), + INTEL_KBL_ULX_GT2_IDS(0) +}; + +static const u16 subplatform_aml_ids[] = { + INTEL_AML_KBL_GT2_IDS(0), + INTEL_AML_CFL_GT2_IDS(0) +}; + +static const u16 subplatform_portf_ids[] = { + INTEL_CNL_PORT_F_IDS(0), + INTEL_ICL_PORT_F_IDS(0) +}; + +static bool find_devid(u16 id, const u16 *p, unsigned int num) +{ + for (; num; num--, p++) { + if (*p == id) + return true; + } + + return false; +} + +void intel_device_info_subplatform_init(struct drm_i915_private *i915) +{ + const struct intel_device_info *info = INTEL_INFO(i915); + const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915); + const unsigned int pi = __platform_mask_index(rinfo, info->platform); + const unsigned int pb = __platform_mask_bit(rinfo, info->platform); + u16 devid = INTEL_DEVID(i915); + u32 mask = 0; + + /* Make sure IS_<platform> checks are working. */ + RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb); + + /* Find and mark subplatform bits based on the PCI device id. */ + if (find_devid(devid, subplatform_ult_ids, + ARRAY_SIZE(subplatform_ult_ids))) { + mask = BIT(INTEL_SUBPLATFORM_ULT); + } else if (find_devid(devid, subplatform_ulx_ids, + ARRAY_SIZE(subplatform_ulx_ids))) { + mask = BIT(INTEL_SUBPLATFORM_ULX); + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { + /* ULX machines are also considered ULT. 
*/ + mask |= BIT(INTEL_SUBPLATFORM_ULT); + } + } else if (find_devid(devid, subplatform_aml_ids, + ARRAY_SIZE(subplatform_aml_ids))) { + mask = BIT(INTEL_SUBPLATFORM_AML); + } else if (find_devid(devid, subplatform_portf_ids, + ARRAY_SIZE(subplatform_portf_ids))) { + mask = BIT(INTEL_SUBPLATFORM_PORTF); + } + + GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS); + + RUNTIME_INFO(i915)->platform_mask[pi] |= mask; +} + /** * intel_device_info_runtime_init - initialize runtime info - * @info: intel device info struct + * @dev_priv: the i915 device * * Determine various intel_device_info fields at runtime. * @@ -739,29 +823,29 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) * - after the PCH has been detected, * - before the first usage of the fields it can tweak. */ -void intel_device_info_runtime_init(struct intel_device_info *info) +void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = - container_of(info, struct drm_i915_private, info); + struct intel_device_info *info = mkwrite_device_info(dev_priv); + struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv); enum pipe pipe; if (INTEL_GEN(dev_priv) >= 10) { for_each_pipe(dev_priv, pipe) - info->num_scalers[pipe] = 2; - } else if (IS_GEN9(dev_priv)) { - info->num_scalers[PIPE_A] = 2; - info->num_scalers[PIPE_B] = 2; - info->num_scalers[PIPE_C] = 1; + runtime->num_scalers[pipe] = 2; + } else if (IS_GEN(dev_priv, 9)) { + runtime->num_scalers[PIPE_A] = 2; + runtime->num_scalers[PIPE_B] = 2; + runtime->num_scalers[PIPE_C] = 1; } - BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t)); + BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES); - if (IS_GEN11(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) for_each_pipe(dev_priv, pipe) - info->num_sprites[pipe] = 6; - else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv)) + runtime->num_sprites[pipe] = 6; + else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv)) for_each_pipe(dev_priv, pipe) - info->num_sprites[pipe] = 3; + runtime->num_sprites[pipe] = 3; else if (IS_BROXTON(dev_priv)) { /* * Skylake and Broxton currently don't expose the topmost plane as its @@ -772,22 +856,22 @@ void intel_device_info_runtime_init(struct intel_device_info *info) * down the line. 
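
/*
 * Illustrative, standalone model of the subplatform detection added above in
 * intel_device_info_subplatform_init(): the PCI device id is looked up in
 * per-subplatform id tables and a match is folded into a bitmask that later
 * backs IS_<platform>_<subplatform>()-style checks. The ids below are
 * made-up placeholders for the example, not real PCI ids.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SUBPLATFORM_ULT 0
#define SUBPLATFORM_ULX 1

static const uint16_t ult_ids[] = { 0x0a16, 0x0a26 }; /* placeholders */
static const uint16_t ulx_ids[] = { 0x0a0e, 0x0a1e }; /* placeholders */

static bool find_devid(uint16_t id, const uint16_t *p, size_t num)
{
	for (; num; num--, p++)
		if (*p == id)
			return true;
	return false;
}

static uint32_t subplatform_mask(uint16_t devid, bool ulx_implies_ult)
{
	uint32_t mask = 0;

	if (find_devid(devid, ult_ids, sizeof(ult_ids) / sizeof(ult_ids[0]))) {
		mask = 1u << SUBPLATFORM_ULT;
	} else if (find_devid(devid, ulx_ids,
			      sizeof(ulx_ids) / sizeof(ulx_ids[0]))) {
		mask = 1u << SUBPLATFORM_ULX;
		/* HSW/BDW ULX parts are also treated as ULT, as in the patch. */
		if (ulx_implies_ult)
			mask |= 1u << SUBPLATFORM_ULT;
	}

	return mask;
}

int main(void)
{
	printf("mask for 0x0a1e: 0x%x\n", subplatform_mask(0x0a1e, true));
	return 0;
}
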
*/ - info->num_sprites[PIPE_A] = 2; - info->num_sprites[PIPE_B] = 2; - info->num_sprites[PIPE_C] = 1; + runtime->num_sprites[PIPE_A] = 2; + runtime->num_sprites[PIPE_B] = 2; + runtime->num_sprites[PIPE_C] = 1; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { for_each_pipe(dev_priv, pipe) - info->num_sprites[pipe] = 2; + runtime->num_sprites[pipe] = 2; } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { for_each_pipe(dev_priv, pipe) - info->num_sprites[pipe] = 1; + runtime->num_sprites[pipe] = 1; } if (i915_modparams.disable_display) { DRM_INFO("Display disabled (module parameter)\n"); info->num_pipes = 0; } else if (HAS_DISPLAY(dev_priv) && - (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && + (IS_GEN_RANGE(dev_priv, 7, 8)) && HAS_PCH_SPLIT(dev_priv)) { u32 fuse_strap = I915_READ(FUSE_STRAP); u32 sfuse_strap = I915_READ(SFUSE_STRAP); @@ -811,7 +895,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info) DRM_INFO("PipeC fused off\n"); info->num_pipes -= 1; } - } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) { + } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) { u32 dfsm = I915_READ(SKL_DFSM); u8 disabled_mask = 0; bool invalid; @@ -851,20 +935,20 @@ void intel_device_info_runtime_init(struct intel_device_info *info) cherryview_sseu_info_init(dev_priv); else if (IS_BROADWELL(dev_priv)) broadwell_sseu_info_init(dev_priv); - else if (IS_GEN9(dev_priv)) + else if (IS_GEN(dev_priv, 9)) gen9_sseu_info_init(dev_priv); - else if (IS_GEN10(dev_priv)) + else if (IS_GEN(dev_priv, 10)) gen10_sseu_info_init(dev_priv); else if (INTEL_GEN(dev_priv) >= 11) gen11_sseu_info_init(dev_priv); - if (IS_GEN6(dev_priv) && intel_vtd_active()) { + if (IS_GEN(dev_priv, 6) && intel_vtd_active()) { DRM_INFO("Disabling ppGTT for VT-d support\n"); - info->ppgtt = INTEL_PPGTT_NONE; + info->ppgtt_type = INTEL_PPGTT_NONE; } /* Initialize command stream timestamp frequency */ - info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); + runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); } void intel_driver_caps_print(const struct intel_driver_caps *caps, @@ -884,37 +968,52 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps, void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) { struct intel_device_info *info = mkwrite_device_info(dev_priv); - u32 media_fuse; + unsigned int logical_vdbox = 0; unsigned int i; + u32 media_fuse; + u16 vdbox_mask; + u16 vebox_mask; if (INTEL_GEN(dev_priv) < 11) return; media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); - info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; - info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> - GEN11_GT_VEBOX_DISABLE_SHIFT; + vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; + vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> + GEN11_GT_VEBOX_DISABLE_SHIFT; - DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable); for (i = 0; i < I915_MAX_VCS; i++) { if (!HAS_ENGINE(dev_priv, _VCS(i))) continue; - if (!(BIT(i) & info->vdbox_enable)) { - info->ring_mask &= ~ENGINE_MASK(_VCS(i)); + if (!(BIT(i) & vdbox_mask)) { + info->engine_mask &= ~BIT(_VCS(i)); DRM_DEBUG_DRIVER("vcs%u fused off\n", i); + continue; } + + /* + * In Gen11, only even numbered logical VDBOXes are + * hooked up to an SFC (Scaler & Format Converter) unit. 
+ */ + if (logical_vdbox++ % 2 == 0) + RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i); } + DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n", + vdbox_mask, VDBOX_MASK(dev_priv)); + GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv)); - DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable); for (i = 0; i < I915_MAX_VECS; i++) { if (!HAS_ENGINE(dev_priv, _VECS(i))) continue; - if (!(BIT(i) & info->vebox_enable)) { - info->ring_mask &= ~ENGINE_MASK(_VECS(i)); + if (!(BIT(i) & vebox_mask)) { + info->engine_mask &= ~BIT(_VECS(i)); DRM_DEBUG_DRIVER("vecs%u fused off\n", i); } } + DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n", + vebox_mask, VEBOX_MASK(dev_priv)); + GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv)); } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 1caf24e2cf0b..0e579f158016 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -27,6 +27,7 @@ #include <uapi/drm/i915_drm.h> +#include "intel_engine_types.h" #include "intel_display.h" struct drm_printer; @@ -73,14 +74,29 @@ enum intel_platform { INTEL_CANNONLAKE, /* gen11 */ INTEL_ICELAKE, + INTEL_ELKHARTLAKE, INTEL_MAX_PLATFORMS }; -enum intel_ppgtt { +/* + * Subplatform bits share the same namespace per parent platform. In other words + * it is fine for the same bit to be used on multiple parent platforms. + */ + +#define INTEL_SUBPLATFORM_BITS (3) + +/* HSW/BDW/SKL/KBL/CFL */ +#define INTEL_SUBPLATFORM_ULT (0) +#define INTEL_SUBPLATFORM_ULX (1) +#define INTEL_SUBPLATFORM_AML (2) + +/* CNL/ICL */ +#define INTEL_SUBPLATFORM_PORTF (0) + +enum intel_ppgtt_type { INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING, INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL, - INTEL_PPGTT_FULL_4LVL, }; #define DEV_INFO_FOR_EACH_FLAG(func) \ @@ -89,6 +105,7 @@ enum intel_ppgtt { func(is_alpha_support); \ /* Keep has_* in alphabetical order */ \ func(has_64bit_reloc); \ + func(gpu_reset_clobbers_display); \ func(has_reset_engine); \ func(has_fpga_dbg); \ func(has_guc); \ @@ -114,7 +131,7 @@ enum intel_ppgtt { func(has_ddi); \ func(has_dp_mst); \ func(has_fbc); \ - func(has_gmch_display); \ + func(has_gmch); \ func(has_hotplug); \ func(has_ipc); \ func(has_overlay); \ @@ -149,28 +166,23 @@ struct sseu_dev_info { u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES]; }; -typedef u8 intel_ring_mask_t; - struct intel_device_info { - u16 device_id; u16 gen_mask; u8 gen; u8 gt; /* GT number, 0 if undefined */ - u8 num_rings; - intel_ring_mask_t ring_mask; /* Rings supported by the HW */ + intel_engine_mask_t engine_mask; /* Engines supported by the HW */ enum intel_platform platform; - u32 platform_mask; - enum intel_ppgtt ppgtt; + enum intel_ppgtt_type ppgtt_type; + unsigned int ppgtt_size; /* log2, e.g. 
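
/*
 * Standalone sketch of the media-engine fuse decoding reworked above in
 * intel_device_info_init_mmio(): the inverted fuse register yields
 * per-instance enable masks, and only every other *logical* (i.e. enabled)
 * VDBOX is credited with SFC access. The register bit layout used here is a
 * simplified assumption for the example, not the hardware definition.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_VCS			4
#define VDBOX_DISABLE_MASK	0x0f			/* assumed layout */
#define VEBOX_DISABLE_SHIFT	4
#define VEBOX_DISABLE_MASK	(0x3 << VEBOX_DISABLE_SHIFT)

struct media_info {
	uint16_t vdbox_mask;
	uint16_t vebox_mask;
	uint8_t  vdbox_sfc_access;
};

static struct media_info decode_media_fuse(uint32_t fuse_reg)
{
	uint32_t media_fuse = ~fuse_reg;	/* register stores disables */
	struct media_info info = {
		.vdbox_mask = media_fuse & VDBOX_DISABLE_MASK,
		.vebox_mask = (media_fuse & VEBOX_DISABLE_MASK) >>
			      VEBOX_DISABLE_SHIFT,
	};
	unsigned int i, logical_vdbox = 0;

	for (i = 0; i < MAX_VCS; i++) {
		if (!(info.vdbox_mask & (1u << i)))
			continue;	/* fused off: no logical slot */

		/* Even-numbered logical VDBOXes are wired to an SFC. */
		if (logical_vdbox++ % 2 == 0)
			info.vdbox_sfc_access |= 1u << i;
	}

	return info;
}

int main(void)
{
	/* Only VCS1 fused off: physical 0/2/3 become logical 0/1/2. */
	struct media_info m = decode_media_fuse(0x02);

	printf("vdbox 0x%x sfc 0x%x vebox 0x%x\n",
	       m.vdbox_mask, m.vdbox_sfc_access, m.vebox_mask);
	return 0;
}
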
31/32/48 bits */ + unsigned int page_sizes; /* page sizes supported by the HW */ u32 display_mmio_offset; u8 num_pipes; - u8 num_sprites[I915_MAX_PIPES]; - u8 num_scalers[I915_MAX_PIPES]; #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); @@ -189,19 +201,39 @@ struct intel_device_info { int trans_offsets[I915_MAX_TRANSCODERS]; int cursor_offsets[I915_MAX_PIPES]; + struct color_luts { + u16 degamma_lut_size; + u16 gamma_lut_size; + u32 degamma_lut_tests; + u32 gamma_lut_tests; + } color; +}; + +struct intel_runtime_info { + /* + * Platform mask is used for optimizing or-ed IS_PLATFORM calls into + * into single runtime conditionals, and also to provide groundwork + * for future per platform, or per SKU build optimizations. + * + * Array can be extended when necessary if the corresponding + * BUILD_BUG_ON is hit. + */ + u32 platform_mask[2]; + + u16 device_id; + + u8 num_sprites[I915_MAX_PIPES]; + u8 num_scalers[I915_MAX_PIPES]; + + u8 num_engines; + /* Slice/subslice/EU info */ struct sseu_dev_info sseu; u32 cs_timestamp_frequency_khz; - /* Enabled (not fused off) media engine bitmasks. */ - u8 vdbox_enable; - u8 vebox_enable; - - struct color_luts { - u16 degamma_lut_size; - u16 gamma_lut_size; - } color; + /* Media engine access to SFC per instance */ + u8 vdbox_sfc_access; }; struct intel_driver_caps { @@ -258,12 +290,11 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu, const char *intel_platform_name(enum intel_platform platform); -void intel_device_info_runtime_init(struct intel_device_info *info); -void intel_device_info_dump(const struct intel_device_info *info, - struct drm_printer *p); +void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv); +void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); void intel_device_info_dump_flags(const struct intel_device_info *info, struct drm_printer *p); -void intel_device_info_dump_runtime(const struct intel_device_info *info, +void intel_device_info_dump_runtime(const struct intel_runtime_info *info, struct drm_printer *p); void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, struct drm_printer *p); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 07c861884c70..7ecfb7d98839 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -24,33 +24,44 @@ * Eric Anholt <eric@anholt.net> */ -#include <linux/module.h> -#include <linux/input.h> #include <linux/i2c.h> +#include <linux/input.h> +#include <linux/intel-iommu.h> #include <linux/kernel.h> +#include <linux/module.h> +#include <linux/reservation.h> #include <linux/slab.h> #include <linux/vgaarb.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic_uapi.h> +#include <drm/drm_dp_helper.h> #include <drm/drm_edid.h> -#include <drm/drmP.h> -#include "intel_drv.h" -#include "intel_frontbuffer.h" +#include <drm/drm_fourcc.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_rect.h> #include <drm/i915_drm.h> + #include "i915_drv.h" #include "i915_gem_clflush.h" +#include "i915_trace.h" +#include "intel_drv.h" +#include "intel_dsi.h" +#include "intel_frontbuffer.h" + +#include "intel_drv.h" #include "intel_dsi.h" +#include "intel_frontbuffer.h" + +#include "i915_drv.h" +#include "i915_gem_clflush.h" +#include "i915_reset.h" #include "i915_trace.h" -#include <drm/drm_atomic.h> -#include <drm/drm_atomic_helper.h> -#include <drm/drm_dp_helper.h> -#include 
<drm/drm_crtc_helper.h> -#include <drm/drm_plane_helper.h> -#include <drm/drm_rect.h> -#include <drm/drm_atomic_uapi.h> -#include <linux/dma_remapping.h> -#include <linux/reservation.h> /* Primary plane formats for gen <= 3 */ -static const uint32_t i8xx_primary_formats[] = { +static const u32 i8xx_primary_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB1555, @@ -58,7 +69,7 @@ static const uint32_t i8xx_primary_formats[] = { }; /* Primary plane formats for gen >= 4 */ -static const uint32_t i965_primary_formats[] = { +static const u32 i965_primary_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -67,18 +78,18 @@ static const uint32_t i965_primary_formats[] = { DRM_FORMAT_XBGR2101010, }; -static const uint64_t i9xx_format_modifiers[] = { +static const u64 i9xx_format_modifiers[] = { I915_FORMAT_MOD_X_TILED, DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; /* Cursor formats */ -static const uint32_t intel_cursor_formats[] = { +static const u32 intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; -static const uint64_t cursor_format_modifiers[] = { +static const u64 cursor_format_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; @@ -494,7 +505,7 @@ static int pnv_calc_dpll_params(int refclk, struct dpll *clock) return clock->dot; } -static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) +static u32 i9xx_dpll_compute_m(struct dpll *dpll) { return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); } @@ -529,8 +540,8 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock) clock->p = clock->p1 * clock->p2; if (WARN_ON(clock->n == 0 || clock->p == 0)) return 0; - clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, - clock->n << 22); + clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m, + clock->n << 22); clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); return clock->dot / 5; @@ -584,7 +595,7 @@ i9xx_select_p2_div(const struct intel_limit *limit, const struct intel_crtc_state *crtc_state, int target) { - struct drm_device *dev = crtc_state->base.crtc->dev; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* @@ -592,7 +603,7 @@ i9xx_select_p2_div(const struct intel_limit *limit, * We haven't figured out how to reliably set up different * single/dual channel state, if we even can. 
*/ - if (intel_is_dual_link_lvds(dev)) + if (intel_is_dual_link_lvds(dev_priv)) return limit->p2.p2_fast; else return limit->p2.p2_slow; @@ -892,7 +903,7 @@ chv_find_best_dpll(const struct intel_limit *limit, struct drm_device *dev = crtc->base.dev; unsigned int best_error_ppm; struct dpll clock; - uint64_t m2; + u64 m2; int found = false; memset(best_clock, 0, sizeof(*best_clock)); @@ -914,7 +925,7 @@ chv_find_best_dpll(const struct intel_limit *limit, clock.p = clock.p1 * clock.p2; - m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p * + m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p * clock.n) << 22, refclk * clock.m1); if (m2 > INT_MAX/clock.m1) @@ -940,14 +951,15 @@ chv_find_best_dpll(const struct intel_limit *limit, return found; } -bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, +bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock) { int refclk = 100000; const struct intel_limit *limit = &intel_limits_bxt; return chv_find_best_dpll(limit, crtc_state, - target_clock, refclk, NULL, best_clock); + crtc_state->port_clock, refclk, + NULL, best_clock); } bool intel_crtc_active(struct intel_crtc *crtc) @@ -984,7 +996,7 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, u32 line1, line2; u32 line_mask; - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) line_mask = DSL_LINEMASK_GEN2; else line_mask = DSL_LINEMASK_GEN3; @@ -1028,7 +1040,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) i915_reg_t reg = PIPECONF(cpu_transcoder); /* Wait for the Pipe State to go off */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, reg, I965_PIPECONF_ACTIVE, 0, 100)) WARN(1, "pipe_off wait timed out\n"); @@ -1110,7 +1122,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, u32 val; /* ILK FDI PLL is always enabled */ - if (IS_GEN5(dev_priv)) + if (IS_GEN(dev_priv, 5)) return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ @@ -1198,17 +1210,19 @@ void assert_pipe(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv)) state = true; power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); - if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (wakeref) { u32 val = I915_READ(PIPECONF(cpu_transcoder)); cur_state = !!(val & PIPECONF_ENABLE); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); } else { cur_state = false; } @@ -1332,7 +1346,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc, POSTING_READ(DPLL(pipe)); udelay(150); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV, @@ -1385,7 +1399,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc, I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); /* Check PLL is locked */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV, 1)) DRM_ERROR("PLL %d failed to lock\n", pipe); @@ -1428,17 +1442,12 @@ static void chv_enable_pll(struct intel_crtc *crtc, } } -static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv) +static bool i9xx_has_pps(struct 
drm_i915_private *dev_priv) { - struct intel_crtc *crtc; - int count = 0; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - count += crtc->base.state->active && - intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO); - } + if (IS_I830(dev_priv)) + return false; - return count; + return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); } static void i9xx_enable_pll(struct intel_crtc *crtc, @@ -1452,29 +1461,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc, assert_pipe_disabled(dev_priv, crtc->pipe); /* PLL is protected by panel, make sure we can write it */ - if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv)) + if (i9xx_has_pps(dev_priv)) assert_panel_unlocked(dev_priv, crtc->pipe); - /* Enable DVO 2x clock on both PLLs if necessary */ - if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) { - /* - * It appears to be important that we don't enable this - * for the current pipe before otherwise configuring the - * PLL. No idea how this should be handled if multiple - * DVO outputs are enabled simultaneosly. - */ - dpll |= DPLL_DVO_2X_MODE; - I915_WRITE(DPLL(!crtc->pipe), - I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); - } - /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. */ - I915_WRITE(reg, 0); - + I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS); I915_WRITE(reg, dpll); /* Wait for the clocks to stabilize. */ @@ -1507,16 +1502,6 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - /* Disable DVO 2x clock on both PLLs if necessary */ - if (IS_I830(dev_priv) && - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) && - !intel_num_dvo_pipes(dev_priv)) { - I915_WRITE(DPLL(PIPE_B), - I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); - I915_WRITE(DPLL(PIPE_A), - I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE); - } - /* Don't disable pipe or pipe PLLs if needed */ if (IS_I830(dev_priv)) return; @@ -1595,7 +1580,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, BUG(); } - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, dpll_reg, port_mask, expected_mask, 1000)) WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", @@ -1609,7 +1594,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; - uint32_t val, pipeconf_val; + u32 val, pipeconf_val; /* Make sure PCH DPLL is enabled */ assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); @@ -1645,17 +1630,18 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s } val &= ~TRANS_INTERLACE_MASK; - if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) + if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { if (HAS_PCH_IBX(dev_priv) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) val |= TRANS_LEGACY_INTERLACED_ILK; else val |= TRANS_INTERLACED; - else + } else { val |= TRANS_PROGRESSIVE; + } I915_WRITE(reg, val | TRANS_ENABLE); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE, 100)) DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); @@ -1685,7 +1671,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, val |= 
TRANS_PROGRESSIVE; I915_WRITE(LPT_TRANSCONF, val); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, LPT_TRANSCONF, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE, @@ -1697,7 +1683,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { i915_reg_t reg; - uint32_t val; + u32 val; /* FDI relies on the transcoder */ assert_fdi_tx_disabled(dev_priv, pipe); @@ -1711,7 +1697,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, val &= ~TRANS_ENABLE; I915_WRITE(reg, val); /* wait for PCH transcoder off, transcoder state */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, reg, TRANS_STATE_ENABLE, 0, 50)) DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); @@ -1733,7 +1719,7 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) val &= ~TRANS_ENABLE; I915_WRITE(LPT_TRANSCONF, val); /* wait for PCH transcoder off, transcoder state */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, LPT_TRANSCONF, TRANS_STATE_ENABLE, 0, 50)) DRM_ERROR("Failed to disable PCH transcoder\n"); @@ -1754,6 +1740,35 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) return crtc->pipe; } +static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + /* + * On i965gm the hardware frame counter reads + * zero when the TV encoder is enabled :( + */ + if (IS_I965GM(dev_priv) && + (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT))) + return 0; + + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + return 0xffffffff; /* full 32 bit counter */ + else if (INTEL_GEN(dev_priv) >= 3) + return 0xffffff; /* only 24 bits of frame count */ + else + return 0; /* Gen2 doesn't have a hardware frame counter */ +} + +static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + drm_crtc_set_max_vblank_count(&crtc->base, + intel_crtc_max_vblank_count(crtc_state)); + drm_crtc_vblank_on(&crtc->base); +} + static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); @@ -1772,7 +1787,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) * a plane. On ILK+ the pipe PLLs are integrated, so we don't * need the check. */ - if (HAS_GMCH_DISPLAY(dev_priv)) { + if (HAS_GMCH(dev_priv)) { if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else @@ -1788,6 +1803,8 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) /* FIXME: assert CPU port conditions for SNB+ */ } + trace_intel_pipe_enable(dev_priv, pipe); + reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); if (val & PIPECONF_ENABLE) { @@ -1806,7 +1823,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) * when it's derived from the timestamps. 
So let's wait for the * pipe to start properly before we call drm_crtc_vblank_on() */ - if (dev_priv->drm.max_vblank_count == 0) + if (intel_crtc_max_vblank_count(new_crtc_state) == 0) intel_wait_for_pipe_scanline_moving(crtc); } @@ -1827,6 +1844,8 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) */ assert_planes_disabled(crtc); + trace_intel_pipe_disable(dev_priv, pipe); + reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); if ((val & PIPECONF_ENABLE) == 0) @@ -1850,7 +1869,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) { - return IS_GEN2(dev_priv) ? 2048 : 4096; + return IS_GEN(dev_priv, 2) ? 2048 : 4096; } static unsigned int @@ -1863,7 +1882,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) case DRM_FORMAT_MOD_LINEAR: return cpp; case I915_FORMAT_MOD_X_TILED: - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) return 128; else return 512; @@ -1872,7 +1891,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) return 128; /* fall through */ case I915_FORMAT_MOD_Y_TILED: - if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv)) + if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv)) return 128; else return 512; @@ -2024,6 +2043,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = intel_fb_obj(fb); + intel_wakeref_t wakeref; struct i915_vma *vma; unsigned int pinctl; u32 alignment; @@ -2047,7 +2067,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, * intel_runtime_pm_put(), so it is correct to wrap only the * pin/unpin/fence and not more. */ - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); atomic_inc(&dev_priv->gpu_error.pending_fb_pin); @@ -2060,7 +2080,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, * complicated than this. For example, Cherryview appears quite * happy to scanout from anywhere within its global aperture. 
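
/*
 * Standalone sketch of the per-crtc frame-counter sizing introduced above in
 * intel_crtc_max_vblank_count(): instead of one driver-wide max_vblank_count,
 * the counter width is picked from the pipe's generation and output type, and
 * a result of 0 means "no usable hardware counter", in which case the enable
 * path waits for the scanline to move. The struct is a reduced stand-in for
 * the real crtc state.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct crtc_model {
	int gen;
	bool is_i965gm;
	bool is_g4x;
	bool has_tv_output;
};

static uint32_t max_vblank_count(const struct crtc_model *c)
{
	/* i965gm: the frame counter reads zero while the TV encoder is on. */
	if (c->is_i965gm && c->has_tv_output)
		return 0;

	if (c->gen >= 5 || c->is_g4x)
		return 0xffffffff;	/* full 32 bit counter */
	if (c->gen >= 3)
		return 0xffffff;	/* 24 bits of frame count */
	return 0;			/* gen2: no hardware counter */
}

int main(void)
{
	struct crtc_model gen2 = { .gen = 2 };
	struct crtc_model ilk  = { .gen = 5 };

	printf("gen2 max: %u, ilk max: %u\n",
	       max_vblank_count(&gen2), max_vblank_count(&ilk));
	return 0;
}
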
*/ - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) pinctl |= PIN_MAPPABLE; vma = i915_gem_object_pin_to_display_plane(obj, @@ -2102,7 +2122,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, err: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return vma; } @@ -2373,7 +2393,7 @@ static int intel_fb_offset_to_xy(int *x, int *y, return 0; } -static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier) +static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) { switch (fb_modifier) { case I915_FORMAT_MOD_X_TILED: @@ -2634,6 +2654,24 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) return DRM_FORMAT_RGB565; case PLANE_CTL_FORMAT_NV12: return DRM_FORMAT_NV12; + case PLANE_CTL_FORMAT_P010: + return DRM_FORMAT_P010; + case PLANE_CTL_FORMAT_P012: + return DRM_FORMAT_P012; + case PLANE_CTL_FORMAT_P016: + return DRM_FORMAT_P016; + case PLANE_CTL_FORMAT_Y210: + return DRM_FORMAT_Y210; + case PLANE_CTL_FORMAT_Y212: + return DRM_FORMAT_Y212; + case PLANE_CTL_FORMAT_Y216: + return DRM_FORMAT_Y216; + case PLANE_CTL_FORMAT_Y410: + return DRM_FORMAT_XVYU2101010; + case PLANE_CTL_FORMAT_Y412: + return DRM_FORMAT_XVYU12_16161616; + case PLANE_CTL_FORMAT_Y416: + return DRM_FORMAT_XVYU16161616; default: case PLANE_CTL_FORMAT_XRGB_8888: if (rgb_order) { @@ -2652,6 +2690,18 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) return DRM_FORMAT_XBGR2101010; else return DRM_FORMAT_XRGB2101010; + case PLANE_CTL_FORMAT_XRGB_16161616F: + if (rgb_order) { + if (alpha) + return DRM_FORMAT_ABGR16161616F; + else + return DRM_FORMAT_XBGR16161616F; + } else { + if (alpha) + return DRM_FORMAT_ARGB16161616F; + else + return DRM_FORMAT_XRGB16161616F; + } } } @@ -2782,8 +2832,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, if (plane->id == PLANE_PRIMARY) intel_pre_disable_primary_noatomic(&crtc->base); - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, crtc_state); + intel_disable_plane(plane, crtc_state); } static void @@ -3133,7 +3182,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) * Handle the AUX surface first since * the main surface setup depends on it. 
*/ - if (fb->format->format == DRM_FORMAT_NV12) { + if (is_planar_yuv_format(fb->format->format)) { ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; @@ -3161,7 +3210,7 @@ i9xx_plane_max_stride(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - if (!HAS_GMCH_DISPLAY(dev_priv)) { + if (!HAS_GMCH(dev_priv)) { return 32*1024; } else if (INTEL_GEN(dev_priv) >= 4) { if (modifier == I915_FORMAT_MOD_X_TILED) @@ -3181,28 +3230,39 @@ i9xx_plane_max_stride(struct intel_plane *plane, } } +static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 dspcntr = 0; + + if (crtc_state->gamma_enable) + dspcntr |= DISPPLANE_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; + + if (INTEL_GEN(dev_priv) < 5) + dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); + + return dspcntr; +} + static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); const struct drm_framebuffer *fb = plane_state->base.fb; unsigned int rotation = plane_state->base.rotation; u32 dspcntr; - dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE; + dspcntr = DISPLAY_PLANE_ENABLE; - if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) || - IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) + if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || + IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; - - if (INTEL_GEN(dev_priv) < 5) - dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); - switch (fb->format->format) { case DRM_FORMAT_C8: dspcntr |= DISPPLANE_8BPP; @@ -3330,11 +3390,13 @@ static void i9xx_update_plane(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; u32 linear_offset; - u32 dspcntr = plane_state->ctl; int x = plane_state->color_plane[0].x; int y = plane_state->color_plane[0].y; unsigned long irqflags; u32 dspaddr_offset; + u32 dspcntr; + + dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); @@ -3394,10 +3456,23 @@ static void i9xx_disable_plane(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; unsigned long irqflags; + u32 dspcntr; + + /* + * DSPCNTR pipe gamma enable on g4x+ and pipe csc + * enable on ilk+ affect the pipe bottom color as + * well, so we must configure them even if the plane + * is disabled. + * + * On pre-g4x there is no way to gamma correct the + * pipe bottom color but we'll keep on doing this + * anyway so that the crtc state readout works correctly. 
+ */ + dspcntr = i9xx_plane_ctl_crtc(crtc_state); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(DSPCNTR(i9xx_plane), 0); + I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); if (INTEL_GEN(dev_priv) >= 4) I915_WRITE_FW(DSPSURF(i9xx_plane), 0); else @@ -3412,6 +3487,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + intel_wakeref_t wakeref; bool ret; u32 val; @@ -3421,7 +3497,8 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane, * display power wells. */ power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; val = I915_READ(DSPCNTR(i9xx_plane)); @@ -3434,7 +3511,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane, *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> DISPPLANE_SEL_PIPE_SHIFT; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -3503,7 +3580,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state, return stride / skl_plane_stride_mult(fb, color_plane, rotation); } -static u32 skl_plane_ctl_format(uint32_t pixel_format) +static u32 skl_plane_ctl_format(u32 pixel_format) { switch (pixel_format) { case DRM_FORMAT_C8: @@ -3520,6 +3597,12 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format) return PLANE_CTL_FORMAT_XRGB_2101010; case DRM_FORMAT_XBGR2101010: return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010; + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + return PLANE_CTL_FORMAT_XRGB_16161616F; case DRM_FORMAT_YUYV: return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; case DRM_FORMAT_YVYU: @@ -3530,6 +3613,24 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format) return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; case DRM_FORMAT_NV12: return PLANE_CTL_FORMAT_NV12; + case DRM_FORMAT_P010: + return PLANE_CTL_FORMAT_P010; + case DRM_FORMAT_P012: + return PLANE_CTL_FORMAT_P012; + case DRM_FORMAT_P016: + return PLANE_CTL_FORMAT_P016; + case DRM_FORMAT_Y210: + return PLANE_CTL_FORMAT_Y210; + case DRM_FORMAT_Y212: + return PLANE_CTL_FORMAT_Y212; + case DRM_FORMAT_Y216: + return PLANE_CTL_FORMAT_Y216; + case DRM_FORMAT_XVYU2101010: + return PLANE_CTL_FORMAT_Y410; + case DRM_FORMAT_XVYU12_16161616: + return PLANE_CTL_FORMAT_Y412; + case DRM_FORMAT_XVYU16161616: + return PLANE_CTL_FORMAT_Y416; default: MISSING_CASE(pixel_format); } @@ -3573,7 +3674,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state } } -static u32 skl_plane_ctl_tiling(uint64_t fb_modifier) +static u32 skl_plane_ctl_tiling(u64 fb_modifier) { switch (fb_modifier) { case DRM_FORMAT_MOD_LINEAR: @@ -3632,6 +3733,23 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect) return 0; } +u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + u32 plane_ctl = 0; + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + return plane_ctl; + + if (crtc_state->gamma_enable) + plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + plane_ctl |= 
PLANE_CTL_PIPE_CSC_ENABLE; + + return plane_ctl; +} + u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -3646,10 +3764,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { plane_ctl |= skl_plane_ctl_alpha(plane_state); - plane_ctl |= - PLANE_CTL_PIPE_GAMMA_ENABLE | - PLANE_CTL_PIPE_CSC_ENABLE | - PLANE_CTL_PLANE_GAMMA_DISABLE; + plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; @@ -3674,6 +3789,23 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, return plane_ctl; } +u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + u32 plane_color_ctl = 0; + + if (INTEL_GEN(dev_priv) >= 11) + return plane_color_ctl; + + if (crtc_state->gamma_enable) + plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; + + return plane_color_ctl; +} + u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -3683,14 +3815,10 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, struct intel_plane *plane = to_intel_plane(plane_state->base.plane); u32 plane_color_ctl = 0; - if (INTEL_GEN(dev_priv) < 11) { - plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; - plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; - } plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); - if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) { + if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) { if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; else @@ -3735,7 +3863,7 @@ __intel_display_resume(struct drm_device *dev, } /* ignore any reset values/BIOS leftovers in the WM registers */ - if (!HAS_GMCH_DISPLAY(to_i915(dev))) + if (!HAS_GMCH(to_i915(dev))) to_intel_atomic_state(state)->skip_intermediate_wm = true; ret = drm_atomic_helper_commit_duplicated_state(state, ctx); @@ -3746,8 +3874,8 @@ __intel_display_resume(struct drm_device *dev, static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) { - return intel_has_gpu_reset(dev_priv) && - INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv); + return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && + intel_has_gpu_reset(dev_priv)); } void intel_prepare_reset(struct drm_i915_private *dev_priv) @@ -3832,9 +3960,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) * The display has been reset as well, * so need a full re-initialization. 
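
/*
 * Sketch of the control-bit split made in the plane hunks above: bits that
 * describe pipe-level state (pipe gamma / pipe CSC) are computed from the
 * crtc state in a *_ctl_crtc() helper, separate from the per-plane bits, so
 * they can still be programmed when the plane itself is disabled (pipe
 * bottom color). Bit positions here are arbitrary placeholders.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTL_PLANE_ENABLE	(1u << 31)
#define CTL_PIPE_GAMMA_ENABLE	(1u << 30)
#define CTL_PIPE_CSC_ENABLE	(1u << 23)

struct crtc_state_model {
	bool gamma_enable;
	bool csc_enable;
};

/* Pipe-level bits: valid even for a disabled plane. */
static uint32_t plane_ctl_crtc(const struct crtc_state_model *crtc)
{
	uint32_t ctl = 0;

	if (crtc->gamma_enable)
		ctl |= CTL_PIPE_GAMMA_ENABLE;
	if (crtc->csc_enable)
		ctl |= CTL_PIPE_CSC_ENABLE;
	return ctl;
}

static uint32_t update_plane_ctl(const struct crtc_state_model *crtc)
{
	return CTL_PLANE_ENABLE | plane_ctl_crtc(crtc);
}

static uint32_t disable_plane_ctl(const struct crtc_state_model *crtc)
{
	/* No enable bit, but pipe gamma/CSC still follow the crtc state. */
	return plane_ctl_crtc(crtc);
}

int main(void)
{
	struct crtc_state_model crtc = { .gamma_enable = true };

	printf("enable: 0x%08x, disable: 0x%08x\n",
	       update_plane_ctl(&crtc), disable_plane_ctl(&crtc));
	return 0;
}
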
*/ - intel_runtime_pm_disable_interrupts(dev_priv); - intel_runtime_pm_enable_interrupts(dev_priv); - intel_pps_unlock_regs_wa(dev_priv); intel_modeset_init_hw(dev); intel_init_clock_gating(dev_priv); @@ -3860,6 +3985,30 @@ unlock: clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags); } +static void icl_set_pipe_chicken(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 tmp; + + tmp = I915_READ(PIPE_CHICKEN(pipe)); + + /* + * Display WA #1153: icl + * enable hardware to bypass the alpha math + * and rounding for per-pixel values 00 and 0xff + */ + tmp |= PER_PIXEL_ALPHA_BYPASS_EN; + /* + * Display WA # 1605353570: icl + * Set the pixel rounding bit to 1 for allowing + * passthrough of Frame buffer pixels unmodified + * across pipe + */ + tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; + I915_WRITE(PIPE_CHICKEN(pipe), tmp); +} + static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { @@ -3894,6 +4043,9 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta else if (old_crtc_state->pch_pfit.enabled) ironlake_pfit_disable(old_crtc_state); } + + if (INTEL_GEN(dev_priv) >= 11) + icl_set_pipe_chicken(crtc); } static void intel_fdi_normal_train(struct intel_crtc *crtc) @@ -4120,7 +4272,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; - if (IS_GEN6(dev_priv)) { + if (IS_GEN(dev_priv, 6)) { temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; @@ -4593,7 +4745,7 @@ static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *c static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) { - uint32_t temp; + u32 temp; temp = I915_READ(SOUTH_CHICKEN1); if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) @@ -4910,19 +5062,19 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, return 0; } - if (format && format->format == DRM_FORMAT_NV12 && + if (format && is_planar_yuv_format(format->format) && (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { - DRM_DEBUG_KMS("NV12: src dimensions not met\n"); + DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); return -EINVAL; } /* range checks */ if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || - (IS_GEN11(dev_priv) && + (INTEL_GEN(dev_priv) >= 11 && (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || - (!IS_GEN11(dev_priv) && + (INTEL_GEN(dev_priv) < 11 && (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " @@ -4979,14 +5131,15 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, { struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); struct drm_framebuffer *fb = plane_state->base.fb; int ret; bool force_detach = !fb || !plane_state->base.visible; bool need_scaler = false; /* Pre-gen11 and SDR planes always need a scaler for planar formats. 
*/ - if (!icl_is_hdr_plane(intel_plane) && - fb && fb->format->format == DRM_FORMAT_NV12) + if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && + fb && is_planar_yuv_format(fb->format->format)) need_scaler = true; ret = skl_update_scaler(crtc_state, force_detach, @@ -5018,11 +5171,24 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: break; default: DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", @@ -5133,7 +5299,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state) * and don't wait for vblanks until the end of crtc_enable, then * the HW state readout code will complain that the expected * IPS_CTL value is not the one we read. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, IPS_CTL, IPS_ENABLE, IPS_ENABLE, 50)) DRM_ERROR("Timed out waiting for IPS enable\n"); @@ -5158,7 +5324,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) * 42ms timeout value leads to occasional timeouts so use 100ms * instead. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, IPS_CTL, IPS_ENABLE, 0, 100)) DRM_ERROR("Timed out waiting for IPS disable\n"); @@ -5213,7 +5379,7 @@ intel_post_enable_primary(struct drm_crtc *crtc, * FIXME: Need to fix the logic to work when we turn off all planes * but leave the pipe running. */ - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); /* Underruns don't always raise interrupts, so check manually. */ @@ -5234,7 +5400,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) * Gen2 reports pipe underruns whenever all planes are disabled. * So disable underrun reporting before all the planes get disabled. */ - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); hsw_disable_ips(to_intel_crtc_state(crtc->state)); @@ -5248,7 +5414,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) * event which is after the vblank start event, so we need to have a * wait-for-vblank between disabling the plane and the pipe. */ - if (HAS_GMCH_DISPLAY(dev_priv) && + if (HAS_GMCH(dev_priv) && intel_set_memory_cxsr(dev_priv, false)) intel_wait_for_vblank(dev_priv, pipe); } @@ -5256,18 +5422,36 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + if (!old_crtc_state->ips_enabled) return false; if (needs_modeset(&new_crtc_state->base)) return true; + /* + * Workaround : Do not read or write the pipe palette/gamma data while + * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. + * + * Disable IPS before we program the LUT. 
+ */ + if (IS_HASWELL(dev_priv) && + (new_crtc_state->base.color_mgmt_changed || + new_crtc_state->update_pipe) && + new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) + return true; + return !new_crtc_state->ips_enabled; } static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + if (!new_crtc_state->ips_enabled) return false; @@ -5275,6 +5459,18 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s return true; /* + * Workaround : Do not read or write the pipe palette/gamma data while + * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. + * + * Re-enable IPS after the LUT has been programmed. + */ + if (IS_HASWELL(dev_priv) && + (new_crtc_state->base.color_mgmt_changed || + new_crtc_state->update_pipe) && + new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) + return true; + + /* * We can't read out IPS on broadwell, assume the worst and * forcibly enable IPS on the first fastset. */ @@ -5292,7 +5488,7 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv, return false; /* WA Display #0827: Gen9:all */ - if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) + if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) return true; return false; @@ -5365,7 +5561,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, * Gen2 reports pipe underruns whenever all planes are disabled. * So disable underrun reporting before all the planes get disabled. */ - if (IS_GEN2(dev_priv) && old_primary_state->visible && + if (IS_GEN(dev_priv, 2) && old_primary_state->visible && (modeset || !new_primary_state->base.visible)) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); } @@ -5385,7 +5581,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, * event which is after the vblank start event, so we need to have a * wait-for-vblank between disabling the plane and the pipe. 
*/ - if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active && + if (HAS_GMCH(dev_priv) && old_crtc_state->base.active && pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) intel_wait_for_vblank(dev_priv, crtc->pipe); @@ -5447,7 +5643,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - plane->disable_plane(plane, new_crtc_state); + intel_disable_plane(plane, new_crtc_state); if (old_plane_state->base.visible) fb_bits |= plane->frontbuffer_bit; @@ -5578,6 +5774,34 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc, } } +static void intel_encoders_update_pipe(struct drm_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct drm_atomic_state *old_state) +{ + struct drm_connector_state *conn_state; + struct drm_connector *conn; + int i; + + for_each_new_connector_in_state(old_state, conn, conn_state, i) { + struct intel_encoder *encoder = + to_intel_encoder(conn_state->best_encoder); + + if (conn_state->crtc != crtc) + continue; + + if (encoder->update_pipe) + encoder->update_pipe(encoder, crtc_state, conn_state); + } +} + +static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + + plane->disable_plane(plane, crtc_state); +} + static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_atomic_state *old_state) { @@ -5641,7 +5865,10 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_color_load_luts(&pipe_config->base); + intel_color_load_luts(pipe_config); + intel_color_commit(pipe_config); + /* update DSPCNTR to configure gamma for pipe bottom color */ + intel_disable_primary_plane(pipe_config); if (dev_priv->display.initial_watermarks != NULL) dev_priv->display.initial_watermarks(old_intel_state, pipe_config); @@ -5651,7 +5878,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, ironlake_pch_enable(old_intel_state, pipe_config); assert_vblank_disabled(crtc); - drm_crtc_vblank_on(crtc); + intel_crtc_vblank_on(pipe_config); intel_encoders_enable(crtc, pipe_config, old_state); @@ -5696,7 +5923,7 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - uint32_t val; + u32 val; val = MBUS_DBOX_A_CREDIT(2); val |= MBUS_DBOX_BW_CREDIT(1); @@ -5716,7 +5943,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); bool psl_clkgate_wa; - u32 pipe_chicken; if (WARN_ON(intel_crtc->active)) return; @@ -5752,8 +5978,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, haswell_set_pipemisc(pipe_config); - intel_color_set_csc(&pipe_config->base); - intel_crtc->active = true; /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ @@ -5771,18 +5995,14 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_color_load_luts(&pipe_config->base); + intel_color_load_luts(pipe_config); + intel_color_commit(pipe_config); + /* update DSPCNTR to configure gamma/csc for pipe bottom color */ + if (INTEL_GEN(dev_priv) < 9) + 
intel_disable_primary_plane(pipe_config); - /* - * Display WA #1153: enable hardware to bypass the alpha math - * and rounding for per-pixel values 00 and 0xff - */ - if (INTEL_GEN(dev_priv) >= 11) { - pipe_chicken = I915_READ(PIPE_CHICKEN(pipe)); - if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN)) - I915_WRITE_FW(PIPE_CHICKEN(pipe), - pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN); - } + if (INTEL_GEN(dev_priv) >= 11) + icl_set_pipe_chicken(intel_crtc); intel_ddi_set_pipe_settings(pipe_config); if (!transcoder_is_dsi(cpu_transcoder)) @@ -5805,7 +6025,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, intel_ddi_set_vc_payload_alloc(pipe_config, true); assert_vblank_disabled(crtc); - drm_crtc_vblank_on(crtc); + intel_crtc_vblank_on(pipe_config); intel_encoders_enable(crtc, pipe_config, old_state); @@ -5960,7 +6180,10 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port) if (port == PORT_NONE) return false; - if (IS_ICELAKE(dev_priv)) + if (IS_ELKHARTLAKE(dev_priv)) + return port <= PORT_C; + + if (INTEL_GEN(dev_priv) >= 11) return port <= PORT_B; return false; @@ -5968,7 +6191,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port) bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port) { - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) return port >= PORT_C && port <= PORT_F; return false; @@ -6087,7 +6310,7 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain; for_each_power_domain(domain, domains) - intel_display_power_put(dev_priv, domain); + intel_display_power_put_unchecked(dev_priv, domain); } static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, @@ -6117,8 +6340,6 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, i9xx_set_pipeconf(pipe_config); - intel_color_set_csc(&pipe_config->base); - intel_crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); @@ -6137,14 +6358,17 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, i9xx_pfit_enable(pipe_config); - intel_color_load_luts(&pipe_config->base); + intel_color_load_luts(pipe_config); + intel_color_commit(pipe_config); + /* update DSPCNTR to configure gamma for pipe bottom color */ + intel_disable_primary_plane(pipe_config); dev_priv->display.initial_watermarks(old_intel_state, pipe_config); intel_enable_pipe(pipe_config); assert_vblank_disabled(crtc); - drm_crtc_vblank_on(crtc); + intel_crtc_vblank_on(pipe_config); intel_encoders_enable(crtc, pipe_config, old_state); } @@ -6184,7 +6408,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, intel_crtc->active = true; - if (!IS_GEN2(dev_priv)) + if (!IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_encoders_pre_enable(crtc, pipe_config, old_state); @@ -6193,7 +6417,10 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, i9xx_pfit_enable(pipe_config); - intel_color_load_luts(&pipe_config->base); + intel_color_load_luts(pipe_config); + intel_color_commit(pipe_config); + /* update DSPCNTR to configure gamma for pipe bottom color */ + intel_disable_primary_plane(pipe_config); if (dev_priv->display.initial_watermarks != NULL) dev_priv->display.initial_watermarks(old_intel_state, @@ -6203,7 +6430,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, intel_enable_pipe(pipe_config); assert_vblank_disabled(crtc); - 
drm_crtc_vblank_on(crtc); + intel_crtc_vblank_on(pipe_config); intel_encoders_enable(crtc, pipe_config, old_state); } @@ -6236,7 +6463,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, * On gen2 planes are double buffered but the pipe isn't, so we must * wait for planes to fully turn off before disabling the pipe. */ - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) intel_wait_for_vblank(dev_priv, pipe); intel_encoders_disable(crtc, old_crtc_state, old_state); @@ -6261,7 +6488,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); - if (!IS_GEN2(dev_priv)) + if (!IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); if (!dev_priv->display.initial_watermarks) @@ -6334,7 +6561,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, domains = intel_crtc->enabled_power_domains; for_each_power_domain(domain, domains) - intel_display_power_put(dev_priv, domain); + intel_display_power_put_unchecked(dev_priv, domain); intel_crtc->enabled_power_domains = 0; dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); @@ -6576,7 +6803,13 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) if (!hsw_crtc_state_ips_capable(crtc_state)) return false; - if (crtc_state->ips_force_disable) + /* + * When IPS gets enabled, the pipe CRC changes. Since IPS gets + * enabled and disabled dynamically based on package C states, + * user space can't make reliable use of the CRCs, so let's just + * completely disable it. + */ + if (crtc_state->crc_enabled) return false; /* IPS should be fine as long as at least one plane is enabled. */ @@ -6600,9 +6833,9 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); } -static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) +static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) { - uint32_t pixel_rate; + u32 pixel_rate; pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; @@ -6612,8 +6845,8 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) */ if (pipe_config->pch_pfit.enabled) { - uint64_t pipe_w, pipe_h, pfit_w, pfit_h; - uint32_t pfit_size = pipe_config->pch_pfit.size; + u64 pipe_w, pipe_h, pfit_w, pfit_h; + u32 pfit_size = pipe_config->pch_pfit.size; pipe_w = pipe_config->pipe_src_w; pipe_h = pipe_config->pipe_src_h; @@ -6628,7 +6861,7 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) if (WARN_ON(!pfit_w || !pfit_h)) return pixel_rate; - pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, + pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h, pfit_w * pfit_h); } @@ -6639,7 +6872,7 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) /* FIXME calculate proper pipe pixel rate for GMCH pfit */ crtc_state->pixel_rate = crtc_state->base.adjusted_mode.crtc_clock; @@ -6651,8 +6884,7 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) static int intel_crtc_compute_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_display_mode 
*adjusted_mode = &pipe_config->base.adjusted_mode; int clock_limit = dev_priv->max_dotclk_freq; @@ -6702,7 +6934,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, } if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && - intel_is_dual_link_lvds(dev)) { + intel_is_dual_link_lvds(dev_priv)) { DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n"); return -EINVAL; } @@ -6724,7 +6956,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, } static void -intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) +intel_reduce_m_n_ratio(u32 *num, u32 *den) { while (*num > DATA_LINK_M_N_MASK || *den > DATA_LINK_M_N_MASK) { @@ -6734,7 +6966,7 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) } static void compute_m_n(unsigned int m, unsigned int n, - uint32_t *ret_m, uint32_t *ret_n, + u32 *ret_m, u32 *ret_n, bool constant_n) { /* @@ -6749,7 +6981,7 @@ static void compute_m_n(unsigned int m, unsigned int n, else *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); - *ret_m = div_u64((uint64_t) m * *ret_n, n); + *ret_m = div_u64((u64)m * *ret_n, n); intel_reduce_m_n_ratio(ret_m, ret_n); } @@ -6779,12 +7011,12 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } -static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) +static u32 pnv_dpll_compute_fp(struct dpll *dpll) { return (1 << dpll->n) << 16 | dpll->m2; } -static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) +static u32 i9xx_dpll_compute_fp(struct dpll *dpll) { return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; } @@ -6868,7 +7100,7 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, * Strictly speaking some registers are available before * gen7, but we only support DRRS on gen7+ */ - return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv); + return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv); } static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, @@ -7319,7 +7551,19 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, dpll |= PLL_P2_DIVIDE_BY_4; } - if (!IS_I830(dev_priv) && + /* + * Bspec: + * "[Almador Errata]: For the correct operation of the muxed DVO pins + * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data, + * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock + * Enable) must be set to “1” in both the DPLL A Control Register + * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)." + * + * For simplicity, we keep both bits always enabled in + * both DPLLs. The spec says we should disable the DVO 2X clock + * when not needed, but this seems to work fine in practice. 
+ */ + if (IS_I830(dev_priv) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) dpll |= DPLL_DVO_2X_MODE; @@ -7340,7 +7584,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; - uint32_t crtc_vtotal, crtc_vblank_end; + u32 crtc_vtotal, crtc_vblank_end; int vsyncshift = 0; /* We need to be careful not to changed the adjusted mode, for otherwise @@ -7415,7 +7659,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - uint32_t tmp; + u32 tmp; tmp = I915_READ(HTOTAL(cpu_transcoder)); pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; @@ -7486,7 +7730,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - uint32_t pipeconf; + u32 pipeconf; pipeconf = 0; @@ -7527,13 +7771,16 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; else pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; - } else + } else { pipeconf |= PIPECONF_PROGRESSIVE; + } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && crtc_state->limited_color_range) pipeconf |= PIPECONF_COLOR_RANGE_SELECT; + pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + I915_WRITE(PIPECONF(crtc->pipe), pipeconf); POSTING_READ(PIPECONF(crtc->pipe)); } @@ -7577,8 +7824,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, static int g4x_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_limit *limit; int refclk = 96000; @@ -7591,7 +7837,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); } - if (intel_is_dual_link_lvds(dev)) + if (intel_is_dual_link_lvds(dev_priv)) limit = &intel_limits_g4x_dual_channel_lvds; else limit = &intel_limits_g4x_single_channel_lvds; @@ -7727,14 +7973,22 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, return 0; } +static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) +{ + if (IS_I830(dev_priv)) + return false; + + return INTEL_GEN(dev_priv) >= 4 || + IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); +} + static void i9xx_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - uint32_t tmp; + u32 tmp; - if (INTEL_GEN(dev_priv) <= 3 && - (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) + if (!i9xx_has_pfit(dev_priv)) return; tmp = I915_READ(PFIT_CONTROL); @@ -7941,16 +8195,36 @@ static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc, pipe_config->output_format = output; } +static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + u32 tmp; + + tmp = 
I915_READ(DSPCNTR(i9xx_plane)); + + if (tmp & DISPPLANE_GAMMA_ENABLE) + crtc_state->gamma_enable = true; + + if (!HAS_GMCH(dev_priv) && + tmp & DISPPLANE_PIPE_CSC_ENABLE) + crtc_state->csc_enable = true; +} + static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; - uint32_t tmp; + intel_wakeref_t wakeref; + u32 tmp; bool ret; power_domain = POWER_DOMAIN_PIPE(crtc->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -7984,6 +8258,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, (tmp & PIPECONF_COLOR_RANGE_SELECT)) pipe_config->limited_color_range = true; + pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> + PIPECONF_GAMMA_MODE_SHIFT; + + if (IS_CHERRYVIEW(dev_priv)) + pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe)); + + i9xx_get_pipe_color_config(pipe_config); + if (INTEL_GEN(dev_priv) < 4) pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; @@ -8016,14 +8298,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, } pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { - /* - * DPLL_DVO_2X_MODE must be enabled for both DPLLs - * on 830. Filter it out here so that we don't - * report errors due to that. - */ - if (IS_I830(dev_priv)) - pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE; - pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); } else { @@ -8051,7 +8325,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, ret = true; out: - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -8225,7 +8499,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) { - uint32_t tmp; + u32 tmp; tmp = I915_READ(SOUTH_CHICKEN2); tmp |= FDI_MPHY_IOSFSB_RESET_CTL; @@ -8247,7 +8521,7 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) /* WaMPhyProgramming:hsw */ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) { - uint32_t tmp; + u32 tmp; tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); tmp &= ~(0xFF << 24); @@ -8328,7 +8602,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, bool with_spread, bool with_fdi) { - uint32_t reg, tmp; + u32 reg, tmp; if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) with_spread = true; @@ -8367,7 +8641,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, /* Sequence to disable CLKOUT_DP */ static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) { - uint32_t reg, tmp; + u32 reg, tmp; mutex_lock(&dev_priv->sb_lock); @@ -8392,7 +8666,7 @@ static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) #define BEND_IDX(steps) ((50 + (steps)) / 5) -static const uint16_t sscdivintphase[] = { +static const u16 sscdivintphase[] = { [BEND_IDX( 50)] = 0x3B23, [BEND_IDX( 45)] = 0x3B23, [BEND_IDX( 40)] = 0x3C23, @@ -8424,7 +8698,7 @@ static const uint16_t sscdivintphase[] = { */ static void lpt_bend_clkout_dp(struct 
drm_i915_private *dev_priv, int steps) { - uint32_t tmp; + u32 tmp; int idx = BEND_IDX(steps); if (WARN_ON(steps % 5 != 0)) @@ -8490,7 +8764,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - uint32_t val; + u32 val; val = 0; @@ -8523,6 +8797,8 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) if (crtc_state->limited_color_range) val |= PIPECONF_COLOR_RANGE_SELECT; + val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + I915_WRITE(PIPECONF(pipe), val); POSTING_READ(PIPECONF(pipe)); } @@ -8603,13 +8879,11 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) return i9xx_dpll_compute_m(dpll) < factor * dpll->n; } -static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, +static void ironlake_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct dpll *reduced_clock) { - struct drm_crtc *crtc = &intel_crtc->base; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll, fp, fp2; int factor; @@ -8618,10 +8892,12 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if ((intel_panel_use_ssc(dev_priv) && dev_priv->vbt.lvds_ssc_freq == 100000) || - (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev))) + (HAS_PCH_IBX(dev_priv) && + intel_is_dual_link_lvds(dev_priv))) factor = 25; - } else if (crtc_state->sdvo_tv_clock) + } else if (crtc_state->sdvo_tv_clock) { factor = 20; + } fp = i9xx_dpll_compute_fp(&crtc_state->dpll); @@ -8708,8 +8984,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_limit *limit; int refclk = 120000; @@ -8727,7 +9002,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, refclk = dev_priv->vbt.lvds_ssc_freq; } - if (intel_is_dual_link_lvds(dev)) { + if (intel_is_dual_link_lvds(dev_priv)) { if (refclk == 100000) limit = &intel_limits_ironlake_dual_lvds_100m; else @@ -8751,7 +9026,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, ironlake_compute_dpll(crtc, crtc_state, NULL); - if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) { + if (!intel_get_shared_dpll(crtc_state, NULL)) { DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); return -EINVAL; @@ -8837,7 +9112,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; - uint32_t ps_ctrl = 0; + u32 ps_ctrl = 0; int id = -1; int i; @@ -8849,6 +9124,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc, pipe_config->pch_pfit.enabled = true; pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); + scaler_state->scalers[i].in_use = true; break; } } @@ -8993,7 +9269,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; 
struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t tmp; + u32 tmp; tmp = I915_READ(PF_CTL(crtc->pipe)); @@ -9005,7 +9281,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, /* We currently do not free assignements of panel fitters on * ivb/hsw (since we don't use the higher upscaling modes which * differentiates them) so just WARN about this case for now. */ - if (IS_GEN7(dev_priv)) { + if (IS_GEN(dev_priv, 7)) { WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe)); } @@ -9018,11 +9294,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; - uint32_t tmp; + intel_wakeref_t wakeref; + u32 tmp; bool ret; power_domain = POWER_DOMAIN_PIPE(crtc->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -9054,6 +9332,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, if (tmp & PIPECONF_COLOR_RANGE_SELECT) pipe_config->limited_color_range = true; + pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> + PIPECONF_GAMMA_MODE_SHIFT; + + pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); + + i9xx_get_pipe_color_config(pipe_config); + if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { struct intel_shared_dpll *pll; enum intel_dpll_id pll_id; @@ -9105,7 +9390,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ret = true; out: - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -9145,7 +9430,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); } -static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) +static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) { if (IS_HASWELL(dev_priv)) return I915_READ(D_COMP_HSW); @@ -9153,7 +9438,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) return I915_READ(D_COMP_BDW); } -static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) +static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) { if (IS_HASWELL(dev_priv)) { mutex_lock(&dev_priv->pcu_lock); @@ -9178,7 +9463,7 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, bool switch_to_fclk, bool allow_power_down) { - uint32_t val; + u32 val; assert_can_disable_lcpll(dev_priv); @@ -9199,7 +9484,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, I915_WRITE(LCPLL_CTL, val); POSTING_READ(LCPLL_CTL); - if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) + if (intel_wait_for_register(&dev_priv->uncore, + LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) DRM_ERROR("LCPLL still locked\n"); val = hsw_read_dcomp(dev_priv); @@ -9225,7 +9511,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, */ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) { - uint32_t val; + u32 val; val = I915_READ(LCPLL_CTL); @@ -9237,7 +9523,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) * Make sure we're not on PC8 state before disabling PC8, otherwise * we'll hang the machine. To prevent PC8 state, just enable force_wake. 
*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); if (val & LCPLL_POWER_DOWN_ALLOW) { val &= ~LCPLL_POWER_DOWN_ALLOW; @@ -9254,7 +9540,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) val &= ~LCPLL_PLL_DISABLE; I915_WRITE(LCPLL_CTL, val); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5)) DRM_ERROR("LCPLL not locked yet\n"); @@ -9269,7 +9555,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) DRM_ERROR("Switching back to LCPLL failed\n"); } - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_update_cdclk(dev_priv); intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); @@ -9300,7 +9586,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) */ void hsw_enable_pc8(struct drm_i915_private *dev_priv) { - uint32_t val; + u32 val; DRM_DEBUG_KMS("Enabling package C8+\n"); @@ -9316,7 +9602,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv) void hsw_disable_pc8(struct drm_i915_private *dev_priv) { - uint32_t val; + u32 val; DRM_DEBUG_KMS("Disabling package C8+\n"); @@ -9338,11 +9624,11 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc, to_intel_atomic_state(crtc_state->base.state); if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || - IS_ICELAKE(dev_priv)) { + INTEL_GEN(dev_priv) >= 11) { struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); - if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) { + if (!intel_get_shared_dpll(crtc_state, encoder)) { DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); return -EINVAL; @@ -9380,11 +9666,8 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, temp = I915_READ(DPCLKA_CFGCR0_ICL) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); - - if (WARN_ON(!intel_dpll_is_combophy(id))) - return; } else if (intel_port_is_tc(dev_priv, port)) { - id = icl_port_to_mg_pll_id(port); + id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port)); } else { WARN(1, "Invalid port %x\n", port); return; @@ -9438,7 +9721,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, struct intel_crtc_state *pipe_config) { enum intel_dpll_id id; - uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); + u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); switch (ddi_pll_sel) { case PORT_CLK_SEL_WRPLL1: @@ -9476,15 +9759,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; - unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP); + unsigned long panel_transcoder_mask = 0; unsigned long enabled_panel_transcoders = 0; enum transcoder panel_transcoder; u32 tmp; - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); + if (HAS_TRANSCODER_EDP(dev_priv)) + panel_transcoder_mask |= BIT(TRANSCODER_EDP); + /* * The pipe->transcoder mapping is fixed with the exception of the eDP * and DSI transcoders handled below. @@ -9495,7 +9781,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, * XXX: Do intel_display_power_get_if_enabled before reading this (for * consistency and less surprising code; it's in always on power). 
*/ - for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) { + for_each_set_bit(panel_transcoder, + &panel_transcoder_mask, + ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) { enum pipe trans_pipe; tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); @@ -9541,6 +9829,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + + WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); *power_domain_mask |= BIT_ULL(power_domain); tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); @@ -9568,6 +9858,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) continue; + + WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); *power_domain_mask |= BIT_ULL(power_domain); /* @@ -9602,13 +9894,13 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum port port; - uint32_t tmp; + u32 tmp; tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icelake_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_CANNONLAKE(dev_priv)) cannonlake_get_ddi_pll(dev_priv, port, pipe_config); @@ -9671,7 +9963,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, goto out; if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || - IS_ICELAKE(dev_priv)) { + INTEL_GEN(dev_priv) >= 11) { haswell_get_ddi_port_state(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config); } @@ -9679,12 +9971,27 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_src_size(crtc, pipe_config); intel_get_crtc_ycbcr_config(crtc, pipe_config); - pipe_config->gamma_mode = - I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; + pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)); + + pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); + + if (INTEL_GEN(dev_priv) >= 9) { + u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe)); + + if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) + pipe_config->gamma_enable = true; + + if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) + pipe_config->csc_enable = true; + } else { + i9xx_get_pipe_color_config(pipe_config); + } power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { + WARN_ON(power_domain_mask & BIT_ULL(power_domain)); power_domain_mask |= BIT_ULL(power_domain); + if (INTEL_GEN(dev_priv) >= 9) skylake_get_pfit_config(crtc, pipe_config); else @@ -9714,7 +10021,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, out: for_each_power_domain(power_domain, power_domain_mask) - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put_unchecked(dev_priv, power_domain); return active; } @@ -9735,7 +10042,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) base += plane_state->color_plane[0].offset; /* ILK+ do this automagically */ - if (HAS_GMCH_DISPLAY(dev_priv) && + if (HAS_GMCH(dev_priv) && plane_state->base.rotation & DRM_MODE_ROTATE_180) base += (plane_state->base.crtc_h * plane_state->base.crtc_w - 1) * fb->format->cpp[0]; @@ -9848,11 +10155,20 @@ i845_cursor_max_stride(struct intel_plane *plane, return 
2048; } +static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + u32 cntl = 0; + + if (crtc_state->gamma_enable) + cntl |= CURSOR_GAMMA_ENABLE; + + return cntl; +} + static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { return CURSOR_ENABLE | - CURSOR_GAMMA_ENABLE | CURSOR_FORMAT_ARGB | CURSOR_STRIDE(plane_state->color_plane[0].stride); } @@ -9922,7 +10238,9 @@ static void i845_update_cursor(struct intel_plane *plane, unsigned int width = plane_state->base.crtc_w; unsigned int height = plane_state->base.crtc_h; - cntl = plane_state->ctl; + cntl = plane_state->ctl | + i845_cursor_ctl_crtc(crtc_state); + size = (height << 12) | width; base = intel_cursor_base(plane_state); @@ -9964,17 +10282,19 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(PIPE_A); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; *pipe = PIPE_A; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -9987,27 +10307,37 @@ i9xx_cursor_max_stride(struct intel_plane *plane, return plane->base.dev->mode_config.cursor_width * 4; } -static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 cntl = 0; - if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) - cntl |= MCURSOR_TRICKLE_FEED_DISABLE; + if (INTEL_GEN(dev_priv) >= 11) + return cntl; - if (INTEL_GEN(dev_priv) <= 10) { - cntl |= MCURSOR_GAMMA_ENABLE; + if (crtc_state->gamma_enable) + cntl = MCURSOR_GAMMA_ENABLE; - if (HAS_DDI(dev_priv)) - cntl |= MCURSOR_PIPE_CSC_ENABLE; - } + if (crtc_state->csc_enable) + cntl |= MCURSOR_PIPE_CSC_ENABLE; if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); + return cntl; +} + +static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->base.plane->dev); + u32 cntl = 0; + + if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) + cntl |= MCURSOR_TRICKLE_FEED_DISABLE; + switch (plane_state->base.crtc_w) { case 64: cntl |= MCURSOR_MODE_64_ARGB_AX; @@ -10132,7 +10462,8 @@ static void i9xx_update_cursor(struct intel_plane *plane, unsigned long irqflags; if (plane_state && plane_state->base.visible) { - cntl = plane_state->ctl; + cntl = plane_state->ctl | + i9xx_cursor_ctl_crtc(crtc_state); if (plane_state->base.crtc_h != plane_state->base.crtc_w) fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1); @@ -10197,6 +10528,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; bool ret; u32 val; @@ -10206,7 +10538,8 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, * 
display power wells. */ power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; val = I915_READ(CURCNTR(plane->pipe)); @@ -10219,7 +10552,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> MCURSOR_PIPE_SELECT_SHIFT; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -10468,7 +10801,7 @@ static int i9xx_pll_refclk(struct drm_device *dev, return dev_priv->vbt.lvds_ssc_freq; else if (HAS_PCH_SPLIT(dev_priv)) return 120000; - else if (!IS_GEN2(dev_priv)) + else if (!IS_GEN(dev_priv, 2)) return 96000; else return 48000; @@ -10501,7 +10834,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; } - if (!IS_GEN2(dev_priv)) { + if (!IS_GEN(dev_priv, 2)) { if (IS_PINEVIEW(dev_priv)) clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); @@ -10653,20 +10986,17 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) /** * intel_wm_need_update - Check whether watermarks need updating - * @plane: drm plane - * @state: new plane state + * @cur: current plane state + * @new: new plane state * * Check current plane state versus the new one to determine whether * watermarks need to be recalculated. * * Returns true or false. */ -static bool intel_wm_need_update(struct drm_plane *plane, - struct drm_plane_state *state) +static bool intel_wm_need_update(struct intel_plane_state *cur, + struct intel_plane_state *new) { - struct intel_plane_state *new = to_intel_plane_state(state); - struct intel_plane_state *cur = to_intel_plane_state(plane->state); - /* Update watermarks on tiling or size changes. */ if (new->base.visible != cur->base.visible) return true; @@ -10775,7 +11105,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat /* must disable cxsr around plane enable/disable */ if (plane->id != PLANE_CURSOR) pipe_config->disable_cxsr = true; - } else if (intel_wm_need_update(&plane->base, plane_state)) { + } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state), + to_intel_plane_state(plane_state))) { if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { /* FIXME bollocks */ pipe_config->update_wm_pre = true; @@ -10815,9 +11146,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat * Despite the w/a only being listed for IVB we assume that * the ILK/SNB note has similar ramifications, hence we apply * the w/a on all three platforms. + * + * Experimental results suggest this is also needed for the primary + * plane, not only the sprite plane. 
*/ - if (plane->id == PLANE_SPRITE0 && - (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) || + if (plane->id != PLANE_CURSOR && + (IS_GEN_RANGE(dev_priv, 5, 6) || IS_IVYBRIDGE(dev_priv)) && (turn_on || (!needs_scaling(old_plane_state) && needs_scaling(to_intel_plane_state(plane_state))))) @@ -10933,7 +11267,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) } if (!linked_state) { - DRM_DEBUG_KMS("Need %d free Y planes for NV12\n", + DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n", hweight8(crtc_state->nv12_planes)); return -EINVAL; @@ -10954,15 +11288,15 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) static int intel_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); int ret; bool mode_changed = needs_modeset(crtc_state); - if (mode_changed && !crtc_state->active) + if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && + mode_changed && !crtc_state->active) pipe_config->update_wm_post = true; if (mode_changed && crtc_state->enable && @@ -10974,16 +11308,11 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, return ret; } - if (crtc_state->color_mgmt_changed) { - ret = intel_color_check(crtc, crtc_state); + if (mode_changed || pipe_config->update_pipe || + crtc_state->color_mgmt_changed) { + ret = intel_color_check(pipe_config); if (ret) return ret; - - /* - * Changing color management on Intel hardware is - * handled as part of planes update. - */ - crtc_state->planes_changed = true; } ret = 0; @@ -11004,9 +11333,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, * old state and the new state. We can program these * immediately. 
*/ - ret = dev_priv->display.compute_intermediate_wm(dev, - intel_crtc, - pipe_config); + ret = dev_priv->display.compute_intermediate_wm(pipe_config); if (ret) { DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); return ret; @@ -11014,7 +11341,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } if (INTEL_GEN(dev_priv) >= 9) { - if (mode_changed) + if (mode_changed || pipe_config->update_pipe) ret = skl_update_scaler_crtc(pipe_config); if (!ret) @@ -11156,6 +11483,16 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id, m_n->link_m, m_n->link_n, m_n->tu); } +static void +intel_dump_infoframe(struct drm_i915_private *dev_priv, + const union hdmi_infoframe *frame) +{ + if ((drm_debug & DRM_UT_KMS) == 0) + return; + + hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); +} + #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x static const char * const output_type_str[] = { @@ -11259,6 +11596,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, DRM_DEBUG_KMS("audio: %i, infoframes: %i\n", pipe_config->has_audio, pipe_config->has_infoframe); + DRM_DEBUG_KMS("infoframes enabled: 0x%x\n", + pipe_config->infoframes.enable); + + if (pipe_config->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) + DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp); + if (pipe_config->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) + intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); + if (pipe_config->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) + intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); + if (pipe_config->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) + intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); + DRM_DEBUG_KMS("requested mode:\n"); drm_mode_debug_printmodeline(&pipe_config->base.mode); DRM_DEBUG_KMS("adjusted mode:\n"); @@ -11275,7 +11628,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->scaler_state.scaler_users, pipe_config->scaler_state.scaler_id); - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", pipe_config->gmch_pfit.control, pipe_config->gmch_pfit.pgm_ratios, @@ -11387,44 +11740,38 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) return ret; } -static void +static int clear_intel_crtc_state(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc_scaler_state scaler_state; - struct intel_dpll_hw_state dpll_hw_state; - struct intel_shared_dpll *shared_dpll; - struct intel_crtc_wm_state wm_state; - bool force_thru, ips_force_disable; + struct intel_crtc_state *saved_state; + + saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL); + if (!saved_state) + return -ENOMEM; /* FIXME: before the switch to atomic started, a new pipe_config was * kzalloc'd. Code that depends on any field being zero should be * fixed, so that the crtc_state can be safely duplicated. For now, * only fields that are know to not cause problems are preserved. 
*/ - scaler_state = crtc_state->scaler_state; - shared_dpll = crtc_state->shared_dpll; - dpll_hw_state = crtc_state->dpll_hw_state; - force_thru = crtc_state->pch_pfit.force_thru; - ips_force_disable = crtc_state->ips_force_disable; + saved_state->scaler_state = crtc_state->scaler_state; + saved_state->shared_dpll = crtc_state->shared_dpll; + saved_state->dpll_hw_state = crtc_state->dpll_hw_state; + saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru; + saved_state->crc_enabled = crtc_state->crc_enabled; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - wm_state = crtc_state->wm; + saved_state->wm = crtc_state->wm; /* Keep base drm_crtc_state intact, only clear our extended struct */ BUILD_BUG_ON(offsetof(struct intel_crtc_state, base)); - memset(&crtc_state->base + 1, 0, + memcpy(&crtc_state->base + 1, &saved_state->base + 1, sizeof(*crtc_state) - sizeof(crtc_state->base)); - crtc_state->scaler_state = scaler_state; - crtc_state->shared_dpll = shared_dpll; - crtc_state->dpll_hw_state = dpll_hw_state; - crtc_state->pch_pfit.force_thru = force_thru; - crtc_state->ips_force_disable = ips_force_disable; - if (IS_G4X(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - crtc_state->wm = wm_state; + kfree(saved_state); + return 0; } static int @@ -11439,7 +11786,9 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, int i; bool retry = true; - clear_intel_crtc_state(pipe_config); + ret = clear_intel_crtc_state(pipe_config); + if (ret) + return ret; pipe_config->cpu_transcoder = (enum transcoder) to_intel_crtc(crtc)->pipe; @@ -11517,10 +11866,13 @@ encoder_retry: continue; encoder = to_intel_encoder(connector_state->best_encoder); - - if (!(encoder->compute_config(encoder, pipe_config, connector_state))) { - DRM_DEBUG_KMS("Encoder config failure\n"); - return -EINVAL; + ret = encoder->compute_config(encoder, pipe_config, + connector_state); + if (ret < 0) { + if (ret != -EDEADLK) + DRM_DEBUG_KMS("Encoder config failure: %d\n", + ret); + return ret; } } @@ -11627,6 +11979,37 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n, return false; } +static bool +intel_compare_infoframe(const union hdmi_infoframe *a, + const union hdmi_infoframe *b) +{ + return memcmp(a, b, sizeof(*a)) == 0; +} + +static void +pipe_config_infoframe_err(struct drm_i915_private *dev_priv, + bool adjust, const char *name, + const union hdmi_infoframe *a, + const union hdmi_infoframe *b) +{ + if (adjust) { + if ((drm_debug & DRM_UT_KMS) == 0) + return; + + drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name); + drm_dbg(DRM_UT_KMS, "expected:"); + hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); + drm_dbg(DRM_UT_KMS, "found"); + hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); + } else { + drm_err("mismatch in %s infoframe", name); + drm_err("expected:"); + hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); + drm_err("found"); + hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); + } +} + static void __printf(3, 4) pipe_config_err(bool adjust, const char *name, const char *format, ...) { @@ -11645,6 +12028,23 @@ pipe_config_err(bool adjust, const char *name, const char *format, ...) 
va_end(args); } +static bool fastboot_enabled(struct drm_i915_private *dev_priv) +{ + if (i915_modparams.fastboot != -1) + return i915_modparams.fastboot; + + /* Enable fastboot by default on Skylake and newer */ + if (INTEL_GEN(dev_priv) >= 9) + return true; + + /* Enable fastboot by default on VLV and CHV */ + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + return true; + + /* Disabled by default on all others */ + return false; +} + static bool intel_pipe_config_compare(struct drm_i915_private *dev_priv, struct intel_crtc_state *current_config, @@ -11656,6 +12056,11 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) && !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED); + if (fixup_inherited && !fastboot_enabled(dev_priv)) { + DRM_DEBUG_KMS("initial modeset and fastboot not set\n"); + ret = false; + } + #define PIPE_CONF_CHECK_X(name) do { \ if (current_config->name != pipe_config->name) { \ pipe_config_err(adjust, __stringify(name), \ @@ -11788,7 +12193,17 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, } \ } while (0) -#define PIPE_CONF_QUIRK(quirk) \ +#define PIPE_CONF_CHECK_INFOFRAME(name) do { \ + if (!intel_compare_infoframe(¤t_config->infoframes.name, \ + &pipe_config->infoframes.name)) { \ + pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \ + ¤t_config->infoframes.name, \ + &pipe_config->infoframes.name); \ + ret = false; \ + } \ +} while (0) + +#define PIPE_CONF_QUIRK(quirk) \ ((current_config->quirks | pipe_config->quirks) & (quirk)) PIPE_CONF_CHECK_I(cpu_transcoder); @@ -11869,6 +12284,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, PIPE_CONF_CHECK_I(scaler_state.scaler_id); PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); + + PIPE_CONF_CHECK_X(gamma_mode); + if (IS_CHERRYVIEW(dev_priv)) + PIPE_CONF_CHECK_X(cgm_mode); + else + PIPE_CONF_CHECK_X(csc_mode); + PIPE_CONF_CHECK_BOOL(gamma_enable); + PIPE_CONF_CHECK_BOOL(csc_enable); } PIPE_CONF_CHECK_BOOL(double_wide); @@ -11917,6 +12340,12 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, PIPE_CONF_CHECK_I(min_voltage_level); + PIPE_CONF_CHECK_X(infoframes.enable); + PIPE_CONF_CHECK_X(infoframes.gcp); + PIPE_CONF_CHECK_INFOFRAME(avi); + PIPE_CONF_CHECK_INFOFRAME(spd); + PIPE_CONF_CHECK_INFOFRAME(hdmi); + #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_BOOL @@ -11951,12 +12380,15 @@ static void verify_wm_state(struct drm_crtc *crtc, struct drm_crtc_state *new_state) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct skl_ddb_allocation hw_ddb, *sw_ddb; - struct skl_pipe_wm hw_wm, *sw_wm; - struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; + struct skl_hw_state { + struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; + struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; + struct skl_ddb_allocation ddb; + struct skl_pipe_wm wm; + } *hw; + struct skl_ddb_allocation *sw_ddb; + struct skl_pipe_wm *sw_wm; struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; - struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES]; - struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); const enum pipe pipe = intel_crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); @@ -11964,22 +12396,29 @@ static void verify_wm_state(struct drm_crtc *crtc, if (INTEL_GEN(dev_priv) < 9 || !new_state->active) return; - skl_pipe_wm_get_hw_state(crtc, &hw_wm); + hw = kzalloc(sizeof(*hw), GFP_KERNEL); + if (!hw) + return; + + 
skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm); sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; - skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv); + skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv); - skl_ddb_get_hw_state(dev_priv, &hw_ddb); + skl_ddb_get_hw_state(dev_priv, &hw->ddb); sw_ddb = &dev_priv->wm.skl_hw.ddb; - if (INTEL_GEN(dev_priv) >= 11) - if (hw_ddb.enabled_slices != sw_ddb->enabled_slices) - DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n", - sw_ddb->enabled_slices, - hw_ddb.enabled_slices); + if (INTEL_GEN(dev_priv) >= 11 && + hw->ddb.enabled_slices != sw_ddb->enabled_slices) + DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n", + sw_ddb->enabled_slices, + hw->ddb.enabled_slices); + /* planes */ for_each_universal_plane(dev_priv, pipe, plane) { - hw_plane_wm = &hw_wm.planes[plane]; + struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; + + hw_plane_wm = &hw->wm.planes[plane]; sw_plane_wm = &sw_wm->planes[plane]; /* Watermarks */ @@ -12011,7 +12450,7 @@ static void verify_wm_state(struct drm_crtc *crtc, } /* DDB */ - hw_ddb_entry = &hw_ddb_y[plane]; + hw_ddb_entry = &hw->ddb_y[plane]; sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { @@ -12029,7 +12468,9 @@ static void verify_wm_state(struct drm_crtc *crtc, * once the plane becomes visible, we can skip this check */ if (1) { - hw_plane_wm = &hw_wm.planes[PLANE_CURSOR]; + struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; + + hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; /* Watermarks */ @@ -12061,7 +12502,7 @@ static void verify_wm_state(struct drm_crtc *crtc, } /* DDB */ - hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR]; + hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { @@ -12071,6 +12512,8 @@ static void verify_wm_state(struct drm_crtc *crtc, hw_ddb_entry->start, hw_ddb_entry->end); } } + + kfree(hw); } static void @@ -12227,7 +12670,8 @@ intel_verify_planes(struct intel_atomic_state *state) for_each_new_intel_plane_in_state(state, plane, plane_state, i) - assert_plane(plane, plane_state->base.visible); + assert_plane(plane, plane_state->slave || + plane_state->base.visible); } static void @@ -12378,7 +12822,7 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state) * However if queried just before the start of vblank we'll get an * answer that's slightly in the future. */ - if (IS_GEN2(dev_priv)) { + if (IS_GEN(dev_priv, 2)) { const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; int vtotal; @@ -12549,10 +12993,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state) return -EINVAL; } + /* keep the current setting */ + if (!intel_state->cdclk.force_min_cdclk_changed) + intel_state->cdclk.force_min_cdclk = + dev_priv->cdclk.force_min_cdclk; + intel_state->modeset = true; intel_state->active_crtcs = dev_priv->active_crtcs; intel_state->cdclk.logical = dev_priv->cdclk.logical; intel_state->cdclk.actual = dev_priv->cdclk.actual; + intel_state->cdclk.pipe = INVALID_PIPE; for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (new_crtc_state->active) @@ -12572,6 +13022,8 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * adjusted_mode bits in the crtc directly. 
*/ if (dev_priv->display.modeset_calc_cdclk) { + enum pipe pipe; + ret = dev_priv->display.modeset_calc_cdclk(state); if (ret < 0) return ret; @@ -12588,12 +13040,36 @@ static int intel_modeset_checks(struct drm_atomic_state *state) return ret; } + if (is_power_of_2(intel_state->active_crtcs)) { + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + + pipe = ilog2(intel_state->active_crtcs); + crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base; + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + if (crtc_state && needs_modeset(crtc_state)) + pipe = INVALID_PIPE; + } else { + pipe = INVALID_PIPE; + } + /* All pipes must be switched off while we change the cdclk. */ - if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, - &intel_state->cdclk.actual)) { + if (pipe != INVALID_PIPE && + intel_cdclk_needs_cd2x_update(dev_priv, + &dev_priv->cdclk.actual, + &intel_state->cdclk.actual)) { + ret = intel_lock_all_pipes(state); + if (ret < 0) + return ret; + + intel_state->cdclk.pipe = pipe; + } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, + &intel_state->cdclk.actual)) { ret = intel_modeset_all_pipes(state); if (ret < 0) return ret; + + intel_state->cdclk.pipe = INVALID_PIPE; } DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", @@ -12602,8 +13078,6 @@ static int intel_modeset_checks(struct drm_atomic_state *state) DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n", intel_state->cdclk.logical.voltage_level, intel_state->cdclk.actual.voltage_level); - } else { - to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical; } intel_modeset_clear_plls(state); @@ -12619,9 +13093,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * phase. The code here should be run after the per-crtc and per-plane 'check' * handlers to ensure that all derived state has been updated. */ -static int calc_watermark_data(struct drm_atomic_state *state) +static int calc_watermark_data(struct intel_atomic_state *state) { - struct drm_device *dev = state->dev; + struct drm_device *dev = state->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); /* Is there platform-specific watermark information to calculate? 
*/ @@ -12644,7 +13118,7 @@ static int intel_atomic_check(struct drm_device *dev, struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *crtc_state; int ret, i; - bool any_ms = false; + bool any_ms = intel_state->cdclk.force_min_cdclk_changed; /* Catch I915_MODE_FLAG_INHERITED */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, @@ -12679,8 +13153,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; } - if (i915_modparams.fastboot && - intel_pipe_config_compare(dev_priv, + if (intel_pipe_config_compare(dev_priv, to_intel_crtc_state(old_crtc_state), pipe_config, true)) { crtc_state->mode_changed = false; @@ -12695,6 +13168,10 @@ static int intel_atomic_check(struct drm_device *dev, "[modeset]" : "[fastset]"); } + ret = drm_dp_mst_atomic_check(state); + if (ret) + return ret; + if (any_ms) { ret = intel_modeset_checks(state); @@ -12713,7 +13190,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; intel_fbc_choose_crtc(dev_priv, intel_state); - return calc_watermark_data(state); + return calc_watermark_data(intel_state); } static int intel_atomic_prepare_commit(struct drm_device *dev, @@ -12725,8 +13202,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; + struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; - if (!dev->max_vblank_count) + if (!vblank->max_vblank_count) return (u32)drm_crtc_accurate_vblank_count(&crtc->base); return dev->driver->get_vblank_counter(dev, crtc->pipe); @@ -12755,9 +13233,14 @@ static void intel_update_crtc(struct drm_crtc *crtc, } else { intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), pipe_config); + + if (pipe_config->update_pipe) + intel_encoders_update_pipe(crtc, pipe_config, state); } - if (new_plane_state) + if (pipe_config->update_pipe && !pipe_config->enable_fbc) + intel_fbc_disable(intel_crtc); + else if (new_plane_state) intel_fbc_enable(intel_crtc, pipe_config, new_plane_state); intel_begin_crtc_commit(crtc, old_crtc_state); @@ -12930,6 +13413,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) struct drm_crtc *crtc; struct intel_crtc *intel_crtc; u64 put_domains[I915_MAX_PIPES] = {}; + intel_wakeref_t wakeref = 0; int i; intel_atomic_commit_fence_wait(intel_state); @@ -12937,7 +13421,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_wait_for_dependencies(state); if (intel_state->modeset) - intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { old_intel_crtc_state = to_intel_crtc_state(old_crtc_state); @@ -12980,7 +13464,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) /* FIXME unify this for all platforms */ if (!new_crtc_state->active && - !HAS_GMCH_DISPLAY(dev_priv) && + !HAS_GMCH(dev_priv) && dev_priv->display.initial_watermarks) dev_priv->display.initial_watermarks(intel_state, new_intel_crtc_state); @@ -12994,7 +13478,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) if (intel_state->modeset) { drm_atomic_helper_update_legacy_modeset_state(state->dev, state); - intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual); + intel_set_cdclk_pre_plane_update(dev_priv, + &intel_state->cdclk.actual, + &dev_priv->cdclk.actual, + intel_state->cdclk.pipe); /* * SKL workaround: bspec recommends we 
disable the SAGV when we @@ -13023,6 +13510,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) /* Now enable the clocks, plane, pipe, and connectors that we set up. */ dev_priv->display.update_crtcs(state); + if (intel_state->modeset) + intel_set_cdclk_post_plane_update(dev_priv, + &intel_state->cdclk.actual, + &dev_priv->cdclk.actual, + intel_state->cdclk.pipe); + /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. To * fix this: @@ -13034,6 +13527,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) */ drm_atomic_helper_wait_for_flip_done(dev, state); + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); + + if (new_crtc_state->active && + !needs_modeset(new_crtc_state) && + (new_intel_crtc_state->base.color_mgmt_changed || + new_intel_crtc_state->update_pipe)) + intel_color_load_luts(new_intel_crtc_state); + } + /* * Now that the vblank has passed, we can go ahead and program the * optimal watermarks on platforms that need two-step watermark @@ -13073,8 +13576,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * so enable debugging for the next modeset - and hope we catch * the culprit. */ - intel_uncore_arm_unclaimed_mmio_detection(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); + intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); + intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); } /* @@ -13214,8 +13717,10 @@ static int intel_atomic_commit(struct drm_device *dev, intel_state->min_voltage_level, sizeof(intel_state->min_voltage_level)); dev_priv->active_crtcs = intel_state->active_crtcs; - dev_priv->cdclk.logical = intel_state->cdclk.logical; - dev_priv->cdclk.actual = intel_state->cdclk.actual; + dev_priv->cdclk.force_min_cdclk = + intel_state->cdclk.force_min_cdclk; + + intel_cdclk_swap_state(intel_state); } drm_atomic_state_get(state); @@ -13266,7 +13771,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait, * vblank without our intervention, so leave RPS alone. */ if (!i915_request_started(rq)) - gen6_rps_boost(rq, NULL); + gen6_rps_boost(rq); i915_request_put(rq); drm_crtc_vblank_put(wait->crtc); @@ -13527,7 +14032,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state, * or * cdclk/crtc_clock */ - mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3; + mult = is_planar_yuv_format(pixel_format) ? 
2 : 3; tmpclk1 = (1 << 16) * mult - 1; tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock); max_scale = min(tmpclk1, tmpclk2); @@ -13549,19 +14054,16 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc); bool modeset = needs_modeset(&intel_cstate->base); - if (!modeset && - (intel_cstate->base.color_mgmt_changed || - intel_cstate->update_pipe)) { - intel_color_set_csc(&intel_cstate->base); - intel_color_load_luts(&intel_cstate->base); - } - /* Perform vblank evasion around commit operation */ intel_pipe_update_start(intel_cstate); if (modeset) goto out; + if (intel_cstate->base.color_mgmt_changed || + intel_cstate->update_pipe) + intel_color_commit(intel_cstate); + if (intel_cstate->update_pipe) intel_update_pipe_config(old_intel_cstate, intel_cstate); else if (INTEL_GEN(dev_priv) >= 9) @@ -13578,7 +14080,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - if (!IS_GEN2(dev_priv)) + if (!IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); if (crtc_state->has_pch_encoder) { @@ -13702,8 +14204,8 @@ intel_legacy_cursor_update(struct drm_plane *plane, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, + u32 src_x, u32 src_y, + u32 src_w, u32 src_h, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); @@ -13802,14 +14304,11 @@ intel_legacy_cursor_update(struct drm_plane *plane, */ crtc_state->active_planes = new_crtc_state->active_planes; - if (plane->state->visible) { - trace_intel_update_plane(plane, to_intel_crtc(crtc)); - intel_plane->update_plane(intel_plane, crtc_state, - to_intel_plane_state(plane->state)); - } else { - trace_intel_disable_plane(plane, to_intel_crtc(crtc)); - intel_plane->disable_plane(intel_plane, crtc_state); - } + if (plane->state->visible) + intel_update_plane(intel_plane, crtc_state, + to_intel_plane_state(plane->state)); + else + intel_disable_plane(intel_plane, crtc_state); intel_plane_unpin_fb(to_intel_plane_state(old_plane_state)); @@ -14040,7 +14539,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int i; - crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe]; + crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe]; if (!crtc->num_scalers) return; @@ -14126,7 +14625,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); - intel_color_init(&intel_crtc->base); + intel_color_init(intel_crtc); WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); @@ -14177,7 +14676,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder) return index_mask; } -static bool has_edp_a(struct drm_i915_private *dev_priv) +static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) { if (!IS_MOBILE(dev_priv)) return false; @@ -14185,13 +14684,13 @@ static bool has_edp_a(struct drm_i915_private *dev_priv) if ((I915_READ(DP_A) & DP_DETECTED) == 0) return false; - if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) + if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) return false; return true; } -static bool intel_crt_present(struct drm_i915_private *dev_priv) +static bool intel_ddi_crt_present(struct drm_i915_private 
*dev_priv) { if (INTEL_GEN(dev_priv) >= 9) return false; @@ -14199,15 +14698,12 @@ static bool intel_crt_present(struct drm_i915_private *dev_priv) if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) return false; - if (IS_CHERRYVIEW(dev_priv)) - return false; - if (HAS_PCH_LPT_H(dev_priv) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) return false; /* DDI E can't be used if DDI A requires 4 lanes */ - if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) + if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) return false; if (!dev_priv->vbt.int_crt_support) @@ -14262,23 +14758,26 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - /* - * intel_edp_init_connector() depends on this completing first, to - * prevent the registeration of both eDP and LVDS and the incorrect - * sharing of the PPS. - */ - intel_lvds_init(dev_priv); - - if (intel_crt_present(dev_priv)) - intel_crt_init(dev_priv); - - if (IS_ICELAKE(dev_priv)) { + if (IS_ELKHARTLAKE(dev_priv)) { + intel_ddi_init(dev_priv, PORT_A); + intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_C); + icl_dsi_init(dev_priv); + } else if (INTEL_GEN(dev_priv) >= 11) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); intel_ddi_init(dev_priv, PORT_D); intel_ddi_init(dev_priv, PORT_E); - intel_ddi_init(dev_priv, PORT_F); + /* + * On some ICL SKUs port F is not present. No strap bits for + * this, so rely on VBT. + * Work around broken VBTs on SKUs known to have no port F. + */ + if (IS_ICL_WITH_PORT_F(dev_priv) && + intel_bios_is_port_present(dev_priv, PORT_F)) + intel_ddi_init(dev_priv, PORT_F); + icl_dsi_init(dev_priv); } else if (IS_GEN9_LP(dev_priv)) { /* @@ -14294,6 +14793,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } else if (HAS_DDI(dev_priv)) { int found; + if (intel_ddi_crt_present(dev_priv)) + intel_crt_init(dev_priv); + /* * Haswell uses DDI functions to detect digital outputs. * On SKL pre-D0 the strap isn't connected, so we assume @@ -14320,16 +14822,23 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) * On SKL we don't have a way to detect DDI-E so we rely on VBT. */ if (IS_GEN9_BC(dev_priv) && - (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || - dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || - dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) + intel_bios_is_port_present(dev_priv, PORT_E)) intel_ddi_init(dev_priv, PORT_E); } else if (HAS_PCH_SPLIT(dev_priv)) { int found; + + /* + * intel_edp_init_connector() depends on this completing first, + * to prevent the registration of both eDP and LVDS and the + * incorrect sharing of the PPS. + */ + intel_lvds_init(dev_priv); + intel_crt_init(dev_priv); + dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); - if (has_edp_a(dev_priv)) + if (ilk_has_edp_a(dev_priv)) intel_dp_init(dev_priv, DP_A, PORT_A); if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { @@ -14355,6 +14864,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { bool has_edp, has_port; + if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) + intel_crt_init(dev_priv); + /* * The DP_DETECTED bit is the latched state of the DDC * SDA pin at boot. 
However since eDP doesn't require DDC @@ -14397,9 +14909,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } vlv_dsi_init(dev_priv); - } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { + } else if (IS_PINEVIEW(dev_priv)) { + intel_lvds_init(dev_priv); + intel_crt_init(dev_priv); + } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { bool found = false; + if (IS_MOBILE(dev_priv)) + intel_lvds_init(dev_priv); + + intel_crt_init(dev_priv); + if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOB\n"); found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); @@ -14431,11 +14951,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) intel_dp_init(dev_priv, DP_D, PORT_D); - } else if (IS_GEN2(dev_priv)) - intel_dvo_init(dev_priv); - if (SUPPORTS_TV(dev_priv)) - intel_tv_init(dev_priv); + if (SUPPORTS_TV(dev_priv)) + intel_tv_init(dev_priv); + } else if (IS_GEN(dev_priv, 2)) { + if (IS_I85X(dev_priv)) + intel_lvds_init(dev_priv); + + intel_crt_init(dev_priv); + intel_dvo_init(dev_priv); + } intel_psr_init(dev_priv); @@ -14602,14 +15127,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); - if (fb->format->format == DRM_FORMAT_NV12 && - (fb->width < SKL_MIN_YUV_420_SRC_W || - fb->height < SKL_MIN_YUV_420_SRC_H || - (fb->width % 4) != 0 || (fb->height % 4) != 0)) { - DRM_DEBUG_KMS("src dimensions not correct for NV12\n"); - goto err; - } - for (i = 0; i < fb->format->num_planes; i++) { u32 stride_alignment; @@ -14629,7 +15146,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, * require the entire fb to accommodate that to avoid * potential runtime errors at plane configuration time. */ - if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 && + if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 && is_ccs_modifier(fb->modifier)) stride_alignment *= 4; @@ -14834,7 +15351,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; dev_priv->display.crtc_enable = i9xx_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; - } else if (!IS_GEN2(dev_priv)) { + } else if (!IS_GEN(dev_priv, 2)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config; @@ -14850,9 +15367,9 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) dev_priv->display.crtc_disable = i9xx_crtc_disable; } - if (IS_GEN5(dev_priv)) { + if (IS_GEN(dev_priv, 5)) { dev_priv->display.fdi_link_train = ironlake_fdi_link_train; - } else if (IS_GEN6(dev_priv)) { + } else if (IS_GEN(dev_priv, 6)) { dev_priv->display.fdi_link_train = gen6_fdi_link_train; } else if (IS_IVYBRIDGE(dev_priv)) { /* FIXME: detect B0+ stepping and use auto training */ @@ -14945,7 +15462,7 @@ retry: * intermediate watermarks (since we don't trust the current * watermarks). 
*/ - if (!HAS_GMCH_DISPLAY(dev_priv)) + if (!HAS_GMCH(dev_priv)) intel_state->skip_intermediate_wm = true; ret = intel_atomic_check(dev, state); @@ -14984,12 +15501,12 @@ fail: static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) { - if (IS_GEN5(dev_priv)) { + if (IS_GEN(dev_priv, 5)) { u32 fdi_pll_clk = I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; - } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) { + } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) { dev_priv->fdi_pll_freq = 270000; } else { return; @@ -15105,10 +15622,10 @@ int intel_modeset_init(struct drm_device *dev) } /* maximum framebuffer dimensions */ - if (IS_GEN2(dev_priv)) { + if (IS_GEN(dev_priv, 2)) { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; - } else if (IS_GEN3(dev_priv)) { + } else if (IS_GEN(dev_priv, 3)) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; } else { @@ -15119,7 +15636,7 @@ int intel_modeset_init(struct drm_device *dev) if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; dev->mode_config.cursor_height = 1023; - } else if (IS_GEN2(dev_priv)) { + } else if (IS_GEN(dev_priv, 2)) { dev->mode_config.cursor_width = 64; dev->mode_config.cursor_height = 64; } else { @@ -15147,6 +15664,8 @@ int intel_modeset_init(struct drm_device *dev) intel_update_czclk(dev_priv); intel_modeset_init_hw(dev); + intel_hdcp_component_init(dev_priv); + if (dev_priv->max_cdclk_freq == 0) intel_update_max_cdclk(dev_priv); @@ -15186,7 +15705,7 @@ int intel_modeset_init(struct drm_device *dev) * Note that we need to do this after reconstructing the BIOS fb's * since the watermark calculation done here will use pstate->fb. */ - if (!HAS_GMCH_DISPLAY(dev_priv)) + if (!HAS_GMCH(dev_priv)) sanitize_watermarks(dev); /* @@ -15222,7 +15741,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) pipe_name(pipe), clock.vco, clock.dot); fp = i9xx_dpll_compute_fp(&clock); - dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) | + dpll = DPLL_DVO_2X_MODE | DPLL_VGA_MODE_DIS | ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | PLL_P2_DIVIDE_BY_4 | @@ -15379,6 +15898,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, plane->base.type != DRM_PLANE_TYPE_PRIMARY) intel_plane_disable_noatomic(crtc, plane); } + + /* + * Disable any background color set by the BIOS, but enable the + * gamma and CSC to match how we program our planes. + */ + if (INTEL_GEN(dev_priv) >= 9) + I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe), + SKL_BOTTOM_COLOR_GAMMA_ENABLE | + SKL_BOTTOM_COLOR_CSC_ENABLE); } /* Adjust the state of the output pipe according to whether we @@ -15386,7 +15914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, if (crtc_state->base.active && !intel_crtc_has_encoders(crtc)) intel_crtc_disable_noatomic(&crtc->base, ctx); - if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) { + if (crtc_state->base.active || HAS_GMCH(dev_priv)) { /* * We start out with underrun reporting disabled to avoid races. * For correct bookkeeping mark this on active crtcs. @@ -15415,16 +15943,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, } } +static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + /* + * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram + * the hardware when a high res displays plugged in. 
DPLL P + * divider is zero, and the pipe timings are bonkers. We'll + * try to disable everything in that case. + * + * FIXME would be nice to be able to sanitize this state + * without several WARNs, but for now let's take the easy + * road. + */ + return IS_GEN(dev_priv, 6) && + crtc_state->base.active && + crtc_state->shared_dpll && + crtc_state->port_clock == 0; +} + static void intel_sanitize_encoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_connector *connector; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct intel_crtc_state *crtc_state = crtc ? + to_intel_crtc_state(crtc->base.state) : NULL; /* We need to check both for a crtc link (meaning that the * encoder is active and trying to read from a pipe) and the * pipe itself being active. */ - bool has_active_crtc = encoder->base.crtc && - to_intel_crtc(encoder->base.crtc)->active; + bool has_active_crtc = crtc_state && + crtc_state->base.active; + + if (crtc_state && has_bogus_dpll_config(crtc_state)) { + DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n", + pipe_name(crtc->pipe)); + has_active_crtc = false; + } connector = intel_encoder_find_connector(encoder); if (connector && !has_active_crtc) { @@ -15435,16 +15992,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) /* Connector is active, but has no active pipe. This is * fallout from our resume register restoring. Disable * the encoder manually again. */ - if (encoder->base.crtc) { - struct drm_crtc_state *crtc_state = encoder->base.crtc->state; + if (crtc_state) { + struct drm_encoder *best_encoder; DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", encoder->base.base.id, encoder->base.name); + + /* avoid oopsing in case the hooks consult best_encoder */ + best_encoder = connector->base.state->best_encoder; + connector->base.state->best_encoder = &encoder->base; + if (encoder->disable) - encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); + encoder->disable(encoder, crtc_state, + connector->base.state); if (encoder->post_disable) - encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); + encoder->post_disable(encoder, crtc_state, + connector->base.state); + + connector->base.state->best_encoder = best_encoder; } encoder->base.crtc = NULL; @@ -15476,19 +16042,25 @@ void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) void i915_redisable_vga(struct drm_i915_private *dev_priv) { - /* This function can be called both from intel_modeset_setup_hw_state or + intel_wakeref_t wakeref; + + /* + * This function can be called both from intel_modeset_setup_hw_state or * at a very early point in our resume sequence, where the power well * structures are not yet restored. Since this function is at a very * paranoid "someone might have enabled VGA while we were not looking" * level, just check if the power well is enabled instead of trying to * follow the "don't touch the power well if we don't need it" policy - * the rest of the driver uses. */ - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA)) + * the rest of the driver uses. 
+ */ + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_VGA); + if (!wakeref) return; i915_redisable_vga_power_on(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); + intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref); } /* FIXME read out full plane state for all planes */ @@ -15788,12 +16360,13 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; struct intel_encoder *encoder; + struct intel_crtc *crtc; + intel_wakeref_t wakeref; int i; - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); intel_early_display_was(dev_priv); intel_modeset_readout_hw_state(dev); @@ -15809,10 +16382,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev, * waits, so we need vblank interrupts restored beforehand. */ for_each_intel_crtc(&dev_priv->drm, crtc) { + crtc_state = to_intel_crtc_state(crtc->base.state); + drm_crtc_vblank_reset(&crtc->base); - if (crtc->base.state->active) - drm_crtc_vblank_on(&crtc->base); + if (crtc_state->base.active) + intel_crtc_vblank_on(crtc_state); } intel_sanitize_plane_mapping(dev_priv); @@ -15843,15 +16418,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev, } if (IS_G4X(dev_priv)) { - g4x_wm_get_hw_state(dev); + g4x_wm_get_hw_state(dev_priv); g4x_wm_sanitize(dev_priv); } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - vlv_wm_get_hw_state(dev); + vlv_wm_get_hw_state(dev_priv); vlv_wm_sanitize(dev_priv); } else if (INTEL_GEN(dev_priv) >= 9) { - skl_wm_get_hw_state(dev); + skl_wm_get_hw_state(dev_priv); } else if (HAS_PCH_SPLIT(dev_priv)) { - ilk_wm_get_hw_state(dev); + ilk_wm_get_hw_state(dev_priv); } for_each_intel_crtc(dev, crtc) { @@ -15863,7 +16438,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, modeset_put_power_domains(dev_priv, put_domains); } - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); intel_fbc_init_pipe_state(dev_priv); } @@ -15952,6 +16527,8 @@ void intel_modeset_cleanup(struct drm_device *dev) /* flush any delayed tasks or pending work */ flush_scheduled_work(); + intel_hdcp_component_fini(dev_priv); + drm_mode_config_cleanup(dev); intel_overlay_cleanup(dev_priv); @@ -15998,8 +16575,6 @@ struct intel_display_error_state { u32 power_well_driver; - int num_transcoders; - struct intel_cursor_error_state { u32 control; u32 position; @@ -16024,6 +16599,7 @@ struct intel_display_error_state { } plane[I915_MAX_PIPES]; struct intel_transcoder_error_state { + bool available; bool power_domain_on; enum transcoder cpu_transcoder; @@ -16050,6 +16626,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) }; int i; + BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder)); + if (!HAS_DISPLAY(dev_priv)) return NULL; @@ -16086,18 +16664,17 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) error->pipe[i].source = I915_READ(PIPESRC(i)); - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) error->pipe[i].stat = I915_READ(PIPESTAT(i)); } - /* Note: this does not include DSI transcoders. */ - error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; - if (HAS_DDI(dev_priv)) - error->num_transcoders++; /* Account for eDP. 
*/ - - for (i = 0; i < error->num_transcoders; i++) { + for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { enum transcoder cpu_transcoder = transcoders[i]; + if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder]) + continue; + + error->transcoder[i].available = true; error->transcoder[i].power_domain_on = __intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder)); @@ -16161,7 +16738,10 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " BASE: %08x\n", error->cursor[i].base); } - for (i = 0; i < error->num_transcoders; i++) { + for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { + if (!error->transcoder[i].available) + continue; + err_printf(m, "CPU transcoder: %s\n", transcoder_name(error->transcoder[i].cpu_transcoder)); err_printf(m, " Power: %s\n", diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index 4262452963b3..2220588e86ac 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -26,6 +26,7 @@ #define _INTEL_DISPLAY_H_ #include <drm/drm_util.h> +#include <drm/i915_drm.h> enum i915_gpio { GPIOA, @@ -121,7 +122,7 @@ enum i9xx_plane_id { }; #define plane_name(p) ((p) + 'A') -#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') +#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') /* * Per-pipe plane identifier. @@ -150,21 +151,6 @@ enum plane_id { for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ for_each_if((__crtc)->plane_ids_mask & BIT(__p)) -enum port { - PORT_NONE = -1, - - PORT_A = 0, - PORT_B, - PORT_C, - PORT_D, - PORT_E, - PORT_F, - - I915_MAX_PORTS -}; - -#define port_name(p) ((p) + 'A') - /* * Ports identifier referenced from other drivers. 
* Expected to remain stable over time @@ -311,12 +297,12 @@ struct intel_link_m_n { #define for_each_universal_plane(__dev_priv, __pipe, __p) \ for ((__p) = 0; \ - (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ + (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ (__p)++) #define for_each_sprite(__dev_priv, __p, __s) \ for ((__s) = 0; \ - (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ + (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \ (__s)++) #define for_each_port_masked(__port, __ports_mask) \ diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fdd2cbc56fa3..72c49070ed14 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -32,13 +32,12 @@ #include <linux/notifier.h> #include <linux/reboot.h> #include <asm/byteorder.h> -#include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_dp_helper.h> #include <drm/drm_edid.h> #include <drm/drm_hdcp.h> +#include <drm/drm_probe_helper.h> #include "intel_drv.h" #include <drm/i915_drm.h> #include "i915_drv.h" @@ -304,9 +303,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp) static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum port port = dig_port->base.port; - if (port == PORT_B) + if (intel_port_is_combophy(dev_priv, port) && + !intel_dp_is_edp(intel_dp)) return 540000; return 810000; @@ -344,7 +345,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 10) { source_rates = cnl_rates; size = ARRAY_SIZE(cnl_rates); - if (IS_GEN10(dev_priv)) + if (IS_GEN(dev_priv, 10)) max_rate = cnl_max_source_rate(intel_dp); else max_rate = icl_max_source_rate(intel_dp); @@ -428,7 +429,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp) } static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, - uint8_t lane_count) + u8 lane_count) { /* * FIXME: we need to synchronize the current link parameters with @@ -448,7 +449,7 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, int link_rate, - uint8_t lane_count) + u8 lane_count) { const struct drm_display_mode *fixed_mode = intel_dp->attached_connector->panel.fixed_mode; @@ -463,7 +464,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, } int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, - int link_rate, uint8_t lane_count) + int link_rate, u8 lane_count) { int index; @@ -571,19 +572,19 @@ intel_dp_mode_valid(struct drm_connector *connector, return MODE_OK; } -uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes) +u32 intel_dp_pack_aux(const u8 *src, int src_bytes) { - int i; - uint32_t v = 0; + int i; + u32 v = 0; if (src_bytes > 4) src_bytes = 4; for (i = 0; i < src_bytes; i++) - v |= ((uint32_t) src[i]) << ((3-i) * 8); + v |= ((u32)src[i]) << ((3 - i) * 8); return v; } -static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes) { int i; if (dst_bytes > 4) @@ -600,30 +601,39 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, static void intel_dp_pps_init(struct intel_dp *intel_dp); -static void pps_lock(struct intel_dp *intel_dp) +static 
intel_wakeref_t +pps_lock(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + intel_wakeref_t wakeref; /* * See intel_power_sequencer_reset() why we need * a power domain reference here. */ - intel_display_power_get(dev_priv, - intel_aux_power_domain(dp_to_dig_port(intel_dp))); + wakeref = intel_display_power_get(dev_priv, + intel_aux_power_domain(dp_to_dig_port(intel_dp))); mutex_lock(&dev_priv->pps_mutex); + + return wakeref; } -static void pps_unlock(struct intel_dp *intel_dp) +static intel_wakeref_t +pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); mutex_unlock(&dev_priv->pps_mutex); - intel_display_power_put(dev_priv, - intel_aux_power_domain(dp_to_dig_port(intel_dp))); + intel_display_power_put(dev_priv, + intel_aux_power_domain(dp_to_dig_port(intel_dp)), + wakeref); + return 0; } +#define with_pps_lock(dp, wf) \ + for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf))) + static void vlv_power_sequencer_kick(struct intel_dp *intel_dp) { @@ -633,7 +643,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) bool pll_enabled, release_cl_override = false; enum dpio_phy phy = DPIO_PHY(pipe); enum dpio_channel ch = vlv_pipe_to_channel(pipe); - uint32_t DP; + u32 DP; if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, "skipping pipe %c power sequencer kick due to port %c being active\n", @@ -939,8 +949,11 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp, regs->pp_stat = PP_STATUS(pps_idx); regs->pp_on = PP_ON_DELAYS(pps_idx); regs->pp_off = PP_OFF_DELAYS(pps_idx); - if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) && - !HAS_PCH_ICP(dev_priv)) + + /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */ + if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) + regs->pp_div = INVALID_MMIO_REG; + else regs->pp_div = PP_DIVISOR(pps_idx); } @@ -972,30 +985,29 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), edp_notifier); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART) return 0; - pps_lock(intel_dp); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); - i915_reg_t pp_ctrl_reg, pp_div_reg; - u32 pp_div; - - pp_ctrl_reg = PP_CONTROL(pipe); - pp_div_reg = PP_DIVISOR(pipe); - pp_div = I915_READ(pp_div_reg); - pp_div &= PP_REFERENCE_DIVIDER_MASK; - - /* 0x1F write to PP_DIV_REG sets max cycle delay */ - I915_WRITE(pp_div_reg, pp_div | 0x1F); - I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF); - msleep(intel_dp->panel_power_cycle_delay); + with_pps_lock(intel_dp, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + i915_reg_t pp_ctrl_reg, pp_div_reg; + u32 pp_div; + + pp_ctrl_reg = PP_CONTROL(pipe); + pp_div_reg = PP_DIVISOR(pipe); + pp_div = I915_READ(pp_div_reg); + pp_div &= PP_REFERENCE_DIVIDER_MASK; + + /* 0x1F write to PP_DIV_REG sets max cycle delay */ + I915_WRITE(pp_div_reg, pp_div | 0x1F); + I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS); + msleep(intel_dp->panel_power_cycle_delay); + } } - pps_unlock(intel_dp); - return 0; } @@ -1041,17 +1053,21 @@ intel_dp_check_edp(struct intel_dp *intel_dp) } } -static uint32_t +static u32 intel_dp_aux_wait_done(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t 
ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); - uint32_t status; + u32 status; bool done; #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, msecs_to_jiffies_timeout(10)); + + /* just trace the final value */ + trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); + if (!done) DRM_ERROR("dp aux hw did not signal timeout!\n"); #undef C @@ -1059,7 +1075,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) return status; } -static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -1073,7 +1089,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); } -static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -1092,7 +1108,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); } -static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -1109,7 +1125,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return ilk_get_aux_clock_divider(intel_dp, index); } -static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { /* * SKL doesn't need us to program the AUX clock divider (Hardware will @@ -1119,16 +1135,16 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return index ? 
0 : 1; } -static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, - int send_bytes, - uint32_t aux_clock_divider) +static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, + int send_bytes, + u32 aux_clock_divider) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); - uint32_t precharge, timeout; + u32 precharge, timeout; - if (IS_GEN6(dev_priv)) + if (IS_GEN(dev_priv, 6)) precharge = 3; else precharge = 5; @@ -1149,12 +1165,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); } -static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, - int send_bytes, - uint32_t unused) +static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, + int send_bytes, + u32 unused) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - uint32_t ret; + u32 ret; ret = DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE | @@ -1174,25 +1190,26 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, static int intel_dp_aux_xfer(struct intel_dp *intel_dp, - const uint8_t *send, int send_bytes, - uint8_t *recv, int recv_size, + const u8 *send, int send_bytes, + u8 *recv, int recv_size, u32 aux_send_ctl_flags) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); i915_reg_t ch_ctl, ch_data[5]; - uint32_t aux_clock_divider; + u32 aux_clock_divider; + intel_wakeref_t wakeref; int i, ret, recv_bytes; - uint32_t status; int try, clock = 0; + u32 status; bool vdd; ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); for (i = 0; i < ARRAY_SIZE(ch_data); i++) ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); - pps_lock(intel_dp); + wakeref = pps_lock(intel_dp); /* * We will be called with VDD already enabled for dpcd/edid/oui reads. @@ -1217,6 +1234,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, break; msleep(1); } + /* just trace the final value */ + trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); if (try == 3) { static u32 last_status = -1; @@ -1336,7 +1355,7 @@ out: if (vdd) edp_panel_vdd_off(intel_dp, false); - pps_unlock(intel_dp); + pps_unlock(intel_dp, wakeref); return ret; } @@ -1358,7 +1377,7 @@ static ssize_t intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); - uint8_t txbuf[20], rxbuf[20]; + u8 txbuf[20], rxbuf[20]; size_t txsize, rxsize; int ret; @@ -1691,7 +1710,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) } void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, - uint8_t *link_bw, uint8_t *rate_select) + u8 *link_bw, u8 *rate_select) { /* eDP 1.4 rate select method. */ if (intel_dp->use_rate_select) { @@ -1704,12 +1723,6 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, } } -struct link_config_limits { - int min_clock, max_clock; - int min_lane_count, max_lane_count; - int min_bpp, max_bpp; -}; - static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, const struct intel_crtc_state *pipe_config) { @@ -1772,7 +1785,7 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp, } /* Adjust link config limits based on compliance test requests. 
*/ -static void +void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, struct link_config_limits *limits) @@ -1808,7 +1821,7 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, } /* Optimize link config in order: max bpp, min clock, min lanes */ -static bool +static int intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, const struct link_config_limits *limits) @@ -1834,17 +1847,17 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, pipe_config->pipe_bpp = bpp; pipe_config->port_clock = link_clock; - return true; + return 0; } } } } - return false; + return -EINVAL; } /* Optimize link config in order: max bpp, min lanes, min clock */ -static bool +static int intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, const struct link_config_limits *limits) @@ -1870,13 +1883,13 @@ intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, pipe_config->pipe_bpp = bpp; pipe_config->port_clock = link_clock; - return true; + return 0; } } } } - return false; + return -EINVAL; } static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) @@ -1894,19 +1907,20 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) return 0; } -static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state, - struct link_config_limits *limits) +static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state, + struct link_config_limits *limits) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; u8 dsc_max_bpc; int pipe_bpp; + int ret; if (!intel_dp_supports_dsc(intel_dp, pipe_config)) - return false; + return -EINVAL; dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC, conn_state->max_requested_bpc); @@ -1914,7 +1928,7 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) { DRM_DEBUG_KMS("No DSC support for less than 8bpc\n"); - return false; + return -EINVAL; } /* @@ -1948,7 +1962,7 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, adjusted_mode->crtc_hdisplay); if (!dsc_max_output_bpp || !dsc_dp_slice_count) { DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n"); - return false; + return -EINVAL; } pipe_config->dsc_params.compressed_bpp = min_t(u16, dsc_max_output_bpp >> 4, @@ -1965,16 +1979,19 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_config->dsc_params.dsc_split = true; } else { DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n"); - return false; + return -EINVAL; } } - if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) { + + ret = intel_dp_compute_dsc_params(intel_dp, pipe_config); + if (ret < 0) { DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d " "Compressed BPP = %d\n", pipe_config->pipe_bpp, pipe_config->dsc_params.compressed_bpp); - return false; + return ret; } + pipe_config->dsc_params.compression_enable = true; DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d " "Compressed Bpp = %d Slice Count = %d\n", @@ -1982,10 +1999,10 @@ static bool 
intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_config->dsc_params.compressed_bpp, pipe_config->dsc_params.slice_count); - return true; + return 0; } -static bool +static int intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -1994,7 +2011,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct link_config_limits limits; int common_len; - bool ret; + int ret; common_len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); @@ -2051,10 +2068,12 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, &limits); /* enable compression if the mode doesn't fit available BW */ - if (!ret) { - if (!intel_dp_dsc_compute_config(intel_dp, pipe_config, - conn_state, &limits)) - return false; + DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); + if (ret || intel_dp->force_dsc_en) { + ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, + conn_state, &limits); + if (ret < 0) + return ret; } if (pipe_config->dsc_params.compression_enable) { @@ -2079,10 +2098,33 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, intel_dp_max_data_rate(pipe_config->port_clock, pipe_config->lane_count)); } - return true; + return 0; } -bool +bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + const struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + + if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { + /* + * See: + * CEA-861-E - 5.1 Default Encoding Parameters + * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry + */ + return crtc_state->pipe_bpp != 18 && + drm_default_rgb_quant_range(adjusted_mode) == + HDMI_QUANTIZATION_RANGE_LIMITED; + } else { + return intel_conn_state->broadcast_rgb == + INTEL_BROADCAST_RGB_LIMITED; + } +} + +int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -2098,6 +2140,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, to_intel_digital_connector_state(conn_state); bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); + int ret; if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) pipe_config->has_pch_encoder = true; @@ -2119,14 +2162,12 @@ intel_dp_compute_config(struct intel_encoder *encoder, adjusted_mode); if (INTEL_GEN(dev_priv) >= 9) { - int ret; - ret = skl_update_scaler_crtc(pipe_config); if (ret) return ret; } - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) intel_gmch_panel_fitting(intel_crtc, pipe_config, conn_state->scaling_mode); else @@ -2135,35 +2176,24 @@ intel_dp_compute_config(struct intel_encoder *encoder, } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; - if (HAS_GMCH_DISPLAY(dev_priv) && + if (HAS_GMCH(dev_priv) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) - return false; + return -EINVAL; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) - return false; + return -EINVAL; pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && intel_dp_supports_fec(intel_dp, pipe_config); - if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state)) - return false; + ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); + 
if (ret < 0) + return ret; - if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { - /* - * See: - * CEA-861-E - 5.1 Default Encoding Parameters - * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry - */ - pipe_config->limited_color_range = - pipe_config->pipe_bpp != 18 && - drm_default_rgb_quant_range(adjusted_mode) == - HDMI_QUANTIZATION_RANGE_LIMITED; - } else { - pipe_config->limited_color_range = - intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; - } + pipe_config->limited_color_range = + intel_dp_limited_color_range(pipe_config, conn_state); if (!pipe_config->dsc_params.compression_enable) intel_link_compute_m_n(pipe_config->pipe_bpp, @@ -2196,11 +2226,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_psr_compute_config(intel_dp, pipe_config); - return true; + return 0; } void intel_dp_set_link_params(struct intel_dp *intel_dp, - int link_rate, uint8_t lane_count, + int link_rate, u8 lane_count, bool link_mst) { intel_dp->link_trained = false; @@ -2323,7 +2353,7 @@ static void wait_panel_status(struct intel_dp *intel_dp, I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, pp_stat_reg, mask, value, 5000)) DRM_ERROR("Panel status timeout: status %08x control %08x\n", @@ -2462,15 +2492,15 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) */ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) { + intel_wakeref_t wakeref; bool vdd; if (!intel_dp_is_edp(intel_dp)) return; - pps_lock(intel_dp); - vdd = edp_panel_vdd_on(intel_dp); - pps_unlock(intel_dp); - + vdd = false; + with_pps_lock(intel_dp, wakeref) + vdd = edp_panel_vdd_on(intel_dp); I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n", port_name(dp_to_dig_port(intel_dp)->base.port)); } @@ -2509,19 +2539,21 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) if ((pp & PANEL_POWER_ON) == 0) intel_dp->panel_power_off_time = ktime_get_boottime(); - intel_display_power_put(dev_priv, - intel_aux_power_domain(intel_dig_port)); + intel_display_power_put_unchecked(dev_priv, + intel_aux_power_domain(intel_dig_port)); } static void edp_panel_vdd_work(struct work_struct *__work) { - struct intel_dp *intel_dp = container_of(to_delayed_work(__work), - struct intel_dp, panel_vdd_work); + struct intel_dp *intel_dp = + container_of(to_delayed_work(__work), + struct intel_dp, panel_vdd_work); + intel_wakeref_t wakeref; - pps_lock(intel_dp); - if (!intel_dp->want_panel_vdd) - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) { + if (!intel_dp->want_panel_vdd) + edp_panel_vdd_off_sync(intel_dp); + } } static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) @@ -2585,7 +2617,7 @@ static void edp_panel_on(struct intel_dp *intel_dp) pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp = ironlake_get_pp_control(intel_dp); - if (IS_GEN5(dev_priv)) { + if (IS_GEN(dev_priv, 5)) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; I915_WRITE(pp_ctrl_reg, pp); @@ -2593,7 +2625,7 @@ static void edp_panel_on(struct intel_dp *intel_dp) } pp |= PANEL_POWER_ON; - if (!IS_GEN5(dev_priv)) + if (!IS_GEN(dev_priv, 5)) pp |= PANEL_POWER_RESET; I915_WRITE(pp_ctrl_reg, pp); @@ -2602,7 +2634,7 @@ static void edp_panel_on(struct intel_dp *intel_dp) wait_panel_on(intel_dp); intel_dp->last_power_on = jiffies; - if (IS_GEN5(dev_priv)) { + if (IS_GEN(dev_priv, 5)) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 
I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); @@ -2611,12 +2643,13 @@ static void edp_panel_on(struct intel_dp *intel_dp) void intel_edp_panel_on(struct intel_dp *intel_dp) { + intel_wakeref_t wakeref; + if (!intel_dp_is_edp(intel_dp)) return; - pps_lock(intel_dp); - edp_panel_on(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) + edp_panel_on(intel_dp); } @@ -2655,25 +2688,25 @@ static void edp_panel_off(struct intel_dp *intel_dp) intel_dp->panel_power_off_time = ktime_get_boottime(); /* We got a reference when we enabled the VDD. */ - intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port)); + intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); } void intel_edp_panel_off(struct intel_dp *intel_dp) { + intel_wakeref_t wakeref; + if (!intel_dp_is_edp(intel_dp)) return; - pps_lock(intel_dp); - edp_panel_off(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) + edp_panel_off(intel_dp); } /* Enable backlight in the panel power control. */ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 pp; - i915_reg_t pp_ctrl_reg; + intel_wakeref_t wakeref; /* * If we enable the backlight right away following a panel power @@ -2683,17 +2716,16 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) */ wait_backlight_on(intel_dp); - pps_lock(intel_dp); + with_pps_lock(intel_dp, wakeref) { + i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + u32 pp; - pp = ironlake_get_pp_control(intel_dp); - pp |= EDP_BLC_ENABLE; + pp = ironlake_get_pp_control(intel_dp); + pp |= EDP_BLC_ENABLE; - pp_ctrl_reg = _pp_ctrl_reg(intel_dp); - - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); - - pps_unlock(intel_dp); + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + } } /* Enable backlight PWM and backlight PP control. */ @@ -2715,23 +2747,21 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, static void _intel_edp_backlight_off(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 pp; - i915_reg_t pp_ctrl_reg; + intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) return; - pps_lock(intel_dp); - - pp = ironlake_get_pp_control(intel_dp); - pp &= ~EDP_BLC_ENABLE; - - pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + with_pps_lock(intel_dp, wakeref) { + i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + u32 pp; - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); + pp = ironlake_get_pp_control(intel_dp); + pp &= ~EDP_BLC_ENABLE; - pps_unlock(intel_dp); + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + } intel_dp->last_backlight_off = jiffies; edp_wait_backlight_off(intel_dp); @@ -2759,12 +2789,12 @@ static void intel_edp_backlight_power(struct intel_connector *connector, bool enable) { struct intel_dp *intel_dp = intel_attached_dp(&connector->base); + intel_wakeref_t wakeref; bool is_enabled; - pps_lock(intel_dp); - is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE; - pps_unlock(intel_dp); - + is_enabled = false; + with_pps_lock(intel_dp, wakeref) + is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE; if (is_enabled == enable) return; @@ -2831,7 +2861,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp, * 1. Wait for the start of vertical blank on the enabled pipe going to FDI * 2. 
Program DP PLL enable */ - if (IS_GEN5(dev_priv)) + if (IS_GEN(dev_priv, 5)) intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); intel_dp->DP |= DP_PLL_ENABLE; @@ -2981,16 +3011,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + intel_wakeref_t wakeref; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, encoder->port, pipe); - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } @@ -3158,20 +3190,20 @@ static void chv_post_disable_dp(struct intel_encoder *encoder, static void _intel_dp_set_link_train(struct intel_dp *intel_dp, - uint32_t *DP, - uint8_t dp_train_pat) + u32 *DP, + u8 dp_train_pat) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; - uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); + u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); if (dp_train_pat & train_pat_mask) DRM_DEBUG_KMS("Using DP training pattern TPS%d\n", dp_train_pat & train_pat_mask); if (HAS_DDI(dev_priv)) { - uint32_t temp = I915_READ(DP_TP_CTL(port)); + u32 temp = I915_READ(DP_TP_CTL(port)); if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) temp |= DP_TP_CTL_SCRAMBLE_DISABLE; @@ -3270,24 +3302,23 @@ static void intel_enable_dp(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); - uint32_t dp_reg = I915_READ(intel_dp->output_reg); + u32 dp_reg = I915_READ(intel_dp->output_reg); enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; if (WARN_ON(dp_reg & DP_PORT_EN)) return; - pps_lock(intel_dp); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_init_panel_power_sequencer(encoder, pipe_config); - - intel_dp_enable_port(intel_dp, pipe_config); + with_pps_lock(intel_dp, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + vlv_init_panel_power_sequencer(encoder, pipe_config); - edp_panel_vdd_on(intel_dp); - edp_panel_on(intel_dp); - edp_panel_vdd_off(intel_dp, true); + intel_dp_enable_port(intel_dp, pipe_config); - pps_unlock(intel_dp); + edp_panel_vdd_on(intel_dp); + edp_panel_on(intel_dp); + edp_panel_vdd_off(intel_dp, true); + } if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { unsigned int lane_mask = 0x0; @@ -3490,14 +3521,14 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder, * link status information */ bool -intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) +intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) { return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; } /* These are source-specific values. 
*/ -uint8_t +u8 intel_dp_voltage_max(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -3516,8 +3547,8 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; } -uint8_t -intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) +u8 +intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; @@ -3562,12 +3593,12 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) } } -static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) +static u32 vlv_signal_levels(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; unsigned long demph_reg_value, preemph_reg_value, uniqtranscale_reg_value; - uint8_t train_set = intel_dp->train_set[0]; + u8 train_set = intel_dp->train_set[0]; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -3648,12 +3679,12 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) return 0; } -static uint32_t chv_signal_levels(struct intel_dp *intel_dp) +static u32 chv_signal_levels(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; u32 deemph_reg_value, margin_reg_value; bool uniq_trans_scale = false; - uint8_t train_set = intel_dp->train_set[0]; + u8 train_set = intel_dp->train_set[0]; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -3731,10 +3762,10 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) return 0; } -static uint32_t -g4x_signal_levels(uint8_t train_set) +static u32 +g4x_signal_levels(u8 train_set) { - uint32_t signal_levels = 0; + u32 signal_levels = 0; switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: @@ -3770,8 +3801,8 @@ g4x_signal_levels(uint8_t train_set) } /* SNB CPU eDP voltage swing and pre-emphasis control */ -static uint32_t -snb_cpu_edp_signal_levels(uint8_t train_set) +static u32 +snb_cpu_edp_signal_levels(u8 train_set) { int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); @@ -3798,8 +3829,8 @@ snb_cpu_edp_signal_levels(uint8_t train_set) } /* IVB CPU eDP voltage swing and pre-emphasis control */ -static uint32_t -ivb_cpu_edp_signal_levels(uint8_t train_set) +static u32 +ivb_cpu_edp_signal_levels(u8 train_set) { int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); @@ -3834,8 +3865,8 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; - uint32_t signal_levels, mask = 0; - uint8_t train_set = intel_dp->train_set[0]; + u32 signal_levels, mask = 0; + u8 train_set = intel_dp->train_set[0]; if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) { signal_levels = bxt_signal_levels(intel_dp); @@ -3849,7 +3880,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp) } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { signal_levels = ivb_cpu_edp_signal_levels(train_set); mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; - } else if (IS_GEN6(dev_priv) && port == PORT_A) { + } else if (IS_GEN(dev_priv, 6) && port == PORT_A) { signal_levels = snb_cpu_edp_signal_levels(train_set); mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; } else { @@ -3874,7 +3905,7 @@ 
intel_dp_set_signal_levels(struct intel_dp *intel_dp) void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, - uint8_t dp_train_pat) + u8 dp_train_pat) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = @@ -3891,7 +3922,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; - uint32_t val; + u32 val; if (!HAS_DDI(dev_priv)) return; @@ -3911,7 +3942,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) if (port == PORT_A) return; - if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port), + if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port), DP_TP_STATUS_IDLE_DONE, DP_TP_STATUS_IDLE_DONE, 1)) @@ -3926,7 +3957,7 @@ intel_dp_link_down(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); enum port port = encoder->port; - uint32_t DP = intel_dp->DP; + u32 DP = intel_dp->DP; if (WARN_ON(HAS_DDI(dev_priv))) return; @@ -3985,12 +4016,49 @@ intel_dp_link_down(struct intel_encoder *encoder, intel_dp->DP = DP; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - pps_lock(intel_dp); - intel_dp->active_pipe = INVALID_PIPE; - pps_unlock(intel_dp); + intel_wakeref_t wakeref; + + with_pps_lock(intel_dp, wakeref) + intel_dp->active_pipe = INVALID_PIPE; } } +static void +intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) +{ + u8 dpcd_ext[6]; + + /* + * Prior to DP1.3 the bit represented by + * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. + * if it is set DP_DPCD_REV at 0000h could be at a value less than + * the true capability of the panel. The only way to check is to + * then compare 0000h and 2200h. 
+ */ + if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) + return; + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV, + &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) { + DRM_ERROR("DPCD failed read at extended capabilities\n"); + return; + } + + if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { + DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n"); + return; + } + + if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext))) + return; + + DRM_DEBUG_KMS("Base DPCD: %*ph\n", + (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); + + memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)); +} + bool intel_dp_read_dpcd(struct intel_dp *intel_dp) { @@ -3998,6 +4066,8 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp) sizeof(intel_dp->dpcd)) < 0) return false; /* aux transfer failed */ + intel_dp_extended_receiver_capabilities(intel_dp); + DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd); return intel_dp->dpcd[DP_DPCD_REV] != 0; @@ -4228,7 +4298,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) DP_DPRX_ESI_LEN; } -u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count, +u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count, int mode_clock, int mode_hdisplay) { u16 bits_per_pixel, max_bpp_small_joiner_ram; @@ -4295,7 +4365,7 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } /* Also take into account max slice width */ - min_slice_count = min_t(uint8_t, min_slice_count, + min_slice_count = min_t(u8, min_slice_count, DIV_ROUND_UP(mode_hdisplay, max_slice_width)); @@ -4313,11 +4383,11 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } -static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) +static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) { int status = 0; int test_link_rate; - uint8_t test_lane_count, test_link_bw; + u8 test_lane_count, test_link_bw; /* (DP CTS 1.2) * 4.3.1.11 */ @@ -4350,10 +4420,10 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) return DP_TEST_ACK; } -static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) +static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) { - uint8_t test_pattern; - uint8_t test_misc; + u8 test_pattern; + u8 test_misc; __be16 h_width, v_height; int status = 0; @@ -4411,9 +4481,9 @@ static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) return DP_TEST_ACK; } -static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp) +static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) { - uint8_t test_result = DP_TEST_ACK; + u8 test_result = DP_TEST_ACK; struct intel_connector *intel_connector = intel_dp->attached_connector; struct drm_connector *connector = &intel_connector->base; @@ -4455,16 +4525,16 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp) return test_result; } -static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) +static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) { - uint8_t test_result = DP_TEST_NAK; + u8 test_result = DP_TEST_NAK; return test_result; } static void intel_dp_handle_test_request(struct intel_dp *intel_dp) { - uint8_t response = DP_TEST_NAK; - uint8_t request = 0; + u8 response = DP_TEST_NAK; + u8 request = 0; int status; status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); @@ -4552,12 +4622,10 @@ go_again: return ret; } else { - struct intel_digital_port 
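/*
 * Illustrative sketch, not part of this patch, of the decision implemented
 * by intel_dp_extended_receiver_capabilities() above: when the sink flags
 * the extended receiver-capability field, the copy at 2200h supersedes the
 * one at 0000h unless it reports an older DPCD revision or is identical.
 * The helper below works on caller-supplied buffers; names are invented.
 */
#include <stdbool.h>
#include <string.h>

#define DEMO_EXT_CAP_LEN 6

/* Returns true if @base should be overwritten with @ext. */
static bool demo_prefer_extended_dpcd(const unsigned char *base,
				      const unsigned char *ext,
				      bool ext_field_present)
{
	if (!ext_field_present)
		return false;
	if (base[0] > ext[0])		/* byte 0 is the DPCD revision */
		return false;
	if (!memcmp(base, ext, DEMO_EXT_CAP_LEN))
		return false;		/* nothing new to adopt */
	return true;
}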
*intel_dig_port = dp_to_dig_port(intel_dp); DRM_DEBUG_KMS("failed to get ESI - device may have failed\n"); intel_dp->is_mst = false; - drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); - /* send a hotplug event */ - drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev); + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, + intel_dp->is_mst); } } return -EINVAL; @@ -4720,7 +4788,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp) intel_dp_handle_test_request(intel_dp); if (val & DP_CP_IRQ) - intel_hdcp_check_link(intel_dp->attached_connector); + intel_hdcp_handle_cp_irq(intel_dp->attached_connector); if (val & DP_SINK_SPECIFIC_IRQ) DRM_DEBUG_DRIVER("Sink specific irq unhandled\n"); @@ -4790,8 +4858,8 @@ static enum drm_connector_status intel_dp_detect_dpcd(struct intel_dp *intel_dp) { struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); - uint8_t *dpcd = intel_dp->dpcd; - uint8_t type; + u8 *dpcd = intel_dp->dpcd; + u8 type; if (lspcon->active) lspcon_resume(lspcon); @@ -5028,28 +5096,38 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv, return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port); } +static const char *tc_type_name(enum tc_port_type type) +{ + static const char * const names[] = { + [TC_PORT_UNKNOWN] = "unknown", + [TC_PORT_LEGACY] = "legacy", + [TC_PORT_TYPEC] = "typec", + [TC_PORT_TBT] = "tbt", + }; + + if (WARN_ON(type >= ARRAY_SIZE(names))) + type = TC_PORT_UNKNOWN; + + return names[type]; +} + static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, struct intel_digital_port *intel_dig_port, bool is_legacy, bool is_typec, bool is_tbt) { enum port port = intel_dig_port->base.port; enum tc_port_type old_type = intel_dig_port->tc_type; - const char *type_str; WARN_ON(is_legacy + is_typec + is_tbt != 1); - if (is_legacy) { + if (is_legacy) intel_dig_port->tc_type = TC_PORT_LEGACY; - type_str = "legacy"; - } else if (is_typec) { + else if (is_typec) intel_dig_port->tc_type = TC_PORT_TYPEC; - type_str = "typec"; - } else if (is_tbt) { + else if (is_tbt) intel_dig_port->tc_type = TC_PORT_TBT; - type_str = "tbt"; - } else { + else return; - } /* Types are not supposed to be changed at runtime. */ WARN_ON(old_type != TC_PORT_UNKNOWN && @@ -5057,12 +5135,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, if (old_type != intel_dig_port->tc_type) DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port), - type_str); + tc_type_name(intel_dig_port->tc_type)); } -static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, - struct intel_digital_port *dig_port); - /* * This function implements the first part of the Connect Flow described by our * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading @@ -5097,6 +5172,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv, val = I915_READ(PORT_TX_DFLEXDPPMS); if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) { DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port); + WARN_ON(dig_port->tc_legacy_port); return false; } @@ -5128,8 +5204,8 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv, * See the comment at the connect function. This implements the Disconnect * Flow. 
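/*
 * Illustrative sketch, not part of this patch: tc_type_name() above uses
 * the common kernel idiom of a designated-initializer string table plus a
 * bounds check that falls back to a safe entry.  A generic standalone
 * version of the same pattern, with invented names:
 */
#include <stdio.h>

enum demo_port_type {
	DEMO_PORT_UNKNOWN,
	DEMO_PORT_LEGACY,
	DEMO_PORT_TYPEC,
	DEMO_PORT_TBT,
};

#define DEMO_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *demo_port_type_name(enum demo_port_type type)
{
	static const char * const names[] = {
		[DEMO_PORT_UNKNOWN] = "unknown",
		[DEMO_PORT_LEGACY]  = "legacy",
		[DEMO_PORT_TYPEC]   = "typec",
		[DEMO_PORT_TBT]     = "tbt",
	};

	/* Out-of-range values degrade to "unknown" instead of reading junk. */
	if ((unsigned int)type >= DEMO_ARRAY_SIZE(names))
		type = DEMO_PORT_UNKNOWN;

	return names[type];
}

int main(void)
{
	printf("%s\n", demo_port_type_name(DEMO_PORT_TBT));	/* prints "tbt" */
	return 0;
}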
*/ -static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, - struct intel_digital_port *dig_port) +void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, + struct intel_digital_port *dig_port) { enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); @@ -5149,6 +5225,10 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, I915_WRITE(PORT_TX_DFLEXDPCSSS, val); } + DRM_DEBUG_KMS("Port %c TC type %s disconnected\n", + port_name(dig_port->base.port), + tc_type_name(dig_port->tc_type)); + dig_port->tc_type = TC_PORT_UNKNOWN; } @@ -5170,7 +5250,14 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv, bool is_legacy, is_typec, is_tbt; u32 dpsp; - is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port); + /* + * WARN if we got a legacy port HPD, but VBT didn't mark the port as + * legacy. Treat the port as legacy from now on. + */ + if (WARN_ON(!intel_dig_port->tc_legacy_port && + I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))) + intel_dig_port->tc_legacy_port = true; + is_legacy = intel_dig_port->tc_legacy_port; /* * The spec says we shouldn't be using the ISR bits for detecting @@ -5182,6 +5269,7 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv, if (!is_legacy && !is_typec && !is_tbt) { icl_tc_phy_disconnect(dev_priv, intel_dig_port); + return false; } @@ -5224,7 +5312,7 @@ bool intel_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (HAS_GMCH_DISPLAY(dev_priv)) { + if (HAS_GMCH(dev_priv)) { if (IS_GM45(dev_priv)) return gm45_digital_port_connected(encoder); else @@ -5233,17 +5321,17 @@ bool intel_digital_port_connected(struct intel_encoder *encoder) if (INTEL_GEN(dev_priv) >= 11) return icl_digital_port_connected(encoder); - else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) + else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) return spt_digital_port_connected(encoder); else if (IS_GEN9_LP(dev_priv)) return bxt_digital_port_connected(encoder); - else if (IS_GEN8(dev_priv)) + else if (IS_GEN(dev_priv, 8)) return bdw_digital_port_connected(encoder); - else if (IS_GEN7(dev_priv)) + else if (IS_GEN(dev_priv, 7)) return ivb_digital_port_connected(encoder); - else if (IS_GEN6(dev_priv)) + else if (IS_GEN(dev_priv, 6)) return snb_digital_port_connected(encoder); - else if (IS_GEN5(dev_priv)) + else if (IS_GEN(dev_priv, 5)) return ilk_digital_port_connected(encoder); MISSING_CASE(INTEL_GEN(dev_priv)); @@ -5305,12 +5393,13 @@ intel_dp_detect(struct drm_connector *connector, enum drm_connector_status status; enum intel_display_power_domain aux_domain = intel_aux_power_domain(dig_port); + intel_wakeref_t wakeref; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); - intel_display_power_get(dev_priv, aux_domain); + wakeref = intel_display_power_get(dev_priv, aux_domain); /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) @@ -5376,7 +5465,7 @@ intel_dp_detect(struct drm_connector *connector, ret = intel_dp_retrain_link(encoder, ctx); if (ret) { - intel_display_power_put(dev_priv, aux_domain); + intel_display_power_put(dev_priv, aux_domain, wakeref); return ret; } } @@ -5400,7 +5489,7 @@ out: if (status != connector_status_connected && !intel_dp->is_mst) intel_dp_unset_edid(intel_dp); - intel_display_power_put(dev_priv, aux_domain); + intel_display_power_put(dev_priv, aux_domain, wakeref); return status; } @@ -5413,6 
+5502,7 @@ intel_dp_force(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); enum intel_display_power_domain aux_domain = intel_aux_power_domain(dig_port); + intel_wakeref_t wakeref; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); @@ -5421,11 +5511,11 @@ intel_dp_force(struct drm_connector *connector) if (connector->status != connector_status_connected) return; - intel_display_power_get(dev_priv, aux_domain); + wakeref = intel_display_power_get(dev_priv, aux_domain); intel_dp_set_edid(intel_dp); - intel_display_power_put(dev_priv, aux_domain); + intel_display_power_put(dev_priv, aux_domain, wakeref); } static int intel_dp_get_modes(struct drm_connector *connector) @@ -5490,21 +5580,22 @@ intel_dp_connector_unregister(struct drm_connector *connector) intel_connector_unregister(connector); } -void intel_dp_encoder_destroy(struct drm_encoder *encoder) +void intel_dp_encoder_flush_work(struct drm_encoder *encoder) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &intel_dig_port->dp; intel_dp_mst_encoder_cleanup(intel_dig_port); if (intel_dp_is_edp(intel_dp)) { + intel_wakeref_t wakeref; + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); /* * vdd might still be enabled do to the delayed vdd off. * Make sure vdd is actually turned off here. */ - pps_lock(intel_dp); - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) + edp_panel_vdd_off_sync(intel_dp); if (intel_dp->edp_notifier.notifier_call) { unregister_reboot_notifier(&intel_dp->edp_notifier); @@ -5513,14 +5604,20 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) } intel_dp_aux_fini(intel_dp); +} + +static void intel_dp_encoder_destroy(struct drm_encoder *encoder) +{ + intel_dp_encoder_flush_work(encoder); drm_encoder_cleanup(encoder); - kfree(intel_dig_port); + kfree(enc_to_dig_port(encoder)); } void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) return; @@ -5530,9 +5627,20 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) * Make sure vdd is actually turned off here. */ cancel_delayed_work_sync(&intel_dp->panel_vdd_work); - pps_lock(intel_dp); - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) + edp_panel_vdd_off_sync(intel_dp); +} + +static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) +{ + long ret; + +#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) + ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, + msecs_to_jiffies(timeout)); + + if (!ret) + DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n"); } static @@ -5545,7 +5653,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, .address = DP_AUX_HDCP_AKSV, .size = DRM_HDCP_KSV_LEN, }; - uint8_t txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; + u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; ssize_t dpcd_ret; int ret; @@ -5578,7 +5686,12 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, } reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; - return reply == DP_AUX_NATIVE_REPLY_ACK ? 
0 : -EIO; + if (reply != DP_AUX_NATIVE_REPLY_ACK) { + DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", + reply); + return -EIO; + } + return 0; } static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, @@ -5754,6 +5867,336 @@ int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, return 0; } +struct hdcp2_dp_errata_stream_type { + u8 msg_id; + u8 stream_type; +} __packed; + +static struct hdcp2_dp_msg_data { + u8 msg_id; + u32 offset; + bool msg_detectable; + u32 timeout; + u32 timeout2; /* Added for non_paired situation */ + } hdcp2_msg_data[] = { + {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0}, + {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, + false, HDCP_2_2_CERT_TIMEOUT_MS, 0}, + {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, + false, 0, 0}, + {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, + false, 0, 0}, + {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, + true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, + HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS}, + {HDCP_2_2_AKE_SEND_PAIRING_INFO, + DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, + HDCP_2_2_PAIRING_TIMEOUT_MS, 0}, + {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0}, + {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, + false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0}, + {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, + 0, 0}, + {HDCP_2_2_REP_SEND_RECVID_LIST, + DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, + HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0}, + {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, + 0, 0}, + {HDCP_2_2_REP_STREAM_MANAGE, + DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, + 0, 0}, + {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, + false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0}, +/* local define to shovel this through the write_2_2 interface */ +#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 + {HDCP_2_2_ERRATA_DP_STREAM_TYPE, + DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, + 0, 0}, + }; + +static inline +int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, + u8 *rx_status) +{ + ssize_t ret; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, + HDCP_2_2_DP_RXSTATUS_LEN); + if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { + DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? 
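/*
 * Illustrative sketch, not part of this patch: intel_dp_hdcp_wait_for_cp_irq()
 * above waits for the sink's CP_IRQ counter to move past a snapshot cached
 * before the request was sent, and the hdcp2_msg_data table supplies the
 * per-message timeout.  A userspace analogue of "wait until the counter
 * advances or we time out", using invented names:
 */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct demo_irq_queue {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	unsigned int count;		/* bumped by the "interrupt" side */
};

/* Returns true if count moved past @cached before @timeout_ms expired. */
static bool demo_wait_for_irq(struct demo_irq_queue *q,
			      unsigned int cached, int timeout_ms)
{
	struct timespec deadline;
	bool advanced = true;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&q->lock);
	while (q->count == cached) {
		if (pthread_cond_timedwait(&q->cond, &q->lock, &deadline)) {
			/* Timed out: report whatever the counter says now. */
			advanced = (q->count != cached);
			break;
		}
	}
	pthread_mutex_unlock(&q->lock);

	return advanced;
}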
-EIO : ret; + } + + return 0; +} + +static +int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, + u8 msg_id, bool *msg_ready) +{ + u8 rx_status; + int ret; + + *msg_ready = false; + ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); + if (ret < 0) + return ret; + + switch (msg_id) { + case HDCP_2_2_AKE_SEND_HPRIME: + if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) + *msg_ready = true; + break; + case HDCP_2_2_AKE_SEND_PAIRING_INFO: + if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) + *msg_ready = true; + break; + case HDCP_2_2_REP_SEND_RECVID_LIST: + if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) + *msg_ready = true; + break; + default: + DRM_ERROR("Unidentified msg_id: %d\n", msg_id); + return -EINVAL; + } + + return 0; +} + +static ssize_t +intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, + struct hdcp2_dp_msg_data *hdcp2_msg_data) +{ + struct intel_dp *dp = &intel_dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; + u8 msg_id = hdcp2_msg_data->msg_id; + int ret, timeout; + bool msg_ready = false; + + if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) + timeout = hdcp2_msg_data->timeout2; + else + timeout = hdcp2_msg_data->timeout; + + /* + * There is no way to detect the CERT, LPRIME and STREAM_READY + * availability. So Wait for timeout and read the msg. + */ + if (!hdcp2_msg_data->msg_detectable) { + mdelay(timeout); + ret = 0; + } else { + /* + * As we want to check the msg availability at timeout, Ignoring + * the timeout at wait for CP_IRQ. + */ + intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); + ret = hdcp2_detect_msg_availability(intel_dig_port, + msg_id, &msg_ready); + if (!msg_ready) + ret = -ETIMEDOUT; + } + + if (ret) + DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n", + hdcp2_msg_data->msg_id, ret, timeout); + + return ret; +} + +static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++) + if (hdcp2_msg_data[i].msg_id == msg_id) + return &hdcp2_msg_data[i]; + + return NULL; +} + +static +int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, + void *buf, size_t size) +{ + struct intel_dp *dp = &intel_dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; + unsigned int offset; + u8 *byte = buf; + ssize_t ret, bytes_to_write, len; + struct hdcp2_dp_msg_data *hdcp2_msg_data; + + hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); + if (!hdcp2_msg_data) + return -EINVAL; + + offset = hdcp2_msg_data->offset; + + /* No msg_id in DP HDCP2.2 msgs */ + bytes_to_write = size - 1; + byte++; + + hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); + + while (bytes_to_write) { + len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? + DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; + + ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, + offset, (void *)byte, len); + if (ret < 0) + return ret; + + bytes_to_write -= ret; + byte += ret; + offset += ret; + } + + return size; +} + +static +ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port) +{ + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u32 dev_cnt; + ssize_t ret; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_HDCP_2_2_REG_RXINFO_OFFSET, + (void *)rx_info, HDCP_2_2_RXINFO_LEN); + if (ret != HDCP_2_2_RXINFO_LEN) + return ret >= 0 ? 
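/*
 * Illustrative sketch, not part of this patch: intel_dp_hdcp2_write_msg()
 * above moves the HDCP message body over AUX in chunks of at most
 * DP_AUX_MAX_PAYLOAD_BYTES, advancing the buffer and the DPCD offset by
 * however many bytes each transfer actually moved.  A generic version of
 * that loop, with an invented transfer callback:
 */
#include <stddef.h>
#include <sys/types.h>

#define DEMO_MAX_PAYLOAD 16	/* stands in for DP_AUX_MAX_PAYLOAD_BYTES */

typedef ssize_t (*demo_xfer_fn)(unsigned int offset,
				const void *buf, size_t len);

/* Returns 0 on success or the first negative error from the callback. */
static int demo_chunked_write(demo_xfer_fn xfer, unsigned int offset,
			      const void *buf, size_t size)
{
	const unsigned char *byte = buf;

	while (size) {
		size_t len = size > DEMO_MAX_PAYLOAD ? DEMO_MAX_PAYLOAD : size;
		ssize_t ret = xfer(offset, byte, len);

		if (ret < 0)
			return (int)ret;
		if (ret == 0)
			return -1;	/* avoid spinning on a stuck link */

		/* Short transfers are fine: resume where the last one stopped. */
		size   -= (size_t)ret;
		byte   += ret;
		offset += (unsigned int)ret;
	}

	return 0;
}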
-EIO : ret; + + dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | + HDCP_2_2_DEV_COUNT_LO(rx_info[1])); + + if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) + dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; + + ret = sizeof(struct hdcp2_rep_send_receiverid_list) - + HDCP_2_2_RECEIVER_IDS_MAX_LEN + + (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); + + return ret; +} + +static +int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, + u8 msg_id, void *buf, size_t size) +{ + unsigned int offset; + u8 *byte = buf; + ssize_t ret, bytes_to_recv, len; + struct hdcp2_dp_msg_data *hdcp2_msg_data; + + hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); + if (!hdcp2_msg_data) + return -EINVAL; + offset = hdcp2_msg_data->offset; + + ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data); + if (ret < 0) + return ret; + + if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { + ret = get_receiver_id_list_size(intel_dig_port); + if (ret < 0) + return ret; + + size = ret; + } + bytes_to_recv = size - 1; + + /* DP adaptation msgs has no msg_id */ + byte++; + + while (bytes_to_recv) { + len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? + DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset, + (void *)byte, len); + if (ret < 0) { + DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret); + return ret; + } + + bytes_to_recv -= ret; + byte += ret; + offset += ret; + } + byte = buf; + *byte = msg_id; + + return size; +} + +static +int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, + bool is_repeater, u8 content_type) +{ + struct hdcp2_dp_errata_stream_type stream_type_msg; + + if (is_repeater) + return 0; + + /* + * Errata for DP: As Stream type is used for encryption, Receiver + * should be communicated with stream type for the decryption of the + * content. + * Repeater will be communicated with stream type as a part of it's + * auth later in time. + */ + stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; + stream_type_msg.stream_type = content_type; + + return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, + sizeof(stream_type_msg)); +} + +static +int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port) +{ + u8 rx_status; + int ret; + + ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); + if (ret) + return ret; + + if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) + ret = HDCP_REAUTH_REQUEST; + else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) + ret = HDCP_LINK_INTEGRITY_FAILURE; + else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) + ret = HDCP_TOPOLOGY_CHANGE; + + return ret; +} + +static +int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port, + bool *capable) +{ + u8 rx_caps[3]; + int ret; + + *capable = false; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_HDCP_2_2_REG_RX_CAPS_OFFSET, + rx_caps, HDCP_2_2_RXCAPS_LEN); + if (ret != HDCP_2_2_RXCAPS_LEN) + return ret >= 0 ? 
-EIO : ret; + + if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && + HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) + *capable = true; + + return 0; +} + static const struct intel_hdcp_shim intel_dp_hdcp_shim = { .write_an_aksv = intel_dp_hdcp_write_an_aksv, .read_bksv = intel_dp_hdcp_read_bksv, @@ -5766,6 +6209,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = { .toggle_signalling = intel_dp_hdcp_toggle_signalling, .check_link = intel_dp_hdcp_check_link, .hdcp_capable = intel_dp_hdcp_capable, + .write_2_2_msg = intel_dp_hdcp2_write_msg, + .read_2_2_msg = intel_dp_hdcp2_read_msg, + .config_stream_type = intel_dp_hdcp2_config_stream_type, + .check_2_2_link = intel_dp_hdcp2_check_link, + .hdcp_2_2_capable = intel_dp_hdcp2_capable, + .protocol = HDCP_PROTOCOL_DP, }; static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) @@ -5808,6 +6257,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); + intel_wakeref_t wakeref; if (!HAS_DDI(dev_priv)) intel_dp->DP = I915_READ(intel_dp->output_reg); @@ -5817,18 +6267,19 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder) intel_dp->reset_link_params = true; - pps_lock(intel_dp); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - intel_dp->active_pipe = vlv_active_pipe(intel_dp); + with_pps_lock(intel_dp, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + intel_dp->active_pipe = vlv_active_pipe(intel_dp); - if (intel_dp_is_edp(intel_dp)) { - /* Reinit the power sequencer, in case BIOS did something with it. */ - intel_dp_pps_init(intel_dp); - intel_edp_panel_vdd_sanitize(intel_dp); + if (intel_dp_is_edp(intel_dp)) { + /* + * Reinit the power sequencer, in case BIOS did + * something nasty with it. 
+ */ + intel_dp_pps_init(intel_dp); + intel_edp_panel_vdd_sanitize(intel_dp); + } } - - pps_unlock(intel_dp); } static const struct drm_connector_funcs intel_dp_connector_funcs = { @@ -5861,6 +6312,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum irqreturn ret = IRQ_NONE; + intel_wakeref_t wakeref; if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { /* @@ -5883,8 +6335,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) return IRQ_NONE; } - intel_display_power_get(dev_priv, - intel_aux_power_domain(intel_dig_port)); + wakeref = intel_display_power_get(dev_priv, + intel_aux_power_domain(intel_dig_port)); if (intel_dp->is_mst) { if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { @@ -5914,7 +6366,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) put_power: intel_display_power_put(dev_priv, - intel_aux_power_domain(intel_dig_port)); + intel_aux_power_domain(intel_dig_port), + wakeref); return ret; } @@ -5945,7 +6398,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) drm_connector_attach_max_bpc_property(connector, 6, 10); else if (INTEL_GEN(dev_priv) >= 5) drm_connector_attach_max_bpc_property(connector, 6, 12); @@ -5954,7 +6407,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect u32 allowed_scalers; allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); - if (!HAS_GMCH_DISPLAY(dev_priv)) + if (!HAS_GMCH(dev_priv)) allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); drm_connector_attach_scaling_mode_property(connector, allowed_scalers); @@ -5975,43 +6428,34 @@ static void intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; + u32 pp_on, pp_off, pp_ctl; struct pps_registers regs; intel_pps_get_registers(intel_dp, ®s); - /* Workaround: Need to write PP_CONTROL with the unlock key as - * the very first thing. 
*/ pp_ctl = ironlake_get_pp_control(intel_dp); + /* Ensure PPS is unlocked */ + if (!HAS_DDI(dev_priv)) + I915_WRITE(regs.pp_ctrl, pp_ctl); + pp_on = I915_READ(regs.pp_on); pp_off = I915_READ(regs.pp_off); - if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) && - !HAS_PCH_ICP(dev_priv)) { - I915_WRITE(regs.pp_ctrl, pp_ctl); - pp_div = I915_READ(regs.pp_div); - } /* Pull timing values out of registers */ - seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> - PANEL_POWER_UP_DELAY_SHIFT; - - seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> - PANEL_LIGHT_ON_DELAY_SHIFT; + seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); + seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); + seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); + seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); - seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> - PANEL_LIGHT_OFF_DELAY_SHIFT; + if (i915_mmio_reg_valid(regs.pp_div)) { + u32 pp_div; - seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> - PANEL_POWER_DOWN_DELAY_SHIFT; + pp_div = I915_READ(regs.pp_div); - if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) || - HAS_PCH_ICP(dev_priv)) { - seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> - BXT_POWER_CYCLE_DELAY_SHIFT) * 1000; + seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; } else { - seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> - PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; + seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; } } @@ -6136,7 +6580,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, bool force_disable_vdd) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 pp_on, pp_off, pp_div, port_sel = 0; + u32 pp_on, pp_off, port_sel = 0; int div = dev_priv->rawclk_freq / 1000; struct pps_registers regs; enum port port = dp_to_dig_port(intel_dp)->base.port; @@ -6171,23 +6615,10 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, I915_WRITE(regs.pp_ctrl, pp); } - pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | - (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); - pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | - (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); - /* Compute the divisor for the pp clock, simply match the Bspec - * formula. */ - if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) || - HAS_PCH_ICP(dev_priv)) { - pp_div = I915_READ(regs.pp_ctrl); - pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; - pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) - << BXT_POWER_CYCLE_DELAY_SHIFT); - } else { - pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; - pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) - << PANEL_POWER_CYCLE_DELAY_SHIFT); - } + pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | + REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); + pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | + REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); /* Haswell doesn't have any port selection bits for the panel * power sequencer any more. */ @@ -6214,19 +6645,29 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, I915_WRITE(regs.pp_on, pp_on); I915_WRITE(regs.pp_off, pp_off); - if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) || - HAS_PCH_ICP(dev_priv)) - I915_WRITE(regs.pp_ctrl, pp_div); - else - I915_WRITE(regs.pp_div, pp_div); + + /* + * Compute the divisor for the pp clock, simply match the Bspec formula. 
+ */ + if (i915_mmio_reg_valid(regs.pp_div)) { + I915_WRITE(regs.pp_div, + REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | + REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); + } else { + u32 pp_ctl; + + pp_ctl = I915_READ(regs.pp_ctrl); + pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; + pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); + I915_WRITE(regs.pp_ctrl, pp_ctl); + } DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", I915_READ(regs.pp_on), I915_READ(regs.pp_off), - (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) || - HAS_PCH_ICP(dev_priv)) ? - (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : - I915_READ(regs.pp_div)); + i915_mmio_reg_valid(regs.pp_div) ? + I915_READ(regs.pp_div) : + (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); } static void intel_dp_pps_init(struct intel_dp *intel_dp) @@ -6361,8 +6802,8 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp, } mutex_lock(&dev_priv->drrs.mutex); - if (WARN_ON(dev_priv->drrs.dp)) { - DRM_ERROR("DRRS already enabled\n"); + if (dev_priv->drrs.dp) { + DRM_DEBUG_KMS("DRRS already enabled\n"); goto unlock; } @@ -6597,9 +7038,7 @@ intel_dp_drrs_init(struct intel_connector *connector, return NULL; } - downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode, - &connector->base); - + downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); if (!downclock_mode) { DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n"); return NULL; @@ -6621,9 +7060,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct drm_display_mode *fixed_mode = NULL; struct drm_display_mode *downclock_mode = NULL; bool has_dpcd; - struct drm_display_mode *scan; - struct edid *edid; enum pipe pipe = INVALID_PIPE; + intel_wakeref_t wakeref; + struct edid *edid; if (!intel_dp_is_edp(intel_dp)) return true; @@ -6636,20 +7075,18 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, * eDP and LVDS bail out early in this case to prevent interfering * with an already powered-on LVDS power sequencer. */ - if (intel_get_lvds_encoder(&dev_priv->drm)) { + if (intel_get_lvds_encoder(dev_priv)) { WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); DRM_INFO("LVDS was detected, not registering eDP\n"); return false; } - pps_lock(intel_dp); - - intel_dp_init_panel_power_timestamps(intel_dp); - intel_dp_pps_init(intel_dp); - intel_edp_panel_vdd_sanitize(intel_dp); - - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) { + intel_dp_init_panel_power_timestamps(intel_dp); + intel_dp_pps_init(intel_dp); + intel_edp_panel_vdd_sanitize(intel_dp); + } /* Cache DPCD and EDID for edp. 
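/*
 * Illustrative sketch, not part of this patch: the PPS hunks above replace
 * open-coded shift/mask pairs with REG_FIELD_GET()/REG_FIELD_PREP(), which
 * derive the shift from the mask itself.  A minimal standalone version of
 * that idea (GCC/Clang builtins, invented names, example mask value):
 */
#include <assert.h>
#include <stdint.h>

/* Extract the field selected by @mask from register value @val. */
static inline uint32_t demo_field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) >> __builtin_ctz(mask);
}

/* Place @field into the bit positions selected by @mask. */
static inline uint32_t demo_field_prep(uint32_t mask, uint32_t field)
{
	return (field << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	const uint32_t DEMO_POWER_UP_DELAY_MASK = 0x1fff0000;	/* example */
	uint32_t pp_on = demo_field_prep(DEMO_POWER_UP_DELAY_MASK, 0x123);

	assert(demo_field_get(DEMO_POWER_UP_DELAY_MASK, pp_on) == 0x123);
	return 0;
}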
*/ has_dpcd = intel_edp_init_dpcd(intel_dp); @@ -6675,26 +7112,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, } intel_connector->edid = edid; - /* prefer fixed mode from EDID if available */ - list_for_each_entry(scan, &connector->probed_modes, head) { - if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { - fixed_mode = drm_mode_duplicate(dev, scan); - downclock_mode = intel_dp_drrs_init( - intel_connector, fixed_mode); - break; - } - } + fixed_mode = intel_panel_edid_fixed_mode(intel_connector); + if (fixed_mode) + downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); /* fallback to VBT if available for eDP */ - if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { - fixed_mode = drm_mode_duplicate(dev, - dev_priv->vbt.lfp_lvds_vbt_mode); - if (fixed_mode) { - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; - connector->display_info.width_mm = fixed_mode->width_mm; - connector->display_info.height_mm = fixed_mode->height_mm; - } - } + if (!fixed_mode) + fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); mutex_unlock(&dev->mode_config.mutex); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { @@ -6734,9 +7158,8 @@ out_vdd_off: * vdd might still be enabled do to the delayed vdd off. * Make sure vdd is actually turned off here. */ - pps_lock(intel_dp); - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) + edp_panel_vdd_off_sync(intel_dp); return false; } @@ -6828,7 +7251,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); - if (!HAS_GMCH_DISPLAY(dev_priv)) + if (!HAS_GMCH(dev_priv)) connector->interlace_allowed = true; connector->doublescan_allowed = 0; @@ -6910,6 +7333,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->compute_config = intel_dp_compute_config; intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; + intel_encoder->update_pipe = intel_panel_update_backlight; intel_encoder->suspend = intel_dp_encoder_suspend; if (IS_CHERRYVIEW(dev_priv)) { intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; @@ -7004,7 +7428,10 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv) continue; ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr); - if (ret) - intel_dp_check_mst_status(intel_dp); + if (ret) { + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, + false); + } } } diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 30be0e39bd5f..b59c87daa4f7 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -24,7 +24,7 @@ #include "intel_drv.h" static void -intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE]) +intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE]) { DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x", @@ -34,17 +34,17 @@ intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE]) static void intel_get_adjust_train(struct intel_dp *intel_dp, - const uint8_t link_status[DP_LINK_STATUS_SIZE]) + const u8 link_status[DP_LINK_STATUS_SIZE]) { - uint8_t v = 0; - uint8_t p = 0; + u8 v = 0; + u8 p = 0; int lane; - uint8_t voltage_max; - uint8_t preemph_max; + u8 voltage_max; + u8 preemph_max; for (lane = 0; lane < 
intel_dp->lane_count; lane++) { - uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); - uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); if (this_v > v) v = this_v; @@ -66,9 +66,9 @@ intel_get_adjust_train(struct intel_dp *intel_dp, static bool intel_dp_set_link_train(struct intel_dp *intel_dp, - uint8_t dp_train_pat) + u8 dp_train_pat) { - uint8_t buf[sizeof(intel_dp->train_set) + 1]; + u8 buf[sizeof(intel_dp->train_set) + 1]; int ret, len; intel_dp_program_link_training_pattern(intel_dp, dp_train_pat); @@ -92,7 +92,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, static bool intel_dp_reset_link_train(struct intel_dp *intel_dp, - uint8_t dp_train_pat) + u8 dp_train_pat) { memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); intel_dp_set_signal_levels(intel_dp); @@ -128,11 +128,11 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp) static bool intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) { - uint8_t voltage; + u8 voltage; int voltage_tries, cr_tries, max_cr_tries; bool max_vswing_reached = false; - uint8_t link_config[2]; - uint8_t link_bw, rate_select; + u8 link_config[2]; + u8 link_bw, rate_select; if (intel_dp->prepare_link_retrain) intel_dp->prepare_link_retrain(intel_dp); @@ -186,7 +186,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) voltage_tries = 1; for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { - uint8_t link_status[DP_LINK_STATUS_SIZE]; + u8 link_status[DP_LINK_STATUS_SIZE]; drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); @@ -282,7 +282,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) { int tries; u32 training_pattern; - uint8_t link_status[DP_LINK_STATUS_SIZE]; + u8 link_status[DP_LINK_STATUS_SIZE]; bool channel_eq = false; training_pattern = intel_dp_training_pattern(intel_dp); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 4de247ddf05f..19d81cef2ab6 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -23,80 +23,114 @@ * */ -#include <drm/drmP.h> #include "i915_drv.h" #include "intel_drv.h" #include <drm/drm_atomic_helper.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> -static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + struct link_config_limits *limits) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_atomic_state *state = crtc_state->base.state; struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); - struct intel_digital_port *intel_dig_port = intel_mst->primary; - struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_connector *connector = conn_state->connector; - void *port = to_intel_connector(connector)->port; - struct drm_atomic_state *state = pipe_config->base.state; - int bpp; - int lane_count, slots = 0; - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - int mst_pbn; + struct intel_dp *intel_dp = &intel_mst->primary->dp; + struct intel_connector *connector = + 
to_intel_connector(conn_state->connector); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + void *port = connector->port; bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); + int bpp, slots = -EINVAL; + + crtc_state->lane_count = limits->max_lane_count; + crtc_state->port_clock = limits->max_clock; + + for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { + crtc_state->pipe_bpp = bpp; + + crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, + crtc_state->pipe_bpp); + + slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, + port, crtc_state->pbn); + if (slots == -EDEADLK) + return slots; + if (slots >= 0) + break; + } + + if (slots < 0) { + DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots); + return slots; + } + + intel_link_compute_m_n(crtc_state->pipe_bpp, + crtc_state->lane_count, + adjusted_mode->crtc_clock, + crtc_state->port_clock, + &crtc_state->dp_m_n, + constant_n); + crtc_state->dp_m_n.tu = slots; + + return 0; +} + +static int intel_dp_mst_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp *intel_dp = &intel_mst->primary->dp; + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + void *port = connector->port; + struct link_config_limits limits; + int ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; - bpp = 24; - if (intel_dp->compliance.test_data.bpc) { - bpp = intel_dp->compliance.test_data.bpc * 3; - DRM_DEBUG_KMS("Setting pipe bpp to %d\n", - bpp); - } + + if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) + pipe_config->has_audio = + drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port); + else + pipe_config->has_audio = + intel_conn_state->force_audio == HDMI_AUDIO_ON; + /* * for MST we always configure max link bw - the spec doesn't * seem to suggest we should do otherwise. 
*/ - lane_count = intel_dp_max_lane_count(intel_dp); - - pipe_config->lane_count = lane_count; + limits.min_clock = + limits.max_clock = intel_dp_max_link_rate(intel_dp); - pipe_config->pipe_bpp = bpp; + limits.min_lane_count = + limits.max_lane_count = intel_dp_max_lane_count(intel_dp); - pipe_config->port_clock = intel_dp_max_link_rate(intel_dp); + limits.min_bpp = 6 * 3; + limits.max_bpp = pipe_config->pipe_bpp; - if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port)) - pipe_config->has_audio = true; + intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); - mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); - pipe_config->pbn = mst_pbn; - - /* Zombie connectors can't have VCPI slots */ - if (!drm_connector_is_unregistered(connector)) { - slots = drm_dp_atomic_find_vcpi_slots(state, - &intel_dp->mst_mgr, - port, - mst_pbn); - if (slots < 0) { - DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", - slots); - return false; - } - } - - intel_link_compute_m_n(bpp, lane_count, - adjusted_mode->crtc_clock, - pipe_config->port_clock, - &pipe_config->dp_m_n, - constant_n); + ret = intel_dp_mst_compute_link_config(encoder, pipe_config, + conn_state, &limits); + if (ret) + return ret; - pipe_config->dp_m_n.tu = slots; + pipe_config->limited_color_range = + intel_dp_limited_color_range(pipe_config, conn_state); if (IS_GEN9_LP(dev_priv)) pipe_config->lane_lat_optim_mask = @@ -104,38 +138,46 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); - return true; + return 0; } -static int intel_dp_mst_atomic_check(struct drm_connector *connector, - struct drm_connector_state *new_conn_state) +static int +intel_dp_mst_atomic_check(struct drm_connector *connector, + struct drm_connector_state *new_conn_state) { struct drm_atomic_state *state = new_conn_state->state; - struct drm_connector_state *old_conn_state; - struct drm_crtc *old_crtc; + struct drm_connector_state *old_conn_state = + drm_atomic_get_old_connector_state(state, connector); + struct intel_connector *intel_connector = + to_intel_connector(connector); + struct drm_crtc *new_crtc = new_conn_state->crtc; struct drm_crtc_state *crtc_state; - int slots, ret = 0; + struct drm_dp_mst_topology_mgr *mgr; + int ret; - old_conn_state = drm_atomic_get_old_connector_state(state, connector); - old_crtc = old_conn_state->crtc; - if (!old_crtc) + ret = intel_digital_connector_atomic_check(connector, new_conn_state); + if (ret) return ret; - crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc); - slots = to_intel_crtc_state(crtc_state)->dp_m_n.tu; - if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) { - struct drm_dp_mst_topology_mgr *mgr; - struct drm_encoder *old_encoder; + if (!old_conn_state->crtc) + return 0; - old_encoder = old_conn_state->best_encoder; - mgr = &enc_to_mst(old_encoder)->primary->dp.mst_mgr; + /* We only want to free VCPI if this state disables the CRTC on this + * connector + */ + if (new_crtc) { + crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); - ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots); - if (ret) - DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n", slots, ret); - else - to_intel_crtc_state(crtc_state)->dp_m_n.tu = 0; + if (!crtc_state || + !drm_atomic_crtc_needs_modeset(crtc_state) || + crtc_state->enable) + return 0; } + + mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr; + ret = drm_dp_atomic_release_vcpi_slots(state, mgr, + intel_connector->port); + return 
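/*
 * Illustrative sketch, not part of this patch: the new
 * intel_dp_mst_compute_link_config() above walks pipe bpp downwards from
 * the limit in steps of one bit per colour component (2 * 3) and keeps the
 * first value for which VCPI slots can be allocated.  The same search
 * shape in isolation, with an invented feasibility callback:
 */
#include <stdbool.h>

typedef int (*demo_find_slots_fn)(int bpp);	/* >= 0: slots, < 0: error */

/*
 * Returns the chosen bpp and stores the slot count, or -1 if no bpp in
 * [min_bpp, max_bpp] fits on the link.
 */
static int demo_pick_mst_bpp(int max_bpp, int min_bpp,
			     demo_find_slots_fn find_slots, int *slots_out)
{
	int bpp;

	for (bpp = max_bpp; bpp >= min_bpp; bpp -= 2 * 3) {
		int slots = find_slots(bpp);

		if (slots >= 0) {
			*slots_out = slots;
			return bpp;
		}
	}

	return -1;
}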
ret; } @@ -240,7 +282,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, struct intel_connector *connector = to_intel_connector(conn_state->connector); int ret; - uint32_t temp; + u32 temp; /* MST encoders are bound to a crtc, not to a connector, * force the mapping here for get_hw_state. @@ -287,7 +329,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port), DP_TP_STATUS_ACT_SENT, DP_TP_STATUS_ACT_SENT, @@ -352,11 +394,13 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force) static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { .detect = intel_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_get_property = intel_digital_connector_atomic_get_property, + .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; static int intel_dp_mst_get_modes(struct drm_connector *connector) @@ -371,7 +415,6 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; - int bpp = 24; /* MST uses fixed bpp */ int max_rate, mode_rate, max_lanes, max_link_clock; if (drm_connector_is_unregistered(connector)) @@ -384,7 +427,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); - mode_rate = intel_dp_link_required(mode->clock, bpp); + mode_rate = intel_dp_link_required(mode->clock, 18); /* TODO - validate mode against available PBN for link */ if (mode->clock < 10000) @@ -457,6 +500,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo intel_connector->get_hw_state = intel_dp_mst_get_hw_state; intel_connector->mst_port = intel_dp; intel_connector->port = port; + drm_dp_mst_get_port_malloc(port); connector = &intel_connector->base; ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, @@ -484,6 +528,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo if (ret) goto err; + intel_attach_force_audio_property(connector); + intel_attach_broadcast_rgb_property(connector); + drm_connector_attach_max_bpc_property(connector, 6, 12); + return connector; err: @@ -517,20 +565,10 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, drm_connector_put(connector); } -static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) -{ - struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - - drm_kms_helper_hotplug_event(dev); -} - static const struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, .register_connector = intel_dp_register_mst_connector, .destroy_connector = intel_dp_destroy_mst_connector, - .hotplug = 
intel_dp_mst_hotplug, }; static struct intel_dp_mst_encoder * diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 3c7f10d17658..db295c77ff0d 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -341,7 +341,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, BXT_PORT_REF_DW3(phy), GRC_DONE, GRC_DONE, 10)) @@ -383,7 +383,8 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, * The flag should get set in 100us according to the HW team, but * use 1ms due to occasional timeouts observed with that. */ - if (intel_wait_for_register_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy), + if (intel_wait_for_register_fw(&dev_priv->uncore, + BXT_PORT_CL1CM_DW0(phy), PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1)) @@ -413,7 +414,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, } if (phy_info->rcomp_phy != -1) { - uint32_t grc_code; + u32 grc_code; bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy); @@ -445,7 +446,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; - uint32_t val; + u32 val; phy_info = bxt_get_phy_info(dev_priv, phy); @@ -515,7 +516,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; - uint32_t mask; + u32 mask; bool ok; phy_info = bxt_get_phy_info(dev_priv, phy); @@ -567,8 +568,8 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, #undef _CHK } -uint8_t -bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count) +u8 +bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count) { switch (lane_count) { case 1: @@ -585,7 +586,7 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count) } void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, - uint8_t lane_lat_optim_mask) + u8 lane_lat_optim_mask) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; @@ -610,7 +611,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, } } -uint8_t +u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -618,7 +619,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) enum dpio_phy phy; enum dpio_channel ch; int lane; - uint8_t mask; + u8 mask; bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); @@ -739,7 +740,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); enum pipe pipe = crtc->pipe; - uint32_t val; + u32 val; val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); if (reset) diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index d513ca875c67..e01c057ce50b 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -241,13 +241,13 @@ out: } static struct intel_shared_dpll * -intel_find_shared_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, +intel_find_shared_dpll(struct intel_crtc_state *crtc_state, enum intel_dpll_id range_min, enum 
intel_dpll_id range_max) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_shared_dpll *pll; + struct intel_shared_dpll *pll, *unused_pll = NULL; struct intel_shared_dpll_state *shared_dpll; enum intel_dpll_id i; @@ -257,8 +257,11 @@ intel_find_shared_dpll(struct intel_crtc *crtc, pll = &dev_priv->shared_dplls[i]; /* Only want to check enabled timings first */ - if (shared_dpll[i].crtc_mask == 0) + if (shared_dpll[i].crtc_mask == 0) { + if (!unused_pll) + unused_pll = pll; continue; + } if (memcmp(&crtc_state->dpll_hw_state, &shared_dpll[i].hw_state, @@ -273,14 +276,11 @@ intel_find_shared_dpll(struct intel_crtc *crtc, } /* Ok no matching timings, maybe there's a free one? */ - for (i = range_min; i <= range_max; i++) { - pll = &dev_priv->shared_dplls[i]; - if (shared_dpll[i].crtc_mask == 0) { - DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n", - crtc->base.base.id, crtc->base.name, - pll->info->name); - return pll; - } + if (unused_pll) { + DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n", + crtc->base.base.id, crtc->base.name, + unused_pll->info->name); + return unused_pll; } return NULL; @@ -345,9 +345,12 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + intel_wakeref_t wakeref; + u32 val; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; val = I915_READ(PCH_DPLL(id)); @@ -355,7 +358,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->fp0 = I915_READ(PCH_FP0(id)); hw_state->fp1 = I915_READ(PCH_FP1(id)); - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return val & DPLL_VCO_ENABLE; } @@ -417,9 +420,10 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, } static struct intel_shared_dpll * -ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, +ibx_get_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id i; @@ -433,7 +437,7 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, crtc->base.base.id, crtc->base.name, pll->info->name); } else { - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_PCH_PLL_A, DPLL_ID_PCH_PLL_B); } @@ -487,7 +491,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + u32 val; val = I915_READ(WRPLL_CTL(id)); I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); @@ -497,7 +501,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - uint32_t val; + u32 val; val = I915_READ(SPLL_CTL); I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); @@ -509,15 +513,18 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + intel_wakeref_t wakeref; + u32 val; - if 
(!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; val = I915_READ(WRPLL_CTL(id)); hw_state->wrpll = val; - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return val & WRPLL_PLL_ENABLE; } @@ -526,15 +533,18 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - uint32_t val; + intel_wakeref_t wakeref; + u32 val; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; val = I915_READ(SPLL_CTL); hw_state->spll = val; - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return val & SPLL_PLL_ENABLE; } @@ -630,11 +640,12 @@ static unsigned hsw_wrpll_get_budget_for_freq(int clock) return budget; } -static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget, - unsigned r2, unsigned n2, unsigned p, +static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget, + unsigned int r2, unsigned int n2, + unsigned int p, struct hsw_wrpll_rnp *best) { - uint64_t a, b, c, d, diff, diff_best; + u64 a, b, c, d, diff, diff_best; /* No best (r,n,p) yet */ if (best->p == 0) { @@ -693,7 +704,7 @@ static void hsw_ddi_calculate_wrpll(int clock /* in Hz */, unsigned *r2_out, unsigned *n2_out, unsigned *p_out) { - uint64_t freq2k; + u64 freq2k; unsigned p, n2, r2; struct hsw_wrpll_rnp best = { 0, 0, 0 }; unsigned budget; @@ -754,15 +765,13 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, *r2_out = best.r2; } -static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock, - struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state) { struct intel_shared_dpll *pll; - uint32_t val; + u32 val; unsigned int p, n2, r2; - hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); + hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p); val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | @@ -770,7 +779,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock, crtc_state->dpll_hw_state.wrpll = val; - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); if (!pll) @@ -780,11 +789,12 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock, } static struct intel_shared_dpll * -hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock) +hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); struct intel_shared_dpll *pll; enum intel_dpll_id pll_id; + int clock = crtc_state->port_clock; switch (clock / 2) { case 81000: @@ -810,19 +820,18 @@ hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock) } static struct intel_shared_dpll * -hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, +hsw_get_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { struct intel_shared_dpll *pll; - int clock = crtc_state->port_clock; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); if 
(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state); + pll = hsw_ddi_hdmi_get_dpll(crtc_state); } else if (intel_crtc_has_dp_encoder(crtc_state)) { - pll = hsw_ddi_dp_get_dpll(encoder, clock); + pll = hsw_ddi_dp_get_dpll(crtc_state); } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { if (WARN_ON(crtc_state->port_clock / 2 != 135000)) return NULL; @@ -830,7 +839,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_SPLL, DPLL_ID_SPLL); } else { return NULL; @@ -921,7 +930,7 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + u32 val; val = I915_READ(DPLL_CTRL1); @@ -951,7 +960,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, I915_WRITE(regs[id].ctl, I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DPLL_STATUS, DPLL_LOCK(id), DPLL_LOCK(id), @@ -986,12 +995,15 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - uint32_t val; + u32 val; const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; + intel_wakeref_t wakeref; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; ret = false; @@ -1011,7 +1023,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return ret; } @@ -1020,12 +1032,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - uint32_t val; const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; + intel_wakeref_t wakeref; + u32 val; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; ret = false; @@ -1041,15 +1056,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return ret; } struct skl_wrpll_context { - uint64_t min_deviation; /* current minimal deviation */ - uint64_t central_freq; /* chosen central freq */ - uint64_t dco_freq; /* chosen dco freq */ + u64 min_deviation; /* current minimal deviation */ + u64 central_freq; /* chosen central freq */ + u64 dco_freq; /* chosen dco freq */ unsigned int p; /* chosen divider */ }; @@ -1065,11 +1080,11 @@ static void skl_wrpll_context_init(struct skl_wrpll_context *ctx) #define SKL_DCO_MAX_NDEVIATION 600 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx, - uint64_t central_freq, - uint64_t dco_freq, + u64 central_freq, + u64 dco_freq, unsigned int divider) { - uint64_t deviation; + u64 deviation; deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq), 
central_freq); @@ -1143,21 +1158,21 @@ static void skl_wrpll_get_multipliers(unsigned int p, } struct skl_wrpll_params { - uint32_t dco_fraction; - uint32_t dco_integer; - uint32_t qdiv_ratio; - uint32_t qdiv_mode; - uint32_t kdiv; - uint32_t pdiv; - uint32_t central_freq; + u32 dco_fraction; + u32 dco_integer; + u32 qdiv_ratio; + u32 qdiv_mode; + u32 kdiv; + u32 pdiv; + u32 central_freq; }; static void skl_wrpll_params_populate(struct skl_wrpll_params *params, - uint64_t afe_clock, - uint64_t central_freq, - uint32_t p0, uint32_t p1, uint32_t p2) + u64 afe_clock, + u64 central_freq, + u32 p0, u32 p1, u32 p2) { - uint64_t dco_freq; + u64 dco_freq; switch (central_freq) { case 9600000000ULL: @@ -1223,10 +1238,10 @@ static bool skl_ddi_calculate_wrpll(int clock /* in Hz */, struct skl_wrpll_params *wrpll_params) { - uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ - uint64_t dco_central_freq[3] = {8400000000ULL, - 9000000000ULL, - 9600000000ULL}; + u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ + u64 dco_central_freq[3] = { 8400000000ULL, + 9000000000ULL, + 9600000000ULL }; static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20, 24, 28, 30, 32, 36, 40, 42, 44, 48, 52, 54, 56, 60, 64, 66, 68, @@ -1250,7 +1265,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */, for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) { for (i = 0; i < dividers[d].n_dividers; i++) { unsigned int p = dividers[d].list[i]; - uint64_t dco_freq = p * afe_clock; + u64 dco_freq = p * afe_clock; skl_wrpll_try_divider(&ctx, dco_central_freq[dco], @@ -1292,11 +1307,9 @@ skip_remaining_dividers: return true; } -static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - int clock) +static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { - uint32_t ctrl1, cfgcr1, cfgcr2; + u32 ctrl1, cfgcr1, cfgcr2; struct skl_wrpll_params wrpll_params = { 0, }; /* @@ -1307,7 +1320,8 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); - if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params)) + if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, + &wrpll_params)) return false; cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | @@ -1330,17 +1344,16 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, } static bool -skl_ddi_dp_set_dpll_hw_state(int clock, - struct intel_dpll_hw_state *dpll_hw_state) +skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { - uint32_t ctrl1; + u32 ctrl1; /* * See comment in intel_dpll_hw_state to understand why we always use 0 * as the DPLL id in this function. 
*/ ctrl1 = DPLL_CTRL1_OVERRIDE(0); - switch (clock / 2) { + switch (crtc_state->port_clock / 2) { case 81000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); break; @@ -1362,44 +1375,43 @@ skl_ddi_dp_set_dpll_hw_state(int clock, break; } - dpll_hw_state->ctrl1 = ctrl1; + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + crtc_state->dpll_hw_state.ctrl1 = ctrl1; + return true; } static struct intel_shared_dpll * -skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, +skl_get_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { struct intel_shared_dpll *pll; - int clock = crtc_state->port_clock; bool bret; - struct intel_dpll_hw_state dpll_hw_state; - - memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock); + bret = skl_ddi_hdmi_pll_dividers(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); return NULL; } } else if (intel_crtc_has_dp_encoder(crtc_state)) { - bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state); + bret = skl_ddi_dp_set_dpll_hw_state(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); return NULL; } - crtc_state->dpll_hw_state = dpll_hw_state; } else { return NULL; } if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_SKL_DPLL0, DPLL_ID_SKL_DPLL0); else - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_SKL_DPLL1, DPLL_ID_SKL_DPLL3); if (!pll) @@ -1435,7 +1447,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = { static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - uint32_t temp; + u32 temp; enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ enum dpio_phy phy; enum dpio_channel ch; @@ -1556,7 +1568,7 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ - uint32_t temp; + u32 temp; temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); temp &= ~PORT_PLL_ENABLE; @@ -1579,14 +1591,17 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ - uint32_t val; - bool ret; + intel_wakeref_t wakeref; enum dpio_phy phy; enum dpio_channel ch; + u32 val; + bool ret; bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; ret = false; @@ -1643,7 +1658,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return ret; } @@ -1651,12 +1666,12 @@ out: /* bxt clock parameters */ struct bxt_clk_div { int clock; - uint32_t p1; - uint32_t p2; - uint32_t m2_int; - uint32_t m2_frac; + u32 p1; + u32 p2; + u32 m2_int; + u32 m2_frac; bool m2_frac_en; - uint32_t n; + u32 n; int vco; }; @@ -1673,10 +1688,10 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = { }; static bool -bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc, - struct 
intel_crtc_state *crtc_state, int clock, +bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, struct bxt_clk_div *clk_div) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct dpll best_clock; /* Calculate HDMI div */ @@ -1684,9 +1699,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc, * FIXME: tie the following calculation into * i9xx_crtc_compute_clock */ - if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) { + if (!bxt_find_best_dpll(crtc_state, &best_clock)) { DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n", - clock, pipe_name(intel_crtc->pipe)); + crtc_state->port_clock, + pipe_name(crtc->pipe)); return false; } @@ -1703,8 +1719,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc, return true; } -static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div) +static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state, + struct bxt_clk_div *clk_div) { + int clock = crtc_state->port_clock; int i; *clk_div = bxt_dp_clk_val[0]; @@ -1718,13 +1736,16 @@ static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div) clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2; } -static bool bxt_ddi_set_dpll_hw_state(int clock, - struct bxt_clk_div *clk_div, - struct intel_dpll_hw_state *dpll_hw_state) +static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, + const struct bxt_clk_div *clk_div) { + struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state; + int clock = crtc_state->port_clock; int vco = clk_div->vco; - uint32_t prop_coef, int_coef, gain_ctl, targ_cnt; - uint32_t lanestagger; + u32 prop_coef, int_coef, gain_ctl, targ_cnt; + u32 lanestagger; + + memset(dpll_hw_state, 0, sizeof(*dpll_hw_state)); if (vco >= 6200000 && vco <= 6700000) { prop_coef = 4; @@ -1785,55 +1806,45 @@ static bool bxt_ddi_set_dpll_hw_state(int clock, } static bool -bxt_ddi_dp_set_dpll_hw_state(int clock, - struct intel_dpll_hw_state *dpll_hw_state) +bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { - struct bxt_clk_div clk_div = {0}; + struct bxt_clk_div clk_div = {}; - bxt_ddi_dp_pll_dividers(clock, &clk_div); + bxt_ddi_dp_pll_dividers(crtc_state, &clk_div); - return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state); + return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } static bool -bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state, int clock, - struct intel_dpll_hw_state *dpll_hw_state) +bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { - struct bxt_clk_div clk_div = { }; + struct bxt_clk_div clk_div = {}; - bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div); + bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div); - return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state); + return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } static struct intel_shared_dpll * -bxt_get_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +bxt_get_dpll(struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) { - struct intel_dpll_hw_state dpll_hw_state = { }; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; - int i, clock = crtc_state->port_clock; + enum intel_dpll_id id; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && - !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock, - 
&dpll_hw_state)) + !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state)) return NULL; if (intel_crtc_has_dp_encoder(crtc_state) && - !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) + !bxt_ddi_dp_set_dpll_hw_state(crtc_state)) return NULL; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - - crtc_state->dpll_hw_state = dpll_hw_state; - /* 1:1 mapping between ports and PLLs */ - i = (enum intel_dpll_id) encoder->port; - pll = intel_get_shared_dpll_by_id(dev_priv, i); + id = (enum intel_dpll_id) encoder->port; + pll = intel_get_shared_dpll_by_id(dev_priv, id); DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); @@ -1873,7 +1884,7 @@ static void intel_ddi_pll_init(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_GEN(dev_priv) < 9) { - uint32_t val = I915_READ(LCPLL_CTL); + u32 val = I915_READ(LCPLL_CTL); /* * The LCPLL register should be turned on by the BIOS. For now @@ -1892,8 +1903,7 @@ static void intel_ddi_pll_init(struct drm_device *dev) struct intel_dpll_mgr { const struct dpll_info *dpll_info; - struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, + struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder); void (*dump_hw_state)(struct drm_i915_private *dev_priv, @@ -1959,7 +1969,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + u32 val; /* 1. Enable DPLL power in DPLL_ENABLE. */ val = I915_READ(CNL_DPLL_ENABLE(id)); @@ -1967,7 +1977,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, CNL_DPLL_ENABLE(id), PLL_POWER_STATE, PLL_POWER_STATE, @@ -2008,7 +2018,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 7. Wait for PLL lock status in DPLL_ENABLE. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, CNL_DPLL_ENABLE(id), PLL_LOCK, PLL_LOCK, @@ -2034,7 +2044,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + u32 val; /* * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI. @@ -2056,7 +2066,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 4. Wait for PLL not locked status in DPLL_ENABLE. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, CNL_DPLL_ENABLE(id), PLL_LOCK, 0, @@ -2078,7 +2088,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. 
*/ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, CNL_DPLL_ENABLE(id), PLL_POWER_STATE, 0, @@ -2091,10 +2101,13 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + intel_wakeref_t wakeref; + u32 val; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; ret = false; @@ -2113,7 +2126,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return ret; } @@ -2220,12 +2233,12 @@ int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv) } static bool -cnl_ddi_calculate_wrpll(int clock, - struct drm_i915_private *dev_priv, +cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *wrpll_params) { - u32 afe_clock = clock * 5; - uint32_t ref_clock; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + u32 afe_clock = crtc_state->port_clock * 5; + u32 ref_clock; u32 dco_min = 7998000; u32 dco_max = 10000000; u32 dco_mid = (dco_min + dco_max) / 2; @@ -2260,23 +2273,20 @@ cnl_ddi_calculate_wrpll(int clock, ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); - cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv, - kdiv); + cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, + pdiv, qdiv, kdiv); return true; } -static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - int clock) +static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - uint32_t cfgcr0, cfgcr1; + u32 cfgcr0, cfgcr1; struct skl_wrpll_params wrpll_params = { 0, }; cfgcr0 = DPLL_CFGCR0_HDMI_MODE; - if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params)) + if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params)) return false; cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) | @@ -2297,14 +2307,13 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, } static bool -cnl_ddi_dp_set_dpll_hw_state(int clock, - struct intel_dpll_hw_state *dpll_hw_state) +cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { - uint32_t cfgcr0; + u32 cfgcr0; cfgcr0 = DPLL_CFGCR0_SSC_ENABLE; - switch (clock / 2) { + switch (crtc_state->port_clock / 2) { case 81000: cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810; break; @@ -2334,41 +2343,40 @@ cnl_ddi_dp_set_dpll_hw_state(int clock, break; } - dpll_hw_state->cfgcr0 = cfgcr0; + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + crtc_state->dpll_hw_state.cfgcr0 = cfgcr0; + return true; } static struct intel_shared_dpll * -cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, +cnl_get_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { struct intel_shared_dpll *pll; - int clock = crtc_state->port_clock; bool bret; - struct intel_dpll_hw_state dpll_hw_state; - - memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock); + bret = cnl_ddi_hdmi_pll_dividers(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); return NULL; } } 
else if (intel_crtc_has_dp_encoder(crtc_state)) { - bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state); + bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); return NULL; } - crtc_state->dpll_hw_state = dpll_hw_state; } else { DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n", crtc_state->output_types); return NULL; } - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_SKL_DPLL0, DPLL_ID_SKL_DPLL2); if (!pll) { @@ -2409,47 +2417,69 @@ static const struct intel_dpll_mgr cnl_pll_mgr = { .dump_hw_state = cnl_dump_hw_state, }; +struct icl_combo_pll_params { + int clock; + struct skl_wrpll_params wrpll; +}; + /* * These values alrea already adjusted: they're the bits we write to the * registers, not the logical values. */ -static const struct skl_wrpll_params icl_dp_combo_pll_24MHz_values[] = { - { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */ - .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */ - .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */ - .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */ - .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */ - .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2}, - { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */ - .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */ - .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */ - .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, +static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = { + { 540000, + { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */ + .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 270000, + { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */ + .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 162000, + { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */ + .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 324000, + { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */ + .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 216000, + { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */ + .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, }, + { 432000, + { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */ + .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 648000, + { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */ + .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 810000, + { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */ + .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, }; + /* Also used for 38.4 MHz values. 
*/ -static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = { - { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */ - .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */ - .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */ - .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */ - .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */ - .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2}, - { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */ - .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */ - .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */ - .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, +static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = { + { 540000, + { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */ + .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 270000, + { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */ + .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 162000, + { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */ + .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 324000, + { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */ + .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 216000, + { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */ + .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, }, + { 432000, + { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */ + .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 648000, + { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */ + .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 810000, + { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */ + .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, }; static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = { @@ -2462,72 +2492,53 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = { .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }; -static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock, +static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { - const struct skl_wrpll_params *params; - - params = dev_priv->cdclk.hw.ref == 24000 ? - icl_dp_combo_pll_24MHz_values : - icl_dp_combo_pll_19_2MHz_values; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + const struct icl_combo_pll_params *params = + dev_priv->cdclk.hw.ref == 24000 ? 
+ icl_dp_combo_pll_24MHz_values : + icl_dp_combo_pll_19_2MHz_values; + int clock = crtc_state->port_clock; + int i; - switch (clock) { - case 540000: - *pll_params = params[0]; - break; - case 270000: - *pll_params = params[1]; - break; - case 162000: - *pll_params = params[2]; - break; - case 324000: - *pll_params = params[3]; - break; - case 216000: - *pll_params = params[4]; - break; - case 432000: - *pll_params = params[5]; - break; - case 648000: - *pll_params = params[6]; - break; - case 810000: - *pll_params = params[7]; - break; - default: - MISSING_CASE(clock); - return false; + for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) { + if (clock == params[i].clock) { + *pll_params = params[i].wrpll; + return true; + } } - return true; + MISSING_CASE(clock); + return false; } -static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock, +static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + *pll_params = dev_priv->cdclk.hw.ref == 24000 ? icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values; return true; } static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder, int clock, - struct intel_dpll_hw_state *pll_state) + struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - uint32_t cfgcr0, cfgcr1; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + u32 cfgcr0, cfgcr1; struct skl_wrpll_params pll_params = { 0 }; bool ret; if (intel_port_is_tc(dev_priv, encoder->port)) - ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params); + ret = icl_calc_tbt_pll(crtc_state, &pll_params); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params); + ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params); else - ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params); + ret = icl_calc_dp_combo_pll(crtc_state, &pll_params); if (!ret) return false; @@ -2541,102 +2552,31 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, DPLL_CFGCR1_PDIV(pll_params.pdiv) | DPLL_CFGCR1_CENTRAL_FREQ_8400; - pll_state->cfgcr0 = cfgcr0; - pll_state->cfgcr1 = cfgcr1; - return true; -} - -int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, - uint32_t pll_id) -{ - uint32_t cfgcr0, cfgcr1; - uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction; - const struct skl_wrpll_params *params; - int index, n_entries, link_clock; - - /* Read back values from DPLL CFGCR registers */ - cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id)); - cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id)); - - dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK; - dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> - DPLL_CFGCR0_DCO_FRACTION_SHIFT; - pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT; - kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT; - qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >> - DPLL_CFGCR1_QDIV_MODE_SHIFT; - qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> - DPLL_CFGCR1_QDIV_RATIO_SHIFT; - - params = dev_priv->cdclk.hw.ref == 24000 ? 
- icl_dp_combo_pll_24MHz_values : - icl_dp_combo_pll_19_2MHz_values; - n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); - - for (index = 0; index < n_entries; index++) { - if (dco_integer == params[index].dco_integer && - dco_fraction == params[index].dco_fraction && - pdiv == params[index].pdiv && - kdiv == params[index].kdiv && - qdiv_mode == params[index].qdiv_mode && - qdiv_ratio == params[index].qdiv_ratio) - break; - } + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); - /* Map PLL Index to Link Clock */ - switch (index) { - default: - MISSING_CASE(index); - /* fall through */ - case 0: - link_clock = 540000; - break; - case 1: - link_clock = 270000; - break; - case 2: - link_clock = 162000; - break; - case 3: - link_clock = 324000; - break; - case 4: - link_clock = 216000; - break; - case 5: - link_clock = 432000; - break; - case 6: - link_clock = 648000; - break; - case 7: - link_clock = 810000; - break; - } + crtc_state->dpll_hw_state.cfgcr0 = cfgcr0; + crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; - return link_clock; + return true; } -static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id) -{ - return id - DPLL_ID_ICL_MGPLL1 + PORT_C; -} -enum intel_dpll_id icl_port_to_mg_pll_id(enum port port) +static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id) { - return port - PORT_C + DPLL_ID_ICL_MGPLL1; + return id - DPLL_ID_ICL_MGPLL1; } -bool intel_dpll_is_combophy(enum intel_dpll_id id) +enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port) { - return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1; + return tc_port + DPLL_ID_ICL_MGPLL1; } static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, - uint32_t *target_dco_khz, + u32 *target_dco_khz, struct intel_dpll_hw_state *state) { - uint32_t dco_min_freq, dco_max_freq; + u32 dco_min_freq, dco_max_freq; int div1_vals[] = {7, 5, 3, 2}; unsigned int i; int div2; @@ -2706,21 +2646,23 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, * The specification for this function uses real numbers, so the math had to be * adapted to integer-only calculation, that's why it looks so different. 
*/ -static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder, int clock, - struct intel_dpll_hw_state *pll_state) +static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state; int refclk_khz = dev_priv->cdclk.hw.ref; - uint32_t dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; - uint32_t iref_ndiv, iref_trim, iref_pulse_w; - uint32_t prop_coeff, int_coeff; - uint32_t tdc_targetcnt, feedfwgain; - uint64_t ssc_stepsize, ssc_steplen, ssc_steplog; - uint64_t tmp; + int clock = crtc_state->port_clock; + u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; + u32 iref_ndiv, iref_trim, iref_pulse_w; + u32 prop_coeff, int_coeff; + u32 tdc_targetcnt, feedfwgain; + u64 ssc_stepsize, ssc_steplen, ssc_steplog; + u64 tmp; bool use_ssc = false; bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI); + memset(pll_state, 0, sizeof(*pll_state)); + if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, pll_state)) { DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock); @@ -2740,7 +2682,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, } m2div_rem = dco_khz % (refclk_khz * m1div); - tmp = (uint64_t)m2div_rem * (1 << 22); + tmp = (u64)m2div_rem * (1 << 22); do_div(tmp, refclk_khz * m1div); m2div_frac = tmp; @@ -2799,11 +2741,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, } if (use_ssc) { - tmp = (uint64_t)dco_khz * 47 * 32; + tmp = (u64)dco_khz * 47 * 32; do_div(tmp, refclk_khz * m1div * 10000); ssc_stepsize = tmp; - tmp = (uint64_t)dco_khz * 1000; + tmp = (u64)dco_khz * 1000; ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32); } else { ssc_stepsize = 0; @@ -2870,23 +2812,20 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, } static struct intel_shared_dpll * -icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, +icl_get_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); struct intel_digital_port *intel_dig_port; struct intel_shared_dpll *pll; - struct intel_dpll_hw_state pll_state = {}; enum port port = encoder->port; enum intel_dpll_id min, max; - int clock = crtc_state->port_clock; bool ret; if (intel_port_is_combophy(dev_priv, port)) { min = DPLL_ID_ICL_DPLL0; max = DPLL_ID_ICL_DPLL1; - ret = icl_calc_dpll_state(crtc_state, encoder, clock, - &pll_state); + ret = icl_calc_dpll_state(crtc_state, encoder); } else if (intel_port_is_tc(dev_priv, port)) { if (encoder->type == INTEL_OUTPUT_DP_MST) { struct intel_dp_mst_encoder *mst_encoder; @@ -2900,13 +2839,14 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, if (intel_dig_port->tc_type == TC_PORT_TBT) { min = DPLL_ID_ICL_TBTPLL; max = min; - ret = icl_calc_dpll_state(crtc_state, encoder, clock, - &pll_state); + ret = icl_calc_dpll_state(crtc_state, encoder); } else { - min = icl_port_to_mg_pll_id(port); + enum tc_port tc_port; + + tc_port = intel_port_to_tc(dev_priv, port); + min = icl_tc_port_to_pll_id(tc_port); max = min; - ret = icl_calc_mg_pll_state(crtc_state, encoder, clock, - &pll_state); + ret = icl_calc_mg_pll_state(crtc_state); } } else { MISSING_CASE(port); @@ -2918,9 
+2858,8 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, return NULL; } - crtc_state->dpll_hw_state = pll_state; - pll = intel_find_shared_dpll(crtc, crtc_state, min, max); + pll = intel_find_shared_dpll(crtc_state, min, max); if (!pll) { DRM_DEBUG_KMS("No PLL selected\n"); return NULL; @@ -2931,86 +2870,111 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, return pll; } -static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id) +static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) { - if (intel_dpll_is_combophy(id)) - return CNL_DPLL_ENABLE(id); - else if (id == DPLL_ID_ICL_TBTPLL) - return TBT_PLL_ENABLE; - else - /* - * TODO: Make MG_PLL macros use - * tc port id instead of port id - */ - return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id)); + const enum intel_dpll_id id = pll->info->id; + enum tc_port tc_port = icl_pll_id_to_tc_port(id); + intel_wakeref_t wakeref; + bool ret = false; + u32 val; + + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) + return false; + + val = I915_READ(MG_PLL_ENABLE(tc_port)); + if (!(val & PLL_ENABLE)) + goto out; + + hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port)); + hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; + + hw_state->mg_clktop2_coreclkctl1 = + I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port)); + hw_state->mg_clktop2_coreclkctl1 &= + MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; + + hw_state->mg_clktop2_hsclkctl = + I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port)); + hw_state->mg_clktop2_hsclkctl &= + MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; + + hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port)); + hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port)); + hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port)); + hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port)); + hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port)); + + hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port)); + hw_state->mg_pll_tdc_coldst_bias = + I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); + + if (dev_priv->cdclk.hw.ref == 38400) { + hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; + hw_state->mg_pll_bias_mask = 0; + } else { + hw_state->mg_pll_tdc_coldst_bias_mask = -1U; + hw_state->mg_pll_bias_mask = -1U; + } + + hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; + hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; + + ret = true; +out: + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); + return ret; } static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *hw_state, + i915_reg_t enable_reg) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; - enum port port; + intel_wakeref_t wakeref; bool ret = false; + u32 val; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; - val = I915_READ(icl_pll_id_to_enable_reg(id)); + val = I915_READ(enable_reg); if (!(val & PLL_ENABLE)) goto out; - if (intel_dpll_is_combophy(id) || - id == DPLL_ID_ICL_TBTPLL) { - hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = 
I915_READ(ICL_DPLL_CFGCR1(id)); - } else { - port = icl_mg_pll_id_to_port(id); - hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port)); - hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; - - hw_state->mg_clktop2_coreclkctl1 = - I915_READ(MG_CLKTOP2_CORECLKCTL1(port)); - hw_state->mg_clktop2_coreclkctl1 &= - MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; - - hw_state->mg_clktop2_hsclkctl = - I915_READ(MG_CLKTOP2_HSCLKCTL(port)); - hw_state->mg_clktop2_hsclkctl &= - MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | - MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | - MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | - MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; - - hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port)); - hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port)); - hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port)); - hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port)); - hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port)); - - hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port)); - hw_state->mg_pll_tdc_coldst_bias = - I915_READ(MG_PLL_TDC_COLDST_BIAS(port)); - - if (dev_priv->cdclk.hw.ref == 38400) { - hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; - hw_state->mg_pll_bias_mask = 0; - } else { - hw_state->mg_pll_tdc_coldst_bias_mask = -1U; - hw_state->mg_pll_bias_mask = -1U; - } - - hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; - hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; - } + hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return ret; } +static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + return icl_pll_get_hw_state(dev_priv, pll, hw_state, + CNL_DPLL_ENABLE(pll->info->id)); +} + +static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE); +} + static void icl_dpll_write(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { @@ -3026,7 +2990,7 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; - enum port port = icl_mg_pll_id_to_port(pll->info->id); + enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); u32 val; /* @@ -3035,49 +2999,48 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, * during the calc/readout phase if the mask depends on some other HW * state like refclk, see icl_calc_mg_pll_state(). 
*/ - val = I915_READ(MG_REFCLKIN_CTL(port)); + val = I915_READ(MG_REFCLKIN_CTL(tc_port)); val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; val |= hw_state->mg_refclkin_ctl; - I915_WRITE(MG_REFCLKIN_CTL(port), val); + I915_WRITE(MG_REFCLKIN_CTL(tc_port), val); - val = I915_READ(MG_CLKTOP2_CORECLKCTL1(port)); + val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port)); val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; val |= hw_state->mg_clktop2_coreclkctl1; - I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), val); + I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val); - val = I915_READ(MG_CLKTOP2_HSCLKCTL(port)); + val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port)); val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); val |= hw_state->mg_clktop2_hsclkctl; - I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), val); + I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val); - I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0); - I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1); - I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf); - I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock); - I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc); + I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0); + I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1); + I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf); + I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock); + I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc); - val = I915_READ(MG_PLL_BIAS(port)); + val = I915_READ(MG_PLL_BIAS(tc_port)); val &= ~hw_state->mg_pll_bias_mask; val |= hw_state->mg_pll_bias; - I915_WRITE(MG_PLL_BIAS(port), val); + I915_WRITE(MG_PLL_BIAS(tc_port), val); - val = I915_READ(MG_PLL_TDC_COLDST_BIAS(port)); + val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); val &= ~hw_state->mg_pll_tdc_coldst_bias_mask; val |= hw_state->mg_pll_tdc_coldst_bias; - I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), val); + I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val); - POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port)); + POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); } -static void icl_pll_enable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) +static void icl_pll_power_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + i915_reg_t enable_reg) { - const enum intel_dpll_id id = pll->info->id; - i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id); - uint32_t val; + u32 val; val = I915_READ(enable_reg); val |= PLL_POWER_ENABLE; @@ -3087,38 +3050,91 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, * The spec says we need to "wait" but it also says it should be * immediate. */ - if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, - PLL_POWER_STATE, 1)) - DRM_ERROR("PLL %d Power not enabled\n", id); + if (intel_wait_for_register(&dev_priv->uncore, enable_reg, + PLL_POWER_STATE, PLL_POWER_STATE, 1)) + DRM_ERROR("PLL %d Power not enabled\n", pll->info->id); +} - if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL) - icl_dpll_write(dev_priv, pll); - else - icl_mg_pll_write(dev_priv, pll); +static void icl_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + i915_reg_t enable_reg) +{ + u32 val; + + val = I915_READ(enable_reg); + val |= PLL_ENABLE; + I915_WRITE(enable_reg, val); + + /* Timeout is actually 600us. 
*/ + if (intel_wait_for_register(&dev_priv->uncore, enable_reg, + PLL_LOCK, PLL_LOCK, 1)) + DRM_ERROR("PLL %d not locked\n", pll->info->id); +} + +static void combo_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + + icl_pll_power_enable(dev_priv, pll, enable_reg); + + icl_dpll_write(dev_priv, pll); /* * DVFS pre sequence would be here, but in our driver the cdclk code * paths should already be setting the appropriate voltage, hence we do - * nothign here. + * nothing here. */ - val = I915_READ(enable_reg); - val |= PLL_ENABLE; - I915_WRITE(enable_reg, val); + icl_pll_enable(dev_priv, pll, enable_reg); - if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK, - 1)) /* 600us actually. */ - DRM_ERROR("PLL %d not locked\n", id); + /* DVFS post sequence would be here. See the comment above. */ +} + +static void tbt_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE); + + icl_dpll_write(dev_priv, pll); + + /* + * DVFS pre sequence would be here, but in our driver the cdclk code + * paths should already be setting the appropriate voltage, hence we do + * nothing here. + */ + + icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE); + + /* DVFS post sequence would be here. See the comment above. */ +} + +static void mg_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + i915_reg_t enable_reg = + MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id)); + + icl_pll_power_enable(dev_priv, pll, enable_reg); + + icl_mg_pll_write(dev_priv, pll); + + /* + * DVFS pre sequence would be here, but in our driver the cdclk code + * paths should already be setting the appropriate voltage, hence we do + * nothing here. + */ + + icl_pll_enable(dev_priv, pll, enable_reg); /* DVFS post sequence would be here. See the comment above. */ } static void icl_pll_disable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + i915_reg_t enable_reg) { - const enum intel_dpll_id id = pll->info->id; - i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id); - uint32_t val; + u32 val; /* The first steps are done by intel_ddi_post_disable(). */ @@ -3133,8 +3149,9 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, I915_WRITE(enable_reg, val); /* Timeout is actually 1us. */ - if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1)) - DRM_ERROR("PLL %d locked\n", id); + if (intel_wait_for_register(&dev_priv->uncore, + enable_reg, PLL_LOCK, 0, 1)) + DRM_ERROR("PLL %d locked\n", pll->info->id); /* DVFS post sequence would be here. See the comment above. */ @@ -3146,9 +3163,30 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, * The spec says we need to "wait" but it also says it should be * immediate. 
*/ - if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0, - 1)) - DRM_ERROR("PLL %d Power not disabled\n", id); + if (intel_wait_for_register(&dev_priv->uncore, + enable_reg, PLL_POWER_STATE, 0, 1)) + DRM_ERROR("PLL %d Power not disabled\n", pll->info->id); +} + +static void combo_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id)); +} + +static void tbt_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE); +} + +static void mg_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + i915_reg_t enable_reg = + MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id)); + + icl_pll_disable(dev_priv, pll, enable_reg); } static void icl_dump_hw_state(struct drm_i915_private *dev_priv, @@ -3173,20 +3211,32 @@ static void icl_dump_hw_state(struct drm_i915_private *dev_priv, hw_state->mg_pll_tdc_coldst_bias); } -static const struct intel_shared_dpll_funcs icl_pll_funcs = { - .enable = icl_pll_enable, - .disable = icl_pll_disable, - .get_hw_state = icl_pll_get_hw_state, +static const struct intel_shared_dpll_funcs combo_pll_funcs = { + .enable = combo_pll_enable, + .disable = combo_pll_disable, + .get_hw_state = combo_pll_get_hw_state, +}; + +static const struct intel_shared_dpll_funcs tbt_pll_funcs = { + .enable = tbt_pll_enable, + .disable = tbt_pll_disable, + .get_hw_state = tbt_pll_get_hw_state, +}; + +static const struct intel_shared_dpll_funcs mg_pll_funcs = { + .enable = mg_pll_enable, + .disable = mg_pll_disable, + .get_hw_state = mg_pll_get_hw_state, }; static const struct dpll_info icl_plls[] = { - { "DPLL 0", &icl_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, - { "DPLL 1", &icl_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, - { "TBT PLL", &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, - { "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, - { "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, - { "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, - { "MG PLL 4", &icl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, + { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, + { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, + { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, + { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, + { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, + { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, { }, }; @@ -3196,6 +3246,18 @@ static const struct intel_dpll_mgr icl_pll_mgr = { .dump_hw_state = icl_dump_hw_state, }; +static const struct dpll_info ehl_plls[] = { + { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, + { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { }, +}; + +static const struct intel_dpll_mgr ehl_pll_mgr = { + .dpll_info = ehl_plls, + .get_dpll = icl_get_dpll, + .dump_hw_state = icl_dump_hw_state, +}; + /** * intel_shared_dpll_init - Initialize shared DPLLs * @dev: drm device @@ -3209,7 +3271,9 @@ void intel_shared_dpll_init(struct drm_device *dev) const struct dpll_info *dpll_info; int i; - if (IS_ICELAKE(dev_priv)) + if (IS_ELKHARTLAKE(dev_priv)) + dpll_mgr = &ehl_pll_mgr; + else if (INTEL_GEN(dev_priv) >= 11) dpll_mgr = &icl_pll_mgr; else if (IS_CANNONLAKE(dev_priv)) dpll_mgr = &cnl_pll_mgr; @@ -3247,31 +3311,29 @@ void intel_shared_dpll_init(struct drm_device *dev) /** * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination - * @crtc: CRTC - * @crtc_state: 
atomic state for @crtc + * @crtc_state: atomic state for the crtc * @encoder: encoder * * Find an appropriate DPLL for the given CRTC and encoder combination. A - * reference from the @crtc to the returned pll is registered in the atomic - * state. That configuration is made effective by calling + * reference from the @crtc_state to the returned pll is registered in the + * atomic state. That configuration is made effective by calling * intel_shared_dpll_swap_state(). The reference should be released by calling * intel_release_shared_dpll(). * * Returns: - * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state. + * A shared DPLL to be used by @crtc_state and @encoder. */ struct intel_shared_dpll * -intel_get_shared_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, +intel_get_shared_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; if (WARN_ON(!dpll_mgr)) return NULL; - return dpll_mgr->get_dpll(crtc, crtc_state, encoder); + return dpll_mgr->get_dpll(crtc_state, encoder); } /** diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index a033d8f06d4a..bd8124cc81ed 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -138,14 +138,14 @@ enum intel_dpll_id { struct intel_dpll_hw_state { /* i9xx, pch plls */ - uint32_t dpll; - uint32_t dpll_md; - uint32_t fp0; - uint32_t fp1; + u32 dpll; + u32 dpll_md; + u32 fp0; + u32 fp1; /* hsw, bdw */ - uint32_t wrpll; - uint32_t spll; + u32 wrpll; + u32 spll; /* skl */ /* @@ -154,34 +154,33 @@ struct intel_dpll_hw_state { * the register. This allows us to easily compare the state to share * the DPLL. */ - uint32_t ctrl1; + u32 ctrl1; /* HDMI only, 0 when used for DP */ - uint32_t cfgcr1, cfgcr2; + u32 cfgcr1, cfgcr2; /* cnl */ - uint32_t cfgcr0; + u32 cfgcr0; /* CNL also uses cfgcr1 */ /* bxt */ - uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, - pcsdw12; + u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12; /* * ICL uses the following, already defined: - * uint32_t cfgcr0, cfgcr1; - */ - uint32_t mg_refclkin_ctl; - uint32_t mg_clktop2_coreclkctl1; - uint32_t mg_clktop2_hsclkctl; - uint32_t mg_pll_div0; - uint32_t mg_pll_div1; - uint32_t mg_pll_lf; - uint32_t mg_pll_frac_lock; - uint32_t mg_pll_ssc; - uint32_t mg_pll_bias; - uint32_t mg_pll_tdc_coldst_bias; - uint32_t mg_pll_bias_mask; - uint32_t mg_pll_tdc_coldst_bias_mask; + * u32 cfgcr0, cfgcr1; + */ + u32 mg_refclkin_ctl; + u32 mg_clktop2_coreclkctl1; + u32 mg_clktop2_hsclkctl; + u32 mg_pll_div0; + u32 mg_pll_div1; + u32 mg_pll_lf; + u32 mg_pll_frac_lock; + u32 mg_pll_ssc; + u32 mg_pll_bias; + u32 mg_pll_tdc_coldst_bias; + u32 mg_pll_bias_mask; + u32 mg_pll_tdc_coldst_bias_mask; }; /** @@ -280,7 +279,7 @@ struct dpll_info { * Inform the state checker that the DPLL is kept enabled even if * not in use by any CRTC. 
*/ - uint32_t flags; + u32 flags; }; /** @@ -328,8 +327,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool state); #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) -struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *state, +struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state, struct intel_encoder *encoder); void intel_release_shared_dpll(struct intel_shared_dpll *dpll, struct intel_crtc *crtc, @@ -342,10 +340,8 @@ void intel_shared_dpll_init(struct drm_device *dev); void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state); -int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, - uint32_t pll_id); int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); -enum intel_dpll_id icl_port_to_mg_pll_id(enum port port); +enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port); bool intel_dpll_is_combophy(enum intel_dpll_id id); #endif /* _INTEL_DPLL_MGR_H_ */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f94a04b4ad87..64544613920b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -29,18 +29,23 @@ #include <linux/i2c.h> #include <linux/hdmi.h> #include <linux/sched/clock.h> +#include <linux/stackdepot.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/drm_dp_dual_mode_helper.h> #include <drm/drm_dp_mst_helper.h> +#include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> +#include <drm/drm_vblank.h> #include <drm/drm_atomic.h> +#include <drm/i915_mei_hdcp_interface.h> #include <media/cec-notifier.h> +struct drm_printer; + /** * __wait_for - magic wait macro * @@ -209,6 +214,16 @@ struct intel_fbdev { unsigned long vma_flags; async_cookie_t cookie; int preferred_bpp; + + /* Whether or not fbdev hpd processing is temporarily suspended */ + bool hpd_suspended : 1; + /* Set when a hotplug was received while HPD processing was + * suspended + */ + bool hpd_waiting : 1; + + /* Protects hpd_suspended */ + struct mutex hpd_lock; }; struct intel_encoder { @@ -222,9 +237,9 @@ struct intel_encoder { enum intel_output_type (*compute_output_type)(struct intel_encoder *, struct intel_crtc_state *, struct drm_connector_state *); - bool (*compute_config)(struct intel_encoder *, - struct intel_crtc_state *, - struct drm_connector_state *); + int (*compute_config)(struct intel_encoder *, + struct intel_crtc_state *, + struct drm_connector_state *); void (*pre_pll_enable)(struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); @@ -243,6 +258,9 @@ struct intel_encoder { void (*post_pll_disable)(struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); + void (*update_pipe)(struct intel_encoder *, + const struct intel_crtc_state *, + const struct drm_connector_state *); /* Read out the current hw state of this connector, returning true if * the encoder is active. If the encoder is enabled it also set the pipe * it is connected to in the pipe parameter. 
*/ @@ -294,19 +312,25 @@ struct intel_panel { /* Connector and platform specific backlight functions */ int (*setup)(struct intel_connector *connector, enum pipe pipe); - uint32_t (*get)(struct intel_connector *connector); - void (*set)(const struct drm_connector_state *conn_state, uint32_t level); + u32 (*get)(struct intel_connector *connector); + void (*set)(const struct drm_connector_state *conn_state, u32 level); void (*disable)(const struct drm_connector_state *conn_state); void (*enable)(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); - uint32_t (*hz_to_pwm)(struct intel_connector *connector, - uint32_t hz); + u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz); void (*power)(struct intel_connector *, bool enable); } backlight; }; struct intel_digital_port; +enum check_link_response { + HDCP_LINK_PROTECTED = 0, + HDCP_TOPOLOGY_CHANGE, + HDCP_LINK_INTEGRITY_FAILURE, + HDCP_REAUTH_REQUEST +}; + /* * This structure serves as a translation layer between the generic HDCP code * and the bus-specific code. What that means is that HDCP over HDMI differs @@ -379,6 +403,32 @@ struct intel_hdcp_shim { /* Detects panel's hdcp capability. This is optional for HDMI. */ int (*hdcp_capable)(struct intel_digital_port *intel_dig_port, bool *hdcp_capable); + + /* HDCP adaptation(DP/HDMI) required on the port */ + enum hdcp_wired_protocol protocol; + + /* Detects whether sink is HDCP2.2 capable */ + int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port, + bool *capable); + + /* Write HDCP2.2 messages */ + int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port, + void *buf, size_t size); + + /* Read HDCP2.2 messages */ + int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port, + u8 msg_id, void *buf, size_t size); + + /* + * Implementation of DP HDCP2.2 Errata for the communication of stream + * type to Receivers. In DP HDCP2.2 Stream type is one of the input to + * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI. + */ + int (*config_stream_type)(struct intel_digital_port *intel_dig_port, + bool is_repeater, u8 type); + + /* HDCP2.2 Link Integrity Check */ + int (*check_2_2_link)(struct intel_digital_port *intel_dig_port); }; struct intel_hdcp { @@ -388,6 +438,50 @@ struct intel_hdcp { u64 value; struct delayed_work check_work; struct work_struct prop_work; + + /* HDCP1.4 Encryption status */ + bool hdcp_encrypted; + + /* HDCP2.2 related definitions */ + /* Flag indicates whether this connector supports HDCP2.2 or not. */ + bool hdcp2_supported; + + /* HDCP2.2 Encryption status */ + bool hdcp2_encrypted; + + /* + * Content Stream Type defined by content owner. TYPE0(0x0) content can + * flow in the link protected by HDCP2.2 or HDCP1.4, where as TYPE1(0x1) + * content can flow only through a link protected by HDCP2.2. + */ + u8 content_type; + struct hdcp_port_data port_data; + + bool is_paired; + bool is_repeater; + + /* + * Count of ReceiverID_List received. Initialized to 0 at AKE_INIT. + * Incremented after processing the RepeaterAuth_Send_ReceiverID_List. + * When it rolls over re-auth has to be triggered. + */ + u32 seq_num_v; + + /* + * Count of RepeaterAuth_Stream_Manage msg propagated. + * Initialized to 0 on AKE_INIT. Incremented after every successful + * transmission of RepeaterAuth_Stream_Manage message. When it rolls + * over re-Auth has to be triggered. + */ + u32 seq_num_m; + + /* + * Work queue to signal the CP_IRQ. Used for the waiters to read the + * available information from HDCP DP sink. 
+ */ + wait_queue_head_t cp_irq_queue; + atomic_t cp_irq_count; + int cp_irq_count_cached; }; struct intel_connector { @@ -462,6 +556,11 @@ struct intel_atomic_state { * state only when all crtc's are DPMS off. */ struct intel_cdclk_state actual; + + int force_min_cdclk; + bool force_min_cdclk_changed; + /* pipe to which cd2x update is synchronized */ + enum pipe pipe; } cdclk; bool dpll_set, modeset; @@ -592,7 +691,7 @@ struct intel_initial_plane_config { struct intel_scaler { int in_use; - uint32_t mode; + u32 mode; }; struct intel_crtc_scaler_state { @@ -624,13 +723,15 @@ struct intel_crtc_scaler_state { }; /* drm_mode->private_flags */ -#define I915_MODE_FLAG_INHERITED 1 +#define I915_MODE_FLAG_INHERITED (1<<0) /* Flag to get scanline using frame time stamps */ #define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1) +/* Flag to use the scanline counter instead of the pixel counter */ +#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2) struct intel_pipe_wm { struct intel_wm_level wm[5]; - uint32_t linetime; + u32 linetime; bool fbc_wm_enabled; bool pipe_enabled; bool sprites_enabled; @@ -646,7 +747,7 @@ struct skl_plane_wm { struct skl_pipe_wm { struct skl_plane_wm planes[I915_MAX_PLANES]; - uint32_t linetime; + u32 linetime; }; enum vlv_wm_level { @@ -659,7 +760,7 @@ enum vlv_wm_level { struct vlv_wm_state { struct g4x_pipe_wm wm[NUM_VLV_WM_LEVELS]; struct g4x_sr_wm sr[NUM_VLV_WM_LEVELS]; - uint8_t num_levels; + u8 num_levels; bool cxsr; }; @@ -872,13 +973,13 @@ struct intel_crtc_state { /* Used by SDVO (and if we ever fix it, HDMI). */ unsigned pixel_multiplier; - uint8_t lane_count; + u8 lane_count; /* * Used by platforms having DP/HDMI PHY with programmable lane * latency optimization. */ - uint8_t lane_lat_optim_mask; + u8 lane_lat_optim_mask; /* minimum acceptable voltage level */ u8 min_voltage_level; @@ -903,7 +1004,8 @@ struct intel_crtc_state { struct intel_link_m_n fdi_m_n; bool ips_enabled; - bool ips_force_disable; + + bool crc_enabled; bool enable_fbc; @@ -922,15 +1024,32 @@ struct intel_crtc_state { struct intel_crtc_wm_state wm; /* Gamma mode programmed on the pipe */ - uint32_t gamma_mode; + u32 gamma_mode; + + union { + /* CSC mode programmed on the pipe */ + u32 csc_mode; + + /* CHV CGM mode */ + u32 cgm_mode; + }; /* bitmask of visible planes (enum plane_id) */ u8 active_planes; u8 nv12_planes; + u8 c8_planes; /* bitmask of planes that will be updated during the commit */ u8 update_planes; + struct { + u32 enable; + u32 gcp; + union hdmi_infoframe avi; + union hdmi_infoframe spd; + union hdmi_infoframe hdmi; + } infoframes; + /* HDMI scrambling status */ bool hdmi_scrambling; @@ -943,6 +1062,12 @@ struct intel_crtc_state { /* Output down scaling is done in LSPCON device */ bool lspcon_downsampling; + /* enable pipe gamma? */ + bool gamma_enable; + + /* enable pipe csc? */ + bool csc_enable; + /* Display Stream compression state */ struct { bool compression_enable; @@ -971,9 +1096,6 @@ struct intel_crtc { struct intel_crtc_state *config; - /* global reset count when the last flip was submitted */ - unsigned int reset_count; - /* Access to these should be protected by dev_priv->irq_lock. 
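The cp_irq_queue, cp_irq_count and cp_irq_count_cached fields added to struct intel_hdcp above suggest the usual counted-wakeup handshake: the CP_IRQ handler bumps the counter and wakes the queue, while a waiter snapshots the counter before issuing a DPCD message and sleeps until it changes. The pthread sketch below models only that idiom; hdcp_state, handle_cp_irq() and wait_for_cp_irq() are invented names, and the usage is inferred from the field comments rather than taken from the driver.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the cp_irq_* fields added to struct intel_hdcp. */
struct hdcp_state {
	pthread_mutex_t lock;
	pthread_cond_t cp_irq_queue;    /* models wait_queue_head_t */
	unsigned int cp_irq_count;      /* models atomic_t cp_irq_count */
	unsigned int cp_irq_count_cached;
};

/* IRQ side: bump the counter and wake any waiter (hypothetical helper). */
static void handle_cp_irq(struct hdcp_state *h)
{
	pthread_mutex_lock(&h->lock);
	h->cp_irq_count++;
	pthread_cond_broadcast(&h->cp_irq_queue);
	pthread_mutex_unlock(&h->lock);
}

/* Waiter side: snapshot before sending, then wait for the count to move. */
static void wait_for_cp_irq(struct hdcp_state *h)
{
	pthread_mutex_lock(&h->lock);
	while (h->cp_irq_count == h->cp_irq_count_cached)
		pthread_cond_wait(&h->cp_irq_queue, &h->lock);
	h->cp_irq_count_cached = h->cp_irq_count;
	pthread_mutex_unlock(&h->lock);
}

static void *irq_thread(void *arg)
{
	sleep(1);               /* pretend the sink raises CP_IRQ later */
	handle_cp_irq(arg);
	return NULL;
}

int main(void)
{
	struct hdcp_state h = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cp_irq_queue = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	h.cp_irq_count_cached = h.cp_irq_count;  /* snapshot, then "send" msg */
	pthread_create(&t, NULL, irq_thread, &h);
	wait_for_cp_irq(&h);
	printf("CP_IRQ seen, sink data ready to read\n");
	pthread_join(&t, NULL);
	return 0;
}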
*/ bool cpu_fifo_underrun_disabled; bool pch_fifo_underrun_disabled; @@ -1008,7 +1130,7 @@ struct intel_plane { enum pipe pipe; bool has_fbc; bool has_ccs; - uint32_t frontbuffer_bit; + u32 frontbuffer_bit; struct { u32 base, cntl, size; @@ -1074,7 +1196,6 @@ struct intel_hdmi { } dp_dual_mode; bool has_hdmi_sink; bool has_audio; - bool rgb_quant_range_selectable; struct intel_connector *attached_connector; struct cec_notifier *cec_notifier; }; @@ -1104,9 +1225,9 @@ enum link_m_n_set { struct intel_dp_compliance_data { unsigned long edid; - uint8_t video_pattern; - uint16_t hdisplay, vdisplay; - uint8_t bpc; + u8 video_pattern; + u16 hdisplay, vdisplay; + u8 bpc; }; struct intel_dp_compliance { @@ -1119,18 +1240,18 @@ struct intel_dp_compliance { struct intel_dp { i915_reg_t output_reg; - uint32_t DP; + u32 DP; int link_rate; - uint8_t lane_count; - uint8_t sink_count; + u8 lane_count; + u8 sink_count; bool link_mst; bool link_trained; bool has_audio; bool reset_link_params; - uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; - uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; - uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; - uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]; u8 fec_capable; /* source rates */ @@ -1150,7 +1271,7 @@ struct intel_dp { /* sink or branch descriptor */ struct drm_dp_desc desc; struct drm_dp_aux aux; - uint8_t train_set[4]; + u8 train_set[4]; int panel_power_up_delay; int panel_power_down_delay; int panel_power_cycle_delay; @@ -1192,14 +1313,13 @@ struct intel_dp { struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES]; struct drm_dp_mst_topology_mgr mst_mgr; - uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index); + u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index); /* * This function returns the value we have to program the AUX_CTL * register with to kick off an AUX transaction. */ - uint32_t (*get_aux_send_ctl)(struct intel_dp *dp, - int send_bytes, - uint32_t aux_clock_divider); + u32 (*get_aux_send_ctl)(struct intel_dp *dp, int send_bytes, + u32 aux_clock_divider); i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp); i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index); @@ -1209,6 +1329,9 @@ struct intel_dp { /* Displayport compliance testing */ struct intel_dp_compliance compliance; + + /* Display stream compression testing */ + bool force_dsc_en; }; enum lspcon_vendor { @@ -1230,21 +1353,26 @@ struct intel_digital_port { struct intel_lspcon lspcon; enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); bool release_cl2_override; - uint8_t max_lanes; + u8 max_lanes; /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. 
*/ enum aux_ch aux_ch; enum intel_display_power_domain ddi_io_power_domain; + bool tc_legacy_port:1; enum tc_port_type tc_type; void (*write_infoframe)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len); + void (*read_infoframe)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len); void (*set_infoframes)(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); - bool (*infoframe_enabled)(struct intel_encoder *encoder, + u32 (*infoframes_enabled)(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); }; @@ -1464,8 +1592,8 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv); void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv); /* i915_irq.c */ -void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); +void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv); @@ -1528,7 +1656,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state); u32 bxt_signal_levels(struct intel_dp *intel_dp); -uint32_t ddi_signal_levels(struct intel_dp *intel_dp); +u32 ddi_signal_levels(struct intel_dp *intel_dp); u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder); u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing); @@ -1536,7 +1664,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, bool enable); void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, - enum intel_dpll_id pll_id); + struct intel_dpll_hw_state *state); unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, int color_plane, unsigned int height); @@ -1568,12 +1696,24 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv); void intel_update_max_cdclk(struct drm_i915_private *dev_priv); void intel_update_cdclk(struct drm_i915_private *dev_priv); void intel_update_rawclk(struct drm_i915_private *dev_priv); +bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, + const struct intel_cdclk_state *a, + const struct intel_cdclk_state *b); bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a, const struct intel_cdclk_state *b); bool intel_cdclk_changed(const struct intel_cdclk_state *a, const struct intel_cdclk_state *b); -void intel_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state); +void intel_cdclk_swap_state(struct intel_atomic_state *state); +void +intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv, + const struct intel_cdclk_state *old_state, + const struct intel_cdclk_state *new_state, + enum pipe pipe); +void +intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv, + const struct intel_cdclk_state *old_state, + const struct intel_cdclk_state *new_state, + enum pipe pipe); void 
intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state, const char *context); @@ -1668,11 +1808,11 @@ void intel_cleanup_plane_fb(struct drm_plane *plane, int intel_plane_atomic_get_property(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, - uint64_t *val); + u64 *val); int intel_plane_atomic_set_property(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, - uint64_t val); + u64 val); int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, struct drm_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, @@ -1718,7 +1858,7 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); -bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, +bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock); int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); @@ -1746,9 +1886,10 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state) u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); +u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state); u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); -u32 glk_color_ctl(const struct intel_plane_state *plane_state); +u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state); u32 skl_plane_stride(const struct intel_plane_state *plane_state, int plane); int skl_check_plane_surface(struct intel_plane_state *plane_state); @@ -1775,6 +1916,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); void intel_attach_force_audio_property(struct drm_connector *connector); void intel_attach_broadcast_rgb_property(struct drm_connector *connector); void intel_attach_aspect_ratio_property(struct drm_connector *connector); +void intel_attach_colorspace_property(struct drm_connector *connector); /* intel_csr.c */ void intel_csr_ucode_init(struct drm_i915_private *); @@ -1784,6 +1926,16 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); void intel_csr_ucode_resume(struct drm_i915_private *); /* intel_dp.c */ +struct link_config_limits { + int min_clock, max_clock; + int min_lane_count, max_lane_count; + int min_bpp, max_bpp; +}; +void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct link_config_limits *limits); +bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t dp_reg, enum port port, enum pipe *pipe); @@ -1792,10 +1944,10 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, - int link_rate, uint8_t lane_count, + int link_rate, u8 lane_count, bool link_mst); int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, - int link_rate, uint8_t lane_count); + int link_rate, u8 lane_count); void intel_dp_start_link_train(struct intel_dp *intel_dp); void intel_dp_stop_link_train(struct intel_dp *intel_dp); int 
intel_dp_retrain_link(struct intel_encoder *encoder, @@ -1806,10 +1958,10 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, bool enable); void intel_dp_encoder_reset(struct drm_encoder *encoder); void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); -void intel_dp_encoder_destroy(struct drm_encoder *encoder); -bool intel_dp_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state); +void intel_dp_encoder_flush_work(struct drm_encoder *encoder); +int intel_dp_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state); bool intel_dp_is_edp(struct intel_dp *intel_dp); bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port); enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, @@ -1827,7 +1979,7 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); void intel_dp_hot_plug(struct intel_encoder *intel_encoder); void intel_power_sequencer_reset(struct drm_i915_private *dev_priv); -uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); +u32 intel_dp_pack_aux(const u8 *src, int src_bytes); void intel_plane_destroy(struct drm_plane *plane); void intel_edp_drrs_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); @@ -1840,24 +1992,24 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, - uint8_t dp_train_pat); + u8 dp_train_pat); void intel_dp_set_signal_levels(struct intel_dp *intel_dp); void intel_dp_set_idle_link_train(struct intel_dp *intel_dp); -uint8_t +u8 intel_dp_voltage_max(struct intel_dp *intel_dp); -uint8_t -intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing); +u8 +intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, - uint8_t *link_bw, uint8_t *rate_select); + u8 *link_bw, u8 *rate_select); bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); bool -intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); -uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count, - int mode_clock, int mode_hdisplay); -uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, - int mode_hdisplay); +intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]); +u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count, + int mode_clock, int mode_hdisplay); +u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, + int mode_hdisplay); /* intel_vdsc.c */ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, @@ -1874,6 +2026,8 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_max_data_rate(int max_link_clock, int max_lanes); bool intel_digital_port_connected(struct intel_encoder *encoder); +void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, + struct intel_digital_port *dig_port); /* intel_dp_aux_backlight.c */ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); @@ -1967,22 +2121,31 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, void 
intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); -bool intel_hdmi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state); +int intel_hdmi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state); bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, struct drm_connector *connector, bool high_tmds_clock_ratio, bool scrambling); void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); void intel_infoframe_init(struct intel_digital_port *intel_dig_port); +u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +u32 intel_hdmi_infoframe_enable(unsigned int type); +void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); +void intel_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + enum hdmi_infoframe_type type, + union hdmi_infoframe *frame); /* intel_lvds.c */ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t lvds_reg, enum pipe *pipe); void intel_lvds_init(struct drm_i915_private *dev_priv); -struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); -bool intel_is_dual_link_lvds(struct drm_device *dev); +struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv); +bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv); /* intel_overlay.c */ void intel_overlay_setup(struct drm_i915_private *dev_priv); @@ -2014,11 +2177,17 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe); void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); +void intel_panel_update_backlight(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); -extern struct drm_display_mode *intel_find_panel_downclock( - struct drm_i915_private *dev_priv, - struct drm_display_mode *fixed_mode, - struct drm_connector *connector); +struct drm_display_mode * +intel_panel_edid_downclock_mode(struct intel_connector *connector, + const struct drm_display_mode *fixed_mode); +struct drm_display_mode * +intel_panel_edid_fixed_mode(struct intel_connector *connector); +struct drm_display_mode * +intel_panel_vbt_fixed_mode(struct intel_connector *connector); #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) int intel_backlight_device_register(struct intel_connector *connector); @@ -2041,9 +2210,12 @@ int intel_hdcp_init(struct intel_connector *connector, const struct intel_hdcp_shim *hdcp_shim); int intel_hdcp_enable(struct intel_connector *connector); int intel_hdcp_disable(struct intel_connector *connector); -int intel_hdcp_check_link(struct intel_connector *connector); bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); bool intel_hdcp_capable(struct intel_connector *connector); +void intel_hdcp_component_init(struct drm_i915_private *dev_priv); +void intel_hdcp_component_fini(struct drm_i915_private *dev_priv); +void intel_hdcp_cleanup(struct intel_connector *connector); +void intel_hdcp_handle_cp_irq(struct intel_connector *connector); /* intel_psr.c */ 
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support) @@ -2052,9 +2224,9 @@ void intel_psr_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state); -int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, - struct drm_modeset_acquire_ctx *ctx, - u64 value); +void intel_psr_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value); void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin); @@ -2075,6 +2247,7 @@ bool intel_psr_enabled(struct intel_dp *intel_dp); void intel_init_quirks(struct drm_i915_private *dev_priv); /* intel_runtime_pm.c */ +void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv); int intel_power_domains_init(struct drm_i915_private *); void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); @@ -2097,6 +2270,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); void bxt_display_core_uninit(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); void intel_runtime_pm_disable(struct drm_i915_private *dev_priv); +void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv); const char * intel_display_power_domain_str(enum intel_display_power_domain domain); @@ -2104,33 +2278,48 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); -void intel_display_power_get(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, +intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); +intel_wakeref_t +intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void intel_display_power_put(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); + enum intel_display_power_domain domain, + intel_wakeref_t wakeref); +#else +#define intel_display_power_put(i915, domain, wakeref) \ + intel_display_power_put_unchecked(i915, domain) +#endif void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, u8 req_slices); static inline void -assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv) +assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm) { - WARN_ONCE(dev_priv->runtime_pm.suspended, + WARN_ONCE(rpm->suspended, "Device suspended during HW access\n"); } static inline void -assert_rpm_wakelock_held(struct drm_i915_private *dev_priv) +__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm) { - assert_rpm_device_not_suspended(dev_priv); - WARN_ONCE(!atomic_read(&dev_priv->runtime_pm.wakeref_count), + assert_rpm_device_not_suspended(rpm); + WARN_ONCE(!atomic_read(&rpm->wakeref_count), "RPM wakelock ref not held during HW access"); } +static inline void +assert_rpm_wakelock_held(struct drm_i915_private *i915) +{ + 
__assert_rpm_wakelock_held(&i915->runtime_pm); +} + /** * disable_rpm_wakeref_asserts - disable the RPM assert checks - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function disable asserts that check if we hold an RPM wakelock * reference, while keeping the device-not-suspended checks still enabled. @@ -2147,14 +2336,14 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv) * enable_rpm_wakeref_asserts(). */ static inline void -disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) +disable_rpm_wakeref_asserts(struct drm_i915_private *i915) { - atomic_inc(&dev_priv->runtime_pm.wakeref_count); + atomic_inc(&i915->runtime_pm.wakeref_count); } /** * enable_rpm_wakeref_asserts - re-enable the RPM assert checks - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function re-enables the RPM assert checks after disabling them with * disable_rpm_wakeref_asserts. It's meant to be used only in special @@ -2164,15 +2353,39 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) * disable_rpm_wakeref_asserts(). */ static inline void -enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) +enable_rpm_wakeref_asserts(struct drm_i915_private *i915) { - atomic_dec(&dev_priv->runtime_pm.wakeref_count); + atomic_dec(&i915->runtime_pm.wakeref_count); } -void intel_runtime_pm_get(struct drm_i915_private *dev_priv); -bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv); -void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); -void intel_runtime_pm_put(struct drm_i915_private *dev_priv); +intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915); +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915); +intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915); + +#define with_intel_runtime_pm(i915, wf) \ + for ((wf) = intel_runtime_pm_get(i915); (wf); \ + intel_runtime_pm_put((i915), (wf)), (wf) = 0) + +#define with_intel_runtime_pm_if_in_use(i915, wf) \ + for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \ + intel_runtime_pm_put((i915), (wf)), (wf) = 0) + +void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref); +#else +#define intel_runtime_pm_put(i915, wref) intel_runtime_pm_put_unchecked(i915) +#endif + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, + struct drm_printer *p); +#else +static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, + struct drm_printer *p) +{ +} +#endif void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask); @@ -2195,21 +2408,20 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv); void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); -void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); -void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps); -void g4x_wm_get_hw_state(struct drm_device *dev); -void vlv_wm_get_hw_state(struct drm_device *dev); -void 
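intel_runtime_pm_get() now returns an intel_wakeref_t cookie that intel_runtime_pm_put() consumes, and the with_intel_runtime_pm() macro above turns that pair into a scoped block via a for-loop that acquires, runs the body once, then releases and clears the cookie. A minimal standalone model of that macro idiom follows; the wakeref_t typedef, device_priv struct and get/put stubs are stand-ins, only the for-loop shape mirrors the patch.

#include <stdio.h>

typedef unsigned long wakeref_t;        /* stand-in for intel_wakeref_t */

struct device_priv {                    /* stand-in for drm_i915_private */
	int wakeref_count;
};

static wakeref_t runtime_pm_get(struct device_priv *d)
{
	d->wakeref_count++;
	printf("get  (count=%d)\n", d->wakeref_count);
	return (wakeref_t)1;            /* non-zero cookie keeps the loop body alive */
}

static void runtime_pm_put(struct device_priv *d, wakeref_t wf)
{
	(void)wf;                       /* a tracking build would check the cookie */
	d->wakeref_count--;
	printf("put  (count=%d)\n", d->wakeref_count);
}

/* Same shape as with_intel_runtime_pm(): run the body once, then put. */
#define with_runtime_pm(d, wf) \
	for ((wf) = runtime_pm_get(d); (wf); runtime_pm_put((d), (wf)), (wf) = 0)

int main(void)
{
	struct device_priv dev = { 0 };
	wakeref_t wf;

	with_runtime_pm(&dev, wf) {
		printf("doing HW access under wakeref %lu\n", (unsigned long)wf);
	}

	return 0;
}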
ilk_wm_get_hw_state(struct drm_device *dev); -void skl_wm_get_hw_state(struct drm_device *dev); +void gen6_rps_boost(struct i915_request *rq); +void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); +void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); +void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); +void skl_wm_get_hw_state(struct drm_i915_private *dev_priv); void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, struct skl_ddb_entry *ddb_y, struct skl_ddb_entry *ddb_uv); void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */); -void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, +void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); @@ -2239,6 +2451,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, /* intel_sprite.c */ +bool is_planar_yuv_format(u32 pixelformat); int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs); struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, @@ -2263,12 +2476,13 @@ static inline bool icl_is_nv12_y_plane(enum plane_id id) return false; } -static inline bool icl_is_hdr_plane(struct intel_plane *plane) +static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, + enum plane_id plane_id) { - if (INTEL_GEN(to_i915(plane->base.dev)) < 11) + if (INTEL_GEN(dev_priv) < 11) return false; - return plane->id < PLANE_SPRITE2; + return plane_id < PLANE_SPRITE2; } /* intel_tv.c */ @@ -2278,11 +2492,11 @@ void intel_tv_init(struct drm_i915_private *dev_priv); int intel_digital_connector_atomic_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, - uint64_t *val); + u64 *val); int intel_digital_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, - uint64_t val); + u64 val); int intel_digital_connector_atomic_check(struct drm_connector *conn, struct drm_connector_state *new_state); struct drm_connector_state * @@ -2311,6 +2525,14 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state); /* intel_atomic_plane.c */ +void intel_update_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_update_slave(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_disable_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); struct intel_plane *intel_plane_alloc(void); void intel_plane_free(struct intel_plane *plane); struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); @@ -2327,10 +2549,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ struct intel_plane_state *intel_state); /* intel_color.c */ -void intel_color_init(struct drm_crtc *crtc); -int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state); -void intel_color_set_csc(struct drm_crtc_state *crtc_state); -void intel_color_load_luts(struct drm_crtc_state *crtc_state); +void intel_color_init(struct intel_crtc *crtc); +int intel_color_check(struct intel_crtc_state *crtc_state); +void intel_color_commit(const struct intel_crtc_state *crtc_state); +void intel_color_load_luts(const struct 
intel_crtc_state *crtc_state); /* intel_lspcon.c */ bool lspcon_init(struct intel_digital_port *intel_dig_port); @@ -2340,11 +2562,15 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *buf, ssize_t len); +void lspcon_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len); void lspcon_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -bool lspcon_infoframe_enabled(struct intel_encoder *encoder, +u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); void lspcon_ycbcr420_config(struct drm_connector *connector, struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index d968f1f13e09..705a609050c0 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -24,7 +24,6 @@ #ifndef _INTEL_DSI_H #define _INTEL_DSI_H -#include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_mipi_dsi.h> #include "intel_drv.h" @@ -40,6 +39,7 @@ struct intel_dsi { struct intel_encoder base; struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS]; + intel_wakeref_t io_wakeref[I915_MAX_PORTS]; /* GPIO Desc for CRC based Panel control */ struct gpio_desc *gpio_panel; @@ -173,7 +173,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, void vlv_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config); void vlv_dsi_pll_disable(struct intel_encoder *encoder); -u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, +u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config); void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); @@ -183,13 +183,12 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, void bxt_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config); void bxt_dsi_pll_disable(struct intel_encoder *encoder); -u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, +u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config); void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); /* intel_dsi_vbt.c */ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); -int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi); void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, enum mipi_seq seq_id); void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index a1a8b3790e61..3074448446bc 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c @@ -24,15 +24,15 @@ * */ -#include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/i915_drm.h> #include <linux/gpio/consumer.h> +#include <linux/mfd/intel_soc_pmic.h> #include <linux/slab.h> #include <video/mipi_display.h> #include <asm/intel-mid.h> -#include <video/mipi_display.h> +#include <asm/unaligned.h> #include "i915_drv.h" #include "intel_drv.h" #include "intel_dsi.h" @@ -194,7 +194,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, break; } - if (!IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) < 11) vlv_dsi_wait_for_fifo_empty(intel_dsi, port); out: @@ -365,7 +365,7 @@ static const u8 
*mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) /* pull up/down */ value = *data++ & 1; - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_exec_gpio(dev_priv, gpio_source, gpio_index, value); else if (IS_VALLEYVIEW(dev_priv)) vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value); @@ -393,7 +393,25 @@ static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data) static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data) { - DRM_DEBUG_KMS("Skipping PMIC element execution\n"); +#ifdef CONFIG_PMIC_OPREGION + u32 value, mask, reg_address; + u16 i2c_address; + int ret; + + /* byte 0 aka PMIC Flag is reserved */ + i2c_address = get_unaligned_le16(data + 1); + reg_address = get_unaligned_le32(data + 3); + value = get_unaligned_le32(data + 7); + mask = get_unaligned_le32(data + 11); + + ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address, + reg_address, + value, mask); + if (ret) + DRM_ERROR("%s failed, error: %d\n", __func__, ret); +#else + DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n"); +#endif return data + 15; } @@ -514,24 +532,6 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) msleep(msec); } -int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi) -{ - struct intel_connector *connector = intel_dsi->attached_connector; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); - if (!mode) - return 0; - - mode->type |= DRM_MODE_TYPE_PREFERRED; - - drm_mode_probed_add(&connector->base, mode); - - return 1; -} - #define ICL_PREPARE_CNT_MAX 0x7 #define ICL_CLK_ZERO_CNT_MAX 0xf #define ICL_TRAIL_CNT_MAX 0x7 @@ -872,7 +872,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->burst_mode_ratio = burst_mode_ratio; - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_dphy_param_init(intel_dsi); else vlv_dphy_param_init(intel_dsi); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 0042a7f69387..a6c82482a841 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -26,7 +26,6 @@ */ #include <linux/i2c.h> #include <linux/slab.h> -#include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include "intel_drv.h" @@ -235,9 +234,9 @@ intel_dvo_mode_valid(struct drm_connector *connector, return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); } -static bool intel_dvo_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_dvo_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct intel_dvo *intel_dvo = enc_to_dvo(encoder); const struct drm_display_mode *fixed_mode = @@ -254,10 +253,11 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder, intel_fixed_panel_mode(fixed_mode, adjusted_mode); if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - return true; + + return 0; } static void intel_dvo_pre_enable(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index af2873403009..d0427c2e3997 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ 
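mipi_exec_pmic() above now decodes a 15-byte VBT PMIC sequence element with unaligned little-endian loads (a reserved flag byte, a 16-bit I2C address at offset 1, then 32-bit register, value and mask at offsets 3, 7 and 11) and passes them to intel_soc_pmic_exec_mipi_pmic_seq_element(). The standalone sketch below just replays that byte layout with portable helpers so the offsets are easy to see; the sample element bytes are made up.

#include <stdint.h>
#include <stdio.h>

/* Portable stand-ins for get_unaligned_le16/le32. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Made-up 15-byte PMIC element: flag, addr, reg, value, mask. */
	const uint8_t data[15] = {
		0x00,                   /* byte 0: PMIC flag (reserved) */
		0x34, 0x12,             /* bytes 1-2: I2C address 0x1234 */
		0x78, 0x56, 0x00, 0x00, /* bytes 3-6: register 0x5678 */
		0x01, 0x00, 0x00, 0x00, /* bytes 7-10: value 0x1 */
		0xff, 0x00, 0x00, 0x00, /* bytes 11-14: mask 0xff */
	};

	uint16_t i2c_address = get_le16(data + 1);
	uint32_t reg_address = get_le32(data + 3);
	uint32_t value = get_le32(data + 7);
	uint32_t mask = get_le32(data + 11);

	printf("i2c 0x%04x reg 0x%08x value 0x%08x mask 0x%08x\n",
	       i2c_address, reg_address, value, mask);
	return 0;
}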
b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -25,6 +25,7 @@ #include <drm/drm_print.h> #include "i915_drv.h" +#include "i915_reset.h" #include "intel_ringbuffer.h" #include "intel_lrc.h" @@ -83,7 +84,6 @@ static const struct engine_class_info intel_engine_classes[] = { #define MAX_MMIO_BASES 3 struct engine_info { unsigned int hw_id; - unsigned int uabi_id; u8 class; u8 instance; /* mmio bases table *must* be sorted in reverse gen order */ @@ -94,27 +94,24 @@ struct engine_info { }; static const struct engine_info intel_engines[] = { - [RCS] = { - .hw_id = RCS_HW, - .uabi_id = I915_EXEC_RENDER, + [RCS0] = { + .hw_id = RCS0_HW, .class = RENDER_CLASS, .instance = 0, .mmio_bases = { { .gen = 1, .base = RENDER_RING_BASE } }, }, - [BCS] = { - .hw_id = BCS_HW, - .uabi_id = I915_EXEC_BLT, + [BCS0] = { + .hw_id = BCS0_HW, .class = COPY_ENGINE_CLASS, .instance = 0, .mmio_bases = { { .gen = 6, .base = BLT_RING_BASE } }, }, - [VCS] = { - .hw_id = VCS_HW, - .uabi_id = I915_EXEC_BSD, + [VCS0] = { + .hw_id = VCS0_HW, .class = VIDEO_DECODE_CLASS, .instance = 0, .mmio_bases = { @@ -123,9 +120,8 @@ static const struct engine_info intel_engines[] = { { .gen = 4, .base = BSD_RING_BASE } }, }, - [VCS2] = { - .hw_id = VCS2_HW, - .uabi_id = I915_EXEC_BSD, + [VCS1] = { + .hw_id = VCS1_HW, .class = VIDEO_DECODE_CLASS, .instance = 1, .mmio_bases = { @@ -133,27 +129,24 @@ static const struct engine_info intel_engines[] = { { .gen = 8, .base = GEN8_BSD2_RING_BASE } }, }, - [VCS3] = { - .hw_id = VCS3_HW, - .uabi_id = I915_EXEC_BSD, + [VCS2] = { + .hw_id = VCS2_HW, .class = VIDEO_DECODE_CLASS, .instance = 2, .mmio_bases = { { .gen = 11, .base = GEN11_BSD3_RING_BASE } }, }, - [VCS4] = { - .hw_id = VCS4_HW, - .uabi_id = I915_EXEC_BSD, + [VCS3] = { + .hw_id = VCS3_HW, .class = VIDEO_DECODE_CLASS, .instance = 3, .mmio_bases = { { .gen = 11, .base = GEN11_BSD4_RING_BASE } }, }, - [VECS] = { - .hw_id = VECS_HW, - .uabi_id = I915_EXEC_VEBOX, + [VECS0] = { + .hw_id = VECS0_HW, .class = VIDEO_ENHANCEMENT_CLASS, .instance = 0, .mmio_bases = { @@ -161,9 +154,8 @@ static const struct engine_info intel_engines[] = { { .gen = 7, .base = VEBOX_RING_BASE } }, }, - [VECS2] = { - .hw_id = VECS2_HW, - .uabi_id = I915_EXEC_VEBOX, + [VECS1] = { + .hw_id = VECS1_HW, .class = VIDEO_ENHANCEMENT_CLASS, .instance = 1, .mmio_bases = { @@ -261,6 +253,27 @@ static void __sprint_engine_name(char *name, const struct engine_info *info) info->instance) >= INTEL_ENGINE_CS_MAX_NAME); } +void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask) +{ + /* + * Though they added more rings on g4x/ilk, they did not add + * per-engine HWSTAM until gen6. 
+ */ + if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS) + return; + + if (INTEL_GEN(engine->i915) >= 3) + ENGINE_WRITE(engine, RING_HWSTAM, mask); + else + ENGINE_WRITE16(engine, RING_HWSTAM, mask); +} + +static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine) +{ + /* Mask off all writes into the unknown HWSP */ + intel_engine_set_hwsp_writemask(engine, ~0u); +} + static int intel_engine_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id) @@ -287,15 +300,18 @@ intel_engine_setup(struct drm_i915_private *dev_priv, if (!engine) return -ENOMEM; + BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES); + engine->id = id; + engine->mask = BIT(id); engine->i915 = dev_priv; + engine->uncore = &dev_priv->uncore; __sprint_engine_name(engine->name, info); engine->hw_id = engine->guc_id = info->hw_id; engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases); engine->class = info->class; engine->instance = info->instance; - engine->uabi_id = info->uabi_id; engine->uabi_class = intel_engine_classes[info->class].uabi_class; engine->context_size = __intel_engine_context_size(dev_priv, @@ -312,6 +328,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv, ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); + /* Scrub mmio state on takeover */ + intel_engine_sanitize_mmio(engine); + dev_priv->engine_class[info->class][info->instance] = engine; dev_priv->engine[id] = engine; return 0; @@ -326,15 +345,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv, int intel_engines_init_mmio(struct drm_i915_private *dev_priv) { struct intel_device_info *device_info = mkwrite_device_info(dev_priv); - const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask; + const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask; struct intel_engine_cs *engine; enum intel_engine_id id; unsigned int mask = 0; unsigned int i; int err; - WARN_ON(ring_mask == 0); - WARN_ON(ring_mask & + WARN_ON(engine_mask == 0); + WARN_ON(engine_mask & GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); if (i915_inject_load_failure()) @@ -348,7 +367,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv) if (err) goto cleanup; - mask |= ENGINE_MASK(i); + mask |= BIT(i); } /* @@ -356,16 +375,16 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv) * are added to the driver by a warning and disabling the forgotten * engines. */ - if (WARN_ON(mask != ring_mask)) - device_info->ring_mask = mask; + if (WARN_ON(mask != engine_mask)) + device_info->engine_mask = mask; /* We always presume we have at least RCS available for later probing */ - if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) { + if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) { err = -ENODEV; goto cleanup; } - device_info->num_rings = hweight32(mask); + RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask); i915_check_and_clear_faults(dev_priv); @@ -426,36 +445,6 @@ cleanup: return err; } -void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - struct drm_i915_private *dev_priv = engine->i915; - - /* Our semaphore implementation is strictly monotonic (i.e. we proceed - * so long as the semaphore value in the register/page is greater - * than the sync value), so whenever we reset the seqno, - * so long as we reset the tracking semaphore value to 0, it will - * always be before the next request's seqno. If we don't reset - * the semaphore value, then when the seqno moves backwards all - * future waits will complete instantly (causing rendering corruption). 
- */ - if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { - I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); - I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); - if (HAS_VEBOX(dev_priv)) - I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); - } - - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); - clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); - - /* After manually advancing the seqno, fake the interrupt in case - * there are any waiters for that seqno. - */ - intel_engine_wakeup(engine); - - GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno); -} - static void intel_engine_init_batch_pool(struct intel_engine_cs *engine) { i915_gem_batch_pool_init(&engine->batch_pool, engine); @@ -469,59 +458,74 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists))); GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); - execlists->queue_priority = INT_MIN; + execlists->queue_priority_hint = INT_MIN; execlists->queue = RB_ROOT_CACHED; } -/** - * intel_engines_setup_common - setup engine state not requiring hw access - * @engine: Engine to setup. - * - * Initializes @engine@ structure members shared between legacy and execlists - * submission modes which do not require hardware access. - * - * Typically done early in the submission mode specific engine setup stage. - */ -void intel_engine_setup_common(struct intel_engine_cs *engine) +static void cleanup_status_page(struct intel_engine_cs *engine) { - i915_timeline_init(engine->i915, &engine->timeline, engine->name); - i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE); + struct i915_vma *vma; - intel_engine_init_execlist(engine); - intel_engine_init_hangcheck(engine); - intel_engine_init_batch_pool(engine); - intel_engine_init_cmd_parser(engine); + /* Prevent writes into HWSP after returning the page to the system */ + intel_engine_set_hwsp_writemask(engine, ~0u); + + vma = fetch_and_zero(&engine->status_page.vma); + if (!vma) + return; + + if (!HWS_NEEDS_PHYSICAL(engine->i915)) + i915_vma_unpin(vma); + + i915_gem_object_unpin_map(vma->obj); + __i915_gem_object_release_unless_active(vma->obj); } -static void cleanup_status_page(struct intel_engine_cs *engine) +static int pin_ggtt_status_page(struct intel_engine_cs *engine, + struct i915_vma *vma) { - if (HWS_NEEDS_PHYSICAL(engine->i915)) { - void *addr = fetch_and_zero(&engine->status_page.page_addr); + unsigned int flags; - __free_page(virt_to_page(addr)); - } + flags = PIN_GLOBAL; + if (!HAS_LLC(engine->i915)) + /* + * On g33, we cannot place HWS above 256MiB, so + * restrict its pinning to the low mappable arena. + * Though this restriction is not documented for + * gen4, gen5, or byt, they also behave similarly + * and hang if the HWS is placed at the top of the + * GTT. To generalise, it appears that all !llc + * platforms have issues with us placing the HWS + * above the mappable region (even though we never + * actually map it). + */ + flags |= PIN_MAPPABLE; + else + flags |= PIN_HIGH; - i915_vma_unpin_and_release(&engine->status_page.vma, - I915_VMA_RELEASE_MAP); + return i915_vma_pin(vma, 0, 0, flags); } static int init_status_page(struct intel_engine_cs *engine) { struct drm_i915_gem_object *obj; struct i915_vma *vma; - unsigned int flags; void *vaddr; int ret; + /* + * Though the HWS register does support 36bit addresses, historically + * we have had hangs and corruption reported due to wild writes if + * the HWS is placed above 4G. 
We only allow objects to be allocated + * in GFP_DMA32 for i965, and no earlier physical address users had + * access to more than 4G. + */ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate status page\n"); return PTR_ERR(obj); } - ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - if (ret) - goto err; + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); if (IS_ERR(vma)) { @@ -529,65 +533,170 @@ static int init_status_page(struct intel_engine_cs *engine) goto err; } - flags = PIN_GLOBAL; - if (!HAS_LLC(engine->i915)) - /* On g33, we cannot place HWS above 256MiB, so - * restrict its pinning to the low mappable arena. - * Though this restriction is not documented for - * gen4, gen5, or byt, they also behave similarly - * and hang if the HWS is placed at the top of the - * GTT. To generalise, it appears that all !llc - * platforms have issues with us placing the HWS - * above the mappable region (even though we never - * actually map it). - */ - flags |= PIN_MAPPABLE; - else - flags |= PIN_HIGH; - ret = i915_vma_pin(vma, 0, 0, flags); - if (ret) - goto err; - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); - goto err_unpin; + goto err; } + engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE); engine->status_page.vma = vma; - engine->status_page.ggtt_offset = i915_ggtt_offset(vma); - engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE); + + if (!HWS_NEEDS_PHYSICAL(engine->i915)) { + ret = pin_ggtt_status_page(engine, vma); + if (ret) + goto err_unpin; + } + return 0; err_unpin: - i915_vma_unpin(vma); + i915_gem_object_unpin_map(obj); err: i915_gem_object_put(obj); return ret; } -static int init_phys_status_page(struct intel_engine_cs *engine) +/** + * intel_engines_setup_common - setup engine state not requiring hw access + * @engine: Engine to setup. + * + * Initializes @engine@ structure members shared between legacy and execlists + * submission modes which do not require hardware access. + * + * Typically done early in the submission mode specific engine setup stage. + */ +int intel_engine_setup_common(struct intel_engine_cs *engine) { - struct page *page; + int err; - /* - * Though the HWS register does support 36bit addresses, historically - * we have had hangs and corruption reported due to wild writes if - * the HWS is placed above 4G. 
- */ - page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO); - if (!page) - return -ENOMEM; + err = init_status_page(engine); + if (err) + return err; - engine->status_page.page_addr = page_address(page); + err = i915_timeline_init(engine->i915, + &engine->timeline, + engine->status_page.vma); + if (err) + goto err_hwsp; + + i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE); + + intel_engine_init_breadcrumbs(engine); + intel_engine_init_execlist(engine); + intel_engine_init_hangcheck(engine); + intel_engine_init_batch_pool(engine); + intel_engine_init_cmd_parser(engine); return 0; + +err_hwsp: + cleanup_status_page(engine); + return err; } -static void __intel_context_unpin(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +void intel_engines_set_scheduler_caps(struct drm_i915_private *i915) { - intel_context_unpin(to_intel_context(ctx, engine)); + static const struct { + u8 engine; + u8 sched; + } map[] = { +#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) } + MAP(PREEMPTION, PREEMPTION), + MAP(SEMAPHORES, SEMAPHORES), +#undef MAP + }; + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 enabled, disabled; + + enabled = 0; + disabled = 0; + for_each_engine(engine, i915, id) { /* all engines must agree! */ + int i; + + if (engine->schedule) + enabled |= (I915_SCHEDULER_CAP_ENABLED | + I915_SCHEDULER_CAP_PRIORITY); + else + disabled |= (I915_SCHEDULER_CAP_ENABLED | + I915_SCHEDULER_CAP_PRIORITY); + + for (i = 0; i < ARRAY_SIZE(map); i++) { + if (engine->flags & BIT(map[i].engine)) + enabled |= BIT(map[i].sched); + else + disabled |= BIT(map[i].sched); + } + } + + i915->caps.scheduler = enabled & ~disabled; + if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED)) + i915->caps.scheduler = 0; +} + +struct measure_breadcrumb { + struct i915_request rq; + struct i915_timeline timeline; + struct intel_ring ring; + u32 cs[1024]; +}; + +static int measure_breadcrumb_dw(struct intel_engine_cs *engine) +{ + struct measure_breadcrumb *frame; + int dw = -ENOMEM; + + GEM_BUG_ON(!engine->i915->gt.scratch); + + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) + return -ENOMEM; + + if (i915_timeline_init(engine->i915, + &frame->timeline, + engine->status_page.vma)) + goto out_frame; + + INIT_LIST_HEAD(&frame->ring.request_list); + frame->ring.timeline = &frame->timeline; + frame->ring.vaddr = frame->cs; + frame->ring.size = sizeof(frame->cs); + frame->ring.effective_size = frame->ring.size; + intel_ring_update_space(&frame->ring); + + frame->rq.i915 = engine->i915; + frame->rq.engine = engine; + frame->rq.ring = &frame->ring; + frame->rq.timeline = &frame->timeline; + + dw = i915_timeline_pin(&frame->timeline); + if (dw < 0) + goto out_timeline; + + dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; + + i915_timeline_unpin(&frame->timeline); + +out_timeline: + i915_timeline_fini(&frame->timeline); +out_frame: + kfree(frame); + return dw; +} + +static int pin_context(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct intel_context **out) +{ + struct intel_context *ce; + + ce = intel_context_pin(ctx, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + *out = ce; + return 0; } /** @@ -604,11 +713,8 @@ static void __intel_context_unpin(struct i915_gem_context *ctx, int intel_engine_init_common(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; - struct intel_context *ce; int ret; - engine->set_default_submission(engine); - /* We may need to do things with 
the shrinker which * require us to immediately switch back to the default * context. This can cause a problem as pinning the @@ -616,43 +722,34 @@ int intel_engine_init_common(struct intel_engine_cs *engine) * be available. To avoid this we always pin the default * context. */ - ce = intel_context_pin(i915->kernel_context, engine); - if (IS_ERR(ce)) - return PTR_ERR(ce); + ret = pin_context(i915->kernel_context, engine, + &engine->kernel_context); + if (ret) + return ret; /* * Similarly the preempt context must always be available so that - * we can interrupt the engine at any time. + * we can interrupt the engine at any time. However, as preemption + * is optional, we allow it to fail. */ - if (i915->preempt_context) { - ce = intel_context_pin(i915->preempt_context, engine); - if (IS_ERR(ce)) { - ret = PTR_ERR(ce); - goto err_unpin_kernel; - } - } + if (i915->preempt_context) + pin_context(i915->preempt_context, engine, + &engine->preempt_context); - ret = intel_engine_init_breadcrumbs(engine); - if (ret) - goto err_unpin_preempt; + ret = measure_breadcrumb_dw(engine); + if (ret < 0) + goto err_unpin; - if (HWS_NEEDS_PHYSICAL(i915)) - ret = init_phys_status_page(engine); - else - ret = init_status_page(engine); - if (ret) - goto err_breadcrumbs; + engine->emit_fini_breadcrumb_dw = ret; - return 0; + engine->set_default_submission(engine); -err_breadcrumbs: - intel_engine_fini_breadcrumbs(engine); -err_unpin_preempt: - if (i915->preempt_context) - __intel_context_unpin(i915->preempt_context, engine); + return 0; -err_unpin_kernel: - __intel_context_unpin(i915->kernel_context, engine); +err_unpin: + if (engine->preempt_context) + intel_context_unpin(engine->preempt_context); + intel_context_unpin(engine->kernel_context); return ret; } @@ -665,8 +762,6 @@ err_unpin_kernel: */ void intel_engine_cleanup_common(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = engine->i915; - cleanup_status_page(engine); intel_engine_fini_breadcrumbs(engine); @@ -676,9 +771,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->default_state) i915_gem_object_put(engine->default_state); - if (i915->preempt_context) - __intel_context_unpin(i915->preempt_context, engine); - __intel_context_unpin(i915->kernel_context, engine); + if (engine->preempt_context) + intel_context_unpin(engine->preempt_context); + intel_context_unpin(engine->kernel_context); i915_timeline_fini(&engine->timeline); @@ -689,50 +784,48 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct drm_i915_private *i915 = engine->i915; + u64 acthd; - if (INTEL_GEN(dev_priv) >= 8) - acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), - RING_ACTHD_UDW(engine->mmio_base)); - else if (INTEL_GEN(dev_priv) >= 4) - acthd = I915_READ(RING_ACTHD(engine->mmio_base)); + if (INTEL_GEN(i915) >= 8) + acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW); + else if (INTEL_GEN(i915) >= 4) + acthd = ENGINE_READ(engine, RING_ACTHD); else - acthd = I915_READ(ACTHD); + acthd = ENGINE_READ(engine, ACTHD); return acthd; } u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; u64 bbaddr; - if (INTEL_GEN(dev_priv) >= 8) - bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base), - RING_BBADDR_UDW(engine->mmio_base)); + if (INTEL_GEN(engine->i915) >= 8) + bbaddr = ENGINE_READ64(engine, RING_BBADDR, 
RING_BBADDR_UDW); else - bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); + bbaddr = ENGINE_READ(engine, RING_BBADDR); return bbaddr; } int intel_engine_stop_cs(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct intel_uncore *uncore = engine->uncore; const u32 base = engine->mmio_base; const i915_reg_t mode = RING_MI_MODE(base); int err; - if (INTEL_GEN(dev_priv) < 3) + if (INTEL_GEN(engine->i915) < 3) return -ENODEV; GEM_TRACE("%s\n", engine->name); - I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING)); + intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); err = 0; - if (__intel_wait_for_register_fw(dev_priv, + if (__intel_wait_for_register_fw(uncore, mode, MODE_IDLE, MODE_IDLE, 1000, 0, NULL)) { @@ -741,19 +834,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine) } /* A final mmio read to let GPU writes be hopefully flushed to memory */ - POSTING_READ_FW(mode); + intel_uncore_posting_read_fw(uncore, mode); return err; } void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - GEM_TRACE("%s\n", engine->name); - I915_WRITE_FW(RING_MI_MODE(engine->mmio_base), - _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); } const char *i915_cache_level_str(struct drm_i915_private *i915, int type) @@ -769,12 +859,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) { - const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); + const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 mcr_s_ss_select; u32 slice = fls(sseu->slice_mask); u32 subslice = fls(sseu->subslice_mask[slice]); - if (IS_GEN10(dev_priv)) + if (IS_GEN(dev_priv, 10)) mcr_s_ss_select = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); else if (INTEL_GEN(dev_priv) >= 11) @@ -786,15 +876,16 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) return mcr_s_ss_select; } -static inline uint32_t +static inline u32 read_subslice_reg(struct drm_i915_private *dev_priv, int slice, int subslice, i915_reg_t reg) { - uint32_t mcr_slice_subslice_mask; - uint32_t mcr_slice_subslice_select; - uint32_t default_mcr_s_ss_select; - uint32_t mcr; - uint32_t ret; + struct intel_uncore *uncore = &dev_priv->uncore; + u32 mcr_slice_subslice_mask; + u32 mcr_slice_subslice_select; + u32 default_mcr_s_ss_select; + u32 mcr; + u32 ret; enum forcewake_domains fw_domains; if (INTEL_GEN(dev_priv) >= 11) { @@ -811,33 +902,33 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice, default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv); - fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, + fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, + fw_domains |= intel_uncore_forcewake_for_reg(uncore, GEN8_MCR_SELECTOR, FW_REG_READ | FW_REG_WRITE); - spin_lock_irq(&dev_priv->uncore.lock); - intel_uncore_forcewake_get__locked(dev_priv, fw_domains); + spin_lock_irq(&uncore->lock); + intel_uncore_forcewake_get__locked(uncore, fw_domains); - mcr = I915_READ_FW(GEN8_MCR_SELECTOR); + mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR); WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) != default_mcr_s_ss_select); mcr &= ~mcr_slice_subslice_mask; mcr |= mcr_slice_subslice_select; - I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); + intel_uncore_write_fw(uncore, 
GEN8_MCR_SELECTOR, mcr); - ret = I915_READ_FW(reg); + ret = intel_uncore_read_fw(uncore, reg); mcr &= ~mcr_slice_subslice_mask; mcr |= default_mcr_s_ss_select; - I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); + intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr); - intel_uncore_forcewake_put__locked(dev_priv, fw_domains); - spin_unlock_irq(&dev_priv->uncore.lock); + intel_uncore_forcewake_put__locked(uncore, fw_domains); + spin_unlock_irq(&uncore->lock); return ret; } @@ -847,6 +938,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone) { struct drm_i915_private *dev_priv = engine->i915; + struct intel_uncore *uncore = engine->uncore; u32 mmio_base = engine->mmio_base; int slice; int subslice; @@ -855,12 +947,14 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, switch (INTEL_GEN(dev_priv)) { default: - instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); + instdone->instdone = + intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); - if (engine->id != RCS) + if (engine->id != RCS0) break; - instdone->slice_common = I915_READ(GEN7_SC_INSTDONE); + instdone->slice_common = + intel_uncore_read(uncore, GEN7_SC_INSTDONE); for_each_instdone_slice_subslice(dev_priv, slice, subslice) { instdone->sampler[slice][subslice] = read_subslice_reg(dev_priv, slice, subslice, @@ -871,28 +965,33 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, } break; case 7: - instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); + instdone->instdone = + intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); - if (engine->id != RCS) + if (engine->id != RCS0) break; - instdone->slice_common = I915_READ(GEN7_SC_INSTDONE); - instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE); - instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE); + instdone->slice_common = + intel_uncore_read(uncore, GEN7_SC_INSTDONE); + instdone->sampler[0][0] = + intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE); + instdone->row[0][0] = + intel_uncore_read(uncore, GEN7_ROW_INSTDONE); break; case 6: case 5: case 4: - instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); - - if (engine->id == RCS) + instdone->instdone = + intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); + if (engine->id == RCS0) /* HACK: Using the wrong struct member */ - instdone->slice_common = I915_READ(GEN4_INSTDONE1); + instdone->slice_common = + intel_uncore_read(uncore, GEN4_INSTDONE1); break; case 3: case 2: - instdone->instdone = I915_READ(GEN2_INSTDONE); + instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE); break; } } @@ -900,22 +999,28 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, static bool ring_is_idle(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; + intel_wakeref_t wakeref; bool idle = true; + if (I915_SELFTEST_ONLY(!engine->mmio_base)) + return true; + /* If the whole device is asleep, the engine must be idle */ - if (!intel_runtime_pm_get_if_in_use(dev_priv)) + wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + if (!wakeref) return true; /* First check that no commands are left in the ring */ - if ((I915_READ_HEAD(engine) & HEAD_ADDR) != - (I915_READ_TAIL(engine) & TAIL_ADDR)) + if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) != + (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR)) idle = false; /* No bit for gen2, so assume the CS parser is idle */ - if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) + if (INTEL_GEN(dev_priv) > 2 && + !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE)) idle = false; - 
intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return idle; } @@ -929,17 +1034,8 @@ static bool ring_is_idle(struct intel_engine_cs *engine) */ bool intel_engine_is_idle(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - /* More white lies, if wedged, hw state is inconsistent */ - if (i915_terminally_wedged(&dev_priv->gpu_error)) - return true; - - /* Any inflight/incomplete requests? */ - if (!intel_engine_signaled(engine, intel_engine_last_submit(engine))) - return false; - - if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock)) + if (i915_reset_failed(engine->i915)) return true; /* Waiting to drain ELSP? */ @@ -967,13 +1063,10 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) return false; /* Ring stopped? */ - if (!ring_is_idle(engine)) - return false; - - return true; + return ring_is_idle(engine); } -bool intel_engines_are_idle(struct drm_i915_private *dev_priv) +bool intel_engines_are_idle(struct drm_i915_private *i915) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -982,10 +1075,14 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv) * If the driver is wedged, HW state may be very inconsistent and * report that it is still busy, even though we have stopped using it. */ - if (i915_terminally_wedged(&dev_priv->gpu_error)) + if (i915_reset_failed(i915)) return true; - for_each_engine(engine, dev_priv, id) { + /* Already parked (and passed an idleness test); must still be idle */ + if (!READ_ONCE(i915->gt.awake)) + return true; + + for_each_engine(engine, i915, id) { if (!intel_engine_is_idle(engine)) return false; } @@ -993,34 +1090,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv) return true; } -/** - * intel_engine_has_kernel_context: - * @engine: the engine - * - * Returns true if the last context to be executed on this engine, or has been - * executed if the engine is already idle, is the kernel context - * (#i915.kernel_context). - */ -bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine) -{ - const struct intel_context *kernel_context = - to_intel_context(engine->i915->kernel_context, engine); - struct i915_request *rq; - - lockdep_assert_held(&engine->i915->drm.struct_mutex); - - /* - * Check the last context seen by the engine. If active, it will be - * the last request that remains in the timeline. When idle, it is - * the last executed context as tracked by retirement. - */ - rq = __i915_gem_active_peek(&engine->timeline.last_request); - if (rq) - return rq->hw_context == kernel_context; - else - return engine->last_retired_context == kernel_context; -} - void intel_engines_reset_default_submission(struct drm_i915_private *i915) { struct intel_engine_cs *engine; @@ -1030,26 +1099,36 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915) engine->set_default_submission(engine); } +static bool reset_engines(struct drm_i915_private *i915) +{ + if (INTEL_INFO(i915)->gpu_reset_clobbers_display) + return false; + + return intel_gpu_reset(i915, ALL_ENGINES) == 0; +} + /** * intel_engines_sanitize: called after the GPU has lost power * @i915: the i915 device + * @force: ignore a failed reset and sanitize engine state anyway * * Anytime we reset the GPU, either with an explicit GPU reset or through a * PCI power cycle, the GPU loses state and we must reset our state tracking * to match. Note that calling intel_engines_sanitize() if the GPU has not * been reset results in much confusion! 
*/ -void intel_engines_sanitize(struct drm_i915_private *i915) +void intel_engines_sanitize(struct drm_i915_private *i915, bool force) { struct intel_engine_cs *engine; enum intel_engine_id id; GEM_TRACE("\n"); - for_each_engine(engine, i915, id) { - if (engine->reset.reset) - engine->reset.reset(engine, NULL); - } + if (!reset_engines(i915) && !force) + return; + + for_each_engine(engine, i915, id) + intel_engine_reset(engine, false); } /** @@ -1085,7 +1164,7 @@ void intel_engines_park(struct drm_i915_private *i915) } /* Must be reset upon idling, or we may miss the busy wakeup. */ - GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN); + GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN); if (engine->park) engine->park(engine); @@ -1098,6 +1177,8 @@ void intel_engines_park(struct drm_i915_private *i915) i915_gem_batch_pool_fini(&engine->batch_pool); engine->execlists.no_priolist = false; } + + i915->gt.active_engines = 0; } /** @@ -1201,11 +1282,14 @@ static void print_request(struct drm_printer *m, x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf)); - drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n", + drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n", prefix, - rq->global_seqno, - i915_request_completed(rq) ? "!" : "", rq->fence.context, rq->fence.seqno, + i915_request_completed(rq) ? "!" : + i915_request_started(rq) ? "*" : + "", + test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &rq->fence.flags) ? "+" : "", buf, jiffies_to_msecs(jiffies - rq->emitted_jiffies), name); @@ -1248,35 +1332,26 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, &engine->execlists; u64 addr; - if (engine->id == RCS && IS_GEN(dev_priv, 4, 7)) - drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID)); + if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7)) + drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID)); drm_printf(m, "\tRING_START: 0x%08x\n", - I915_READ(RING_START(engine->mmio_base))); + ENGINE_READ(engine, RING_START)); drm_printf(m, "\tRING_HEAD: 0x%08x\n", - I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR); + ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR); drm_printf(m, "\tRING_TAIL: 0x%08x\n", - I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR); + ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR); drm_printf(m, "\tRING_CTL: 0x%08x%s\n", - I915_READ(RING_CTL(engine->mmio_base)), - I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : ""); + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : ""); if (INTEL_GEN(engine->i915) > 2) { drm_printf(m, "\tRING_MODE: 0x%08x%s\n", - I915_READ(RING_MI_MODE(engine->mmio_base)), - I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : ""); + ENGINE_READ(engine, RING_MI_MODE), + ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? 
" [idle]" : ""); } if (INTEL_GEN(dev_priv) >= 6) { - drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine)); - } - - if (HAS_LEGACY_SEMAPHORES(dev_priv)) { - drm_printf(m, "\tSYNC_0: 0x%08x\n", - I915_READ(RING_SYNC_0(engine->mmio_base))); - drm_printf(m, "\tSYNC_1: 0x%08x\n", - I915_READ(RING_SYNC_1(engine->mmio_base))); - if (HAS_VEBOX(dev_priv)) - drm_printf(m, "\tSYNC_2: 0x%08x\n", - I915_READ(RING_SYNC_2(engine->mmio_base))); + drm_printf(m, "\tRING_IMR: %08x\n", + ENGINE_READ(engine, RING_IMR)); } addr = intel_engine_get_active_head(engine); @@ -1286,39 +1361,39 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, drm_printf(m, "\tBBADDR: 0x%08x_%08x\n", upper_32_bits(addr), lower_32_bits(addr)); if (INTEL_GEN(dev_priv) >= 8) - addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base), - RING_DMA_FADD_UDW(engine->mmio_base)); + addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW); else if (INTEL_GEN(dev_priv) >= 4) - addr = I915_READ(RING_DMA_FADD(engine->mmio_base)); + addr = ENGINE_READ(engine, RING_DMA_FADD); else - addr = I915_READ(DMA_FADD_I8XX); + addr = ENGINE_READ(engine, DMA_FADD_I8XX); drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n", upper_32_bits(addr), lower_32_bits(addr)); if (INTEL_GEN(dev_priv) >= 4) { drm_printf(m, "\tIPEIR: 0x%08x\n", - I915_READ(RING_IPEIR(engine->mmio_base))); + ENGINE_READ(engine, RING_IPEIR)); drm_printf(m, "\tIPEHR: 0x%08x\n", - I915_READ(RING_IPEHR(engine->mmio_base))); + ENGINE_READ(engine, RING_IPEHR)); } else { - drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR)); - drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR)); + drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR)); + drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR)); } if (HAS_EXECLISTS(dev_priv)) { - const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + const u32 *hws = + &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; unsigned int idx; u8 read, write; drm_printf(m, "\tExeclist status: 0x%08x %08x\n", - I915_READ(RING_EXECLIST_STATUS_LO(engine)), - I915_READ(RING_EXECLIST_STATUS_HI(engine))); + ENGINE_READ(engine, RING_EXECLIST_STATUS_LO), + ENGINE_READ(engine, RING_EXECLIST_STATUS_HI)); read = execlists->csb_head; write = READ_ONCE(*execlists->csb_write); drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? 
%s (%s)\n", read, write, - GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))), + GEN8_CSB_WRITE_PTR(ENGINE_READ(engine, RING_CONTEXT_STATUS_PTR)), yesno(test_bit(TASKLET_STATE_SCHED, &engine->execlists.tasklet.state)), enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); @@ -1333,9 +1408,13 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n", idx, hws[idx * 2], - I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), + ENGINE_READ_IDX(engine, + RING_CONTEXT_STATUS_BUF_LO, + idx), hws[idx * 2 + 1], - I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx))); + ENGINE_READ_IDX(engine, + RING_CONTEXT_STATUS_BUF_HI, + idx)); } rcu_read_lock(); @@ -1348,9 +1427,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, char hdr[80]; snprintf(hdr, sizeof(hdr), - "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ", + "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ", idx, count, - i915_ggtt_offset(rq->ring->vma)); + i915_ggtt_offset(rq->ring->vma), + rq->timeline->hwsp_offset, + hwsp_seqno(rq)); print_request(m, rq, hdr); } else { drm_printf(m, "\t\tELSP[%d] idle\n", idx); @@ -1360,11 +1441,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, rcu_read_unlock(); } else if (INTEL_GEN(dev_priv) > 6) { drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", - I915_READ(RING_PP_DIR_BASE(engine))); + ENGINE_READ(engine, RING_PP_DIR_BASE)); drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", - I915_READ(RING_PP_DIR_BASE_READ(engine))); + ENGINE_READ(engine, RING_PP_DIR_BASE_READ)); drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", - I915_READ(RING_PP_DIR_DCLV(engine))); + ENGINE_READ(engine, RING_PP_DIR_DCLV)); } } @@ -1405,14 +1486,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...) 
{ - const int MAX_REQUESTS_TO_SHOW = 8; - struct intel_breadcrumbs * const b = &engine->breadcrumbs; - const struct intel_engine_execlists * const execlists = &engine->execlists; struct i915_gpu_error * const error = &engine->i915->gpu_error; - struct i915_request *rq, *last; - unsigned long flags; - struct rb_node *rb; - int count; + struct i915_request *rq; + intel_wakeref_t wakeref; if (header) { va_list ap; @@ -1422,13 +1498,12 @@ void intel_engine_dump(struct intel_engine_cs *engine, va_end(ap); } - if (i915_terminally_wedged(&engine->i915->gpu_error)) + if (i915_reset_failed(engine->i915)) drm_printf(m, "*** WEDGED ***\n"); - drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n", - intel_engine_get_seqno(engine), - intel_engine_last_submit(engine), - engine->hangcheck.seqno, + drm_printf(m, "\tHangcheck %x:%x [%d ms]\n", + engine->hangcheck.last_seqno, + engine->hangcheck.next_seqno, jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp)); drm_printf(m, "\tReset count: %d (global %d)\n", i915_reset_engine_count(error, engine), @@ -1448,7 +1523,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, if (&rq->link != &engine->timeline.requests) print_request(m, rq, "\t\tlast "); - rq = i915_gem_find_active_request(engine); + rq = intel_engine_find_active_request(engine); if (rq) { print_request(m, rq, "\t\tactive "); @@ -1462,85 +1537,30 @@ void intel_engine_dump(struct intel_engine_cs *engine, rq->ring->emit); drm_printf(m, "\t\tring->space: 0x%08x\n", rq->ring->space); + drm_printf(m, "\t\tring->hwsp: 0x%08x\n", + rq->timeline->hwsp_offset); print_request_ring(m, rq); } rcu_read_unlock(); - if (intel_runtime_pm_get_if_in_use(engine->i915)) { + wakeref = intel_runtime_pm_get_if_in_use(engine->i915); + if (wakeref) { intel_engine_print_registers(engine, m); - intel_runtime_pm_put(engine->i915); + intel_runtime_pm_put(engine->i915, wakeref); } else { drm_printf(m, "\tDevice is asleep; skipping register dump\n"); } - local_irq_save(flags); - spin_lock(&engine->timeline.lock); - - last = NULL; - count = 0; - list_for_each_entry(rq, &engine->timeline.requests, link) { - if (count++ < MAX_REQUESTS_TO_SHOW - 1) - print_request(m, rq, "\t\tE "); - else - last = rq; - } - if (last) { - if (count > MAX_REQUESTS_TO_SHOW) { - drm_printf(m, - "\t\t...skipping %d executing requests...\n", - count - MAX_REQUESTS_TO_SHOW); - } - print_request(m, last, "\t\tE "); - } - - last = NULL; - count = 0; - drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); - for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { - struct i915_priolist *p = rb_entry(rb, typeof(*p), node); - int i; - - priolist_for_each_request(rq, p, i) { - if (count++ < MAX_REQUESTS_TO_SHOW - 1) - print_request(m, rq, "\t\tQ "); - else - last = rq; - } - } - if (last) { - if (count > MAX_REQUESTS_TO_SHOW) { - drm_printf(m, - "\t\t...skipping %d queued requests...\n", - count - MAX_REQUESTS_TO_SHOW); - } - print_request(m, last, "\t\tQ "); - } - - spin_unlock(&engine->timeline.lock); - - spin_lock(&b->rb_lock); - for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { - struct intel_wait *w = rb_entry(rb, typeof(*w), node); - - drm_printf(m, "\t%s [%d:%c] waiting for %x\n", - w->tsk->comm, w->tsk->pid, - task_state_to_char(w->tsk), - w->seqno); - } - spin_unlock(&b->rb_lock); - local_irq_restore(flags); - - drm_printf(m, "IRQ? 0x%lx (breadcrumbs? 
%s)\n", - engine->irq_posted, - yesno(test_bit(ENGINE_IRQ_BREADCRUMB, - &engine->irq_posted))); + intel_execlists_show_requests(engine, m, print_request, 8); drm_printf(m, "HWSP:\n"); - hexdump(m, engine->status_page.page_addr, PAGE_SIZE); + hexdump(m, engine->status_page.addr, PAGE_SIZE); drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine))); + + intel_engine_print_breadcrumbs(engine, m); } static u8 user_class_map[] = { @@ -1670,6 +1690,50 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine) write_sequnlock_irqrestore(&engine->stats.lock, flags); } +static bool match_ring(struct i915_request *rq) +{ + u32 ring = ENGINE_READ(rq->engine, RING_START); + + return ring == i915_ggtt_offset(rq->ring->vma); +} + +struct i915_request * +intel_engine_find_active_request(struct intel_engine_cs *engine) +{ + struct i915_request *request, *active = NULL; + unsigned long flags; + + /* + * We are called by the error capture, reset and to dump engine + * state at random points in time. In particular, note that neither is + * crucially ordered with an interrupt. After a hang, the GPU is dead + * and we assume that no more writes can happen (we waited long enough + * for all writes that were in transaction to be flushed) - adding an + * extra delay for a recent interrupt is pointless. Hence, we do + * not need an engine->irq_seqno_barrier() before the seqno reads. + * At all other times, we must assume the GPU is still running, but + * we only care about the snapshot of this moment. + */ + spin_lock_irqsave(&engine->timeline.lock, flags); + list_for_each_entry(request, &engine->timeline.requests, link) { + if (i915_request_completed(request)) + continue; + + if (!i915_request_started(request)) + break; + + /* More than one preemptible request may match! 
*/ + if (!match_ring(request)) + break; + + active = request; + break; + } + spin_unlock_irqrestore(&engine->timeline.lock, flags); + + return active; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_engine.c" #include "selftests/intel_engine_cs.c" diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h new file mode 100644 index 000000000000..6c6d8a9aca94 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_engine_types.h @@ -0,0 +1,541 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_ENGINE_TYPES__ +#define __INTEL_ENGINE_TYPES__ + +#include <linux/hashtable.h> +#include <linux/irq_work.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/types.h> + +#include "i915_gem.h" +#include "i915_priolist_types.h" +#include "i915_selftest.h" +#include "i915_timeline_types.h" +#include "intel_workarounds_types.h" + +#include "i915_gem_batch_pool.h" +#include "i915_pmu.h" + +#define I915_MAX_SLICES 3 +#define I915_MAX_SUBSLICES 8 + +#define I915_CMD_HASH_ORDER 9 + +struct dma_fence; +struct drm_i915_reg_table; +struct i915_gem_context; +struct i915_request; +struct i915_sched_attr; +struct intel_uncore; + +typedef u8 intel_engine_mask_t; +#define ALL_ENGINES ((intel_engine_mask_t)~0ul) + +struct intel_hw_status_page { + struct i915_vma *vma; + u32 *addr; +}; + +struct intel_instdone { + u32 instdone; + /* The following exist only in the RCS engine */ + u32 slice_common; + u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES]; + u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES]; +}; + +struct intel_engine_hangcheck { + u64 acthd; + u32 last_seqno; + u32 next_seqno; + unsigned long action_timestamp; + struct intel_instdone instdone; +}; + +struct intel_ring { + struct kref ref; + struct i915_vma *vma; + void *vaddr; + + struct i915_timeline *timeline; + struct list_head request_list; + struct list_head active_link; + + u32 head; + u32 tail; + u32 emit; + + u32 space; + u32 size; + u32 effective_size; +}; + +/* + * we use a single page to load ctx workarounds so all of these + * values are referred in terms of dwords + * + * struct i915_wa_ctx_bb: + * offset: specifies batch starting position, also helpful in case + * if we want to have multiple batches at different offsets based on + * some criteria. It is not a requirement at the moment but provides + * an option for future use. + * size: size of the batch in DWORDS + */ +struct i915_ctx_workarounds { + struct i915_wa_ctx_bb { + u32 offset; + u32 size; + } indirect_ctx, per_ctx; + struct i915_vma *vma; +}; + +#define I915_MAX_VCS 4 +#define I915_MAX_VECS 2 + +/* + * Engine IDs definitions. + * Keep instances of the same type engine together. + */ +enum intel_engine_id { + RCS0 = 0, + BCS0, + VCS0, + VCS1, + VCS2, + VCS3, +#define _VCS(n) (VCS0 + (n)) + VECS0, + VECS1, +#define _VECS(n) (VECS0 + (n)) + I915_NUM_ENGINES +}; + +struct st_preempt_hang { + struct completion completion; + unsigned int count; + bool inject_hang; +}; + +/** + * struct intel_engine_execlists - execlist submission queue and port state + * + * The struct intel_engine_execlists represents the combined logical state of + * driver and the hardware state for execlist mode of submission. 
+ */ +struct intel_engine_execlists { + /** + * @tasklet: softirq tasklet for bottom handler + */ + struct tasklet_struct tasklet; + + /** + * @default_priolist: priority list for I915_PRIORITY_NORMAL + */ + struct i915_priolist default_priolist; + + /** + * @no_priolist: priority lists disabled + */ + bool no_priolist; + + /** + * @submit_reg: gen-specific execlist submission register + * set to the ExecList Submission Port (elsp) register pre-Gen11 and to + * the ExecList Submission Queue Contents register array for Gen11+ + */ + u32 __iomem *submit_reg; + + /** + * @ctrl_reg: the enhanced execlists control register, used to load the + * submit queue on the HW and to request preemptions to idle + */ + u32 __iomem *ctrl_reg; + + /** + * @port: execlist port states + * + * For each hardware ELSP (ExecList Submission Port) we keep + * track of the last request and the number of times we submitted + * that port to hw. We then count the number of times the hw reports + * a context completion or preemption. As only one context can + * be active on hw, we limit resubmission of context to port[0]. This + * is called Lite Restore, of the context. + */ + struct execlist_port { + /** + * @request_count: combined request and submission count + */ + struct i915_request *request_count; +#define EXECLIST_COUNT_BITS 2 +#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) +#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) +#define port_set(p, packed) ((p)->request_count = (packed)) +#define port_isset(p) ((p)->request_count) +#define port_index(p, execlists) ((p) - (execlists)->port) + + /** + * @context_id: context ID for port + */ + GEM_DEBUG_DECL(u32 context_id); + +#define EXECLIST_MAX_PORTS 2 + } port[EXECLIST_MAX_PORTS]; + + /** + * @active: is the HW active? We consider the HW as active after + * submitting any context for execution and until we have seen the + * last context completion event. After that, we do not expect any + * more events until we submit, and so can park the HW. + * + * As we have a small number of different sources from which we feed + * the HW, we track the state of each inside a single bitfield. + */ + unsigned int active; +#define EXECLISTS_ACTIVE_USER 0 +#define EXECLISTS_ACTIVE_PREEMPT 1 +#define EXECLISTS_ACTIVE_HWACK 2 + + /** + * @port_mask: number of execlist ports - 1 + */ + unsigned int port_mask; + + /** + * @queue_priority_hint: Highest pending priority. + * + * When we add requests into the queue, or adjust the priority of + * executing requests, we compute the maximum priority of those + * pending requests. We can then use this value to determine if + * we need to preempt the executing requests to service the queue. + * However, since the we may have recorded the priority of an inflight + * request we wanted to preempt but since completed, at the time of + * dequeuing the priority hint may no longer may match the highest + * available request priority. + */ + int queue_priority_hint; + + /** + * @queue: queue of requests, in priority lists + */ + struct rb_root_cached queue; + + /** + * @csb_write: control register for Context Switch buffer + * + * Note this register may be either mmio or HWSP shadow. 
+ */ + u32 *csb_write; + + /** + * @csb_status: status array for Context Switch buffer + * + * Note these register may be either mmio or HWSP shadow. + */ + u32 *csb_status; + + /** + * @preempt_complete_status: expected CSB upon completing preemption + */ + u32 preempt_complete_status; + + /** + * @csb_head: context status buffer head + */ + u8 csb_head; + + I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;) +}; + +#define INTEL_ENGINE_CS_MAX_NAME 8 + +struct intel_engine_cs { + struct drm_i915_private *i915; + struct intel_uncore *uncore; + char name[INTEL_ENGINE_CS_MAX_NAME]; + + enum intel_engine_id id; + unsigned int hw_id; + unsigned int guc_id; + intel_engine_mask_t mask; + + u8 uabi_class; + + u8 class; + u8 instance; + u32 context_size; + u32 mmio_base; + + struct intel_ring *buffer; + + struct i915_timeline timeline; + + struct intel_context *kernel_context; /* pinned */ + struct intel_context *preempt_context; /* pinned; optional */ + + struct drm_i915_gem_object *default_state; + void *pinned_default_state; + + /* Rather than have every client wait upon all user interrupts, + * with the herd waking after every interrupt and each doing the + * heavyweight seqno dance, we delegate the task (of being the + * bottom-half of the user interrupt) to the first client. After + * every interrupt, we wake up one client, who does the heavyweight + * coherent seqno read and either goes back to sleep (if incomplete), + * or wakes up all the completed clients in parallel, before then + * transferring the bottom-half status to the next client in the queue. + * + * Compared to walking the entire list of waiters in a single dedicated + * bottom-half, we reduce the latency of the first waiter by avoiding + * a context switch, but incur additional coherent seqno reads when + * following the chain of request breadcrumbs. Since it is most likely + * that we have a single client waiting on each seqno, then reducing + * the overhead of waking that client is much preferred. + */ + struct intel_breadcrumbs { + spinlock_t irq_lock; + struct list_head signalers; + + struct irq_work irq_work; /* for use from inside irq_lock */ + + unsigned int irq_enabled; + + bool irq_armed; + } breadcrumbs; + + struct intel_engine_pmu { + /** + * @enable: Bitmask of enable sample events on this engine. + * + * Bits correspond to sample event types, for instance + * I915_SAMPLE_QUEUED is bit 0 etc. + */ + u32 enable; + /** + * @enable_count: Reference count for the enabled samplers. + * + * Index number corresponds to @enum drm_i915_pmu_engine_sample. + */ + unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; + /** + * @sample: Counter values for sampling events. + * + * Our internal timer stores the current counters in this field. + * + * Index number corresponds to @enum drm_i915_pmu_engine_sample. + */ + struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; + } pmu; + + /* + * A pool of objects to use as shadow copies of client batch buffers + * when the command parser is enabled. Prevents the client from + * modifying the batch contents after software parsing. 
+ */ + struct i915_gem_batch_pool batch_pool; + + struct intel_hw_status_page status_page; + struct i915_ctx_workarounds wa_ctx; + struct i915_wa_list ctx_wa_list; + struct i915_wa_list wa_list; + struct i915_wa_list whitelist; + + u32 irq_keep_mask; /* always keep these interrupts */ + u32 irq_enable_mask; /* bitmask to enable ring interrupt */ + void (*irq_enable)(struct intel_engine_cs *engine); + void (*irq_disable)(struct intel_engine_cs *engine); + + int (*init_hw)(struct intel_engine_cs *engine); + + struct { + void (*prepare)(struct intel_engine_cs *engine); + void (*reset)(struct intel_engine_cs *engine, bool stalled); + void (*finish)(struct intel_engine_cs *engine); + } reset; + + void (*park)(struct intel_engine_cs *engine); + void (*unpark)(struct intel_engine_cs *engine); + + void (*set_default_submission)(struct intel_engine_cs *engine); + + const struct intel_context_ops *cops; + + int (*request_alloc)(struct i915_request *rq); + int (*init_context)(struct i915_request *rq); + + int (*emit_flush)(struct i915_request *request, u32 mode); +#define EMIT_INVALIDATE BIT(0) +#define EMIT_FLUSH BIT(1) +#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH) + int (*emit_bb_start)(struct i915_request *rq, + u64 offset, u32 length, + unsigned int dispatch_flags); +#define I915_DISPATCH_SECURE BIT(0) +#define I915_DISPATCH_PINNED BIT(1) + int (*emit_init_breadcrumb)(struct i915_request *rq); + u32 *(*emit_fini_breadcrumb)(struct i915_request *rq, + u32 *cs); + unsigned int emit_fini_breadcrumb_dw; + + /* Pass the request to the hardware queue (e.g. directly into + * the legacy ringbuffer or to the end of an execlist). + * + * This is called from an atomic context with irqs disabled; must + * be irq safe. + */ + void (*submit_request)(struct i915_request *rq); + + /* + * Call when the priority on a request has changed and it and its + * dependencies may need rescheduling. Note the request itself may + * not be ready to run! + */ + void (*schedule)(struct i915_request *request, + const struct i915_sched_attr *attr); + + /* + * Cancel all requests on the hardware, or queued for execution. + * This should only cancel the ready requests that have been + * submitted to the engine (via the engine->submit_request callback). + * This is called when marking the device as wedged. + */ + void (*cancel_requests)(struct intel_engine_cs *engine); + + void (*cleanup)(struct intel_engine_cs *engine); + + struct intel_engine_execlists execlists; + + /* Contexts are pinned whilst they are active on the GPU. The last + * context executed remains active whilst the GPU is idle - the + * switch away and write to the context object only occurs on the + * next execution. Contexts are only unpinned on retirement of the + * following request ensuring that we can always write to the object + * on the context switch even after idling. Across suspend, we switch + * to the kernel context and trash it as the save may not happen + * before the hardware is powered down. 
+ */ + struct intel_context *last_retired_context; + + /* status_notifier: list of callbacks for context-switch changes */ + struct atomic_notifier_head context_status_notifier; + + struct intel_engine_hangcheck hangcheck; + +#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0) +#define I915_ENGINE_SUPPORTS_STATS BIT(1) +#define I915_ENGINE_HAS_PREEMPTION BIT(2) +#define I915_ENGINE_HAS_SEMAPHORES BIT(3) +#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4) + unsigned int flags; + + /* + * Table of commands the command parser needs to know about + * for this engine. + */ + DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER); + + /* + * Table of registers allowed in commands that read/write registers. + */ + const struct drm_i915_reg_table *reg_tables; + int reg_table_count; + + /* + * Returns the bitmask for the length field of the specified command. + * Return 0 for an unrecognized/invalid command. + * + * If the command parser finds an entry for a command in the engine's + * cmd_tables, it gets the command's length based on the table entry. + * If not, it calls this function to determine the per-engine length + * field encoding for the command (i.e. different opcode ranges use + * certain bits to encode the command length in the header). + */ + u32 (*get_cmd_length_mask)(u32 cmd_header); + + struct { + /** + * @lock: Lock protecting the below fields. + */ + seqlock_t lock; + /** + * @enabled: Reference count indicating number of listeners. + */ + unsigned int enabled; + /** + * @active: Number of contexts currently scheduled in. + */ + unsigned int active; + /** + * @enabled_at: Timestamp when busy stats were enabled. + */ + ktime_t enabled_at; + /** + * @start: Timestamp of the last idle to active transition. + * + * Idle is defined as active == 0, active is active > 0. + */ + ktime_t start; + /** + * @total: Total time this engine was busy. + * + * Accumulated time not counting the most recent block in cases + * where engine is currently busy (active > 0). + */ + ktime_t total; + } stats; +}; + +static inline bool +intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER; +} + +static inline bool +intel_engine_supports_stats(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_SUPPORTS_STATS; +} + +static inline bool +intel_engine_has_preemption(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_HAS_PREEMPTION; +} + +static inline bool +intel_engine_has_semaphores(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_HAS_SEMAPHORES; +} + +static inline bool +intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; +} + +#define instdone_slice_mask(dev_priv__) \ + (IS_GEN(dev_priv__, 7) ? \ + 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask) + +#define instdone_subslice_mask(dev_priv__) \ + (IS_GEN(dev_priv__, 7) ? \ + 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0]) + +#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ + for ((slice__) = 0, (subslice__) = 0; \ + (slice__) < I915_MAX_SLICES; \ + (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? 
(subslice__) + 1 : 0, \ + (slice__) += ((subslice__) == 0)) \ + for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \ + (BIT(subslice__) & instdone_subslice_mask(dev_priv__))) + +#endif /* __INTEL_ENGINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index f23570c44323..43fe08be3b7d 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -38,6 +38,8 @@ * forcibly disable it to allow proper screen updates. */ +#include <drm/drm_fourcc.h> + #include "intel_drv.h" #include "i915_drv.h" @@ -84,7 +86,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, int lines; intel_fbc_get_plane_source_size(cache, NULL, &lines); - if (IS_GEN7(dev_priv)) + if (IS_GEN(dev_priv, 7)) lines = min(lines, 2048); else if (INTEL_GEN(dev_priv) >= 8) lines = min(lines, 2560); @@ -106,7 +108,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) I915_WRITE(FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, FBC_STATUS, FBC_STAT_COMPRESSING, 0, 10)) { DRM_DEBUG_KMS("FBC idle timed out\n"); @@ -127,7 +129,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) cfb_pitch = params->fb.stride; /* FBC_CTL wants 32B or 64B units */ - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) cfb_pitch = (cfb_pitch / 32) - 1; else cfb_pitch = (cfb_pitch / 64) - 1; @@ -136,7 +138,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) I915_WRITE(FBC_TAG(i), 0); - if (IS_GEN4(dev_priv)) { + if (IS_GEN(dev_priv, 4)) { u32 fbc_ctl2; /* Set it up... */ @@ -233,9 +235,9 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) if (params->flags & PLANE_HAS_FENCE) { dpfc_ctl |= DPFC_CTL_FENCE_EN; - if (IS_GEN5(dev_priv)) + if (IS_GEN(dev_priv, 5)) dpfc_ctl |= params->vma->fence->id; - if (IS_GEN6(dev_priv)) { + if (IS_GEN(dev_priv, 6)) { I915_WRITE(SNB_DPFC_CTL_SA, SNB_CPU_FENCE_ENABLE | params->vma->fence->id); @@ -243,7 +245,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) params->crtc.fence_y_offset); } } else { - if (IS_GEN6(dev_priv)) { + if (IS_GEN(dev_priv, 6)) { I915_WRITE(SNB_DPFC_CTL_SA, 0); I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0); } @@ -282,7 +284,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) int threshold = dev_priv->fbc.threshold; /* Display WA #0529: skl, kbl, bxt. 
*/ - if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) { + if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) { u32 val = I915_READ(CHICKEN_MISC_4); val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); @@ -581,10 +583,10 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv, if (stride < 512) return false; - if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv)) + if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3)) return stride == 4096 || stride == 8192; - if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048) + if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048) return false; if (stride > 16384) @@ -594,7 +596,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv, } static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, - uint32_t pixel_format) + u32 pixel_format) { switch (pixel_format) { case DRM_FORMAT_XRGB8888: @@ -603,7 +605,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, case DRM_FORMAT_XRGB1555: case DRM_FORMAT_RGB565: /* 16bpp not supported on gen2 */ - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) return false; /* WaFbcOnly1to1Ratio:ctg */ if (IS_G4X(dev_priv)) @@ -626,7 +628,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) struct intel_fbc *fbc = &dev_priv->fbc; unsigned int effective_w, effective_h, max_w, max_h; - if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + max_w = 5120; + max_h = 4096; + } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { max_w = 4096; max_h = 4096; } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { @@ -784,7 +789,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * having a Y offset that isn't divisible by 4 causes FIFO underrun * and screen flicker. 
*/ - if (IS_GEN(dev_priv, 9, 10) && + if (IS_GEN_RANGE(dev_priv, 9, 10) && (fbc->state_cache.plane.adjusted_y & 3)) { fbc->no_fbc_reason = "plane Y offset is misaligned"; return false; @@ -839,7 +844,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); - if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) + if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; } @@ -1126,8 +1131,6 @@ void intel_fbc_disable(struct intel_crtc *crtc) if (!fbc_supported(dev_priv)) return; - WARN_ON(crtc->active); - mutex_lock(&fbc->lock); if (fbc->crtc == crtc) __intel_fbc_disable(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb5bb5b32a60..e8f694b57b8a 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -37,9 +37,10 @@ #include <linux/init.h> #include <linux/vga_switcheroo.h> -#include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> + #include "intel_drv.h" #include "intel_frontbuffer.h" #include <drm/i915_drm.h> @@ -178,8 +179,9 @@ static int intelfb_create(struct drm_fb_helper *helper, const struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_NORMAL, }; - struct fb_info *info; struct drm_framebuffer *fb; + intel_wakeref_t wakeref; + struct fb_info *info; struct i915_vma *vma; unsigned long flags = 0; bool prealloc = false; @@ -210,7 +212,7 @@ static int intelfb_create(struct drm_fb_helper *helper, } mutex_lock(&dev->struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); /* Pin the GGTT vma for our access via info->screen_base. 
* This also validates that any existing fb inherited from the @@ -277,7 +279,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ifbdev->vma = vma; ifbdev->vma_flags = flags; - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(pdev, info); return 0; @@ -285,7 +287,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_unpin: intel_unpin_fb_vma(vma, flags); out_unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev->struct_mutex); return ret; } @@ -336,8 +338,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, bool *enabled, int width, int height) { struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); - unsigned long conn_configured, conn_seq, mask; unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); + unsigned long conn_configured, conn_seq; int i, j; bool *save_enabled; bool fallback = true, ret = true; @@ -355,10 +357,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, drm_modeset_backoff(&ctx); memcpy(save_enabled, enabled, count); - mask = GENMASK(count - 1, 0); + conn_seq = GENMASK(count - 1, 0); conn_configured = 0; retry: - conn_seq = conn_configured; for (i = 0; i < count; i++) { struct drm_fb_helper_connector *fb_conn; struct drm_connector *connector; @@ -371,7 +372,8 @@ retry: if (conn_configured & BIT(i)) continue; - if (conn_seq == 0 && !connector->has_tile) + /* First pass, only consider tiled connectors */ + if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile) continue; if (connector->status == connector_status_connected) @@ -475,8 +477,10 @@ retry: conn_configured |= BIT(i); } - if ((conn_configured & mask) != mask && conn_configured != conn_seq) + if (conn_configured != conn_seq) { /* repeat until no more are found */ + conn_seq = conn_configured; goto retry; + } /* * If the BIOS didn't enable everything it could, fall back to have the @@ -679,6 +683,7 @@ int intel_fbdev_init(struct drm_device *dev) if (ifbdev == NULL) return -ENOMEM; + mutex_init(&ifbdev->hpd_lock); drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); if (!intel_fbdev_init_bios(dev, ifbdev)) @@ -752,6 +757,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) intel_fbdev_destroy(ifbdev); } +/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD + * processing, fbdev will perform a full connector reprobe if a hotplug event + * was received while HPD was suspended. 
+ */ +static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) +{ + bool send_hpd = false; + + mutex_lock(&ifbdev->hpd_lock); + ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED; + send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting; + ifbdev->hpd_waiting = false; + mutex_unlock(&ifbdev->hpd_lock); + + if (send_hpd) { + DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n"); + drm_fb_helper_hotplug_event(&ifbdev->helper); + } +} + void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -773,6 +798,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous */ if (state != FBINFO_STATE_RUNNING) flush_work(&dev_priv->fbdev_suspend_work); + console_lock(); } else { /* @@ -800,17 +826,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous drm_fb_helper_set_suspend(&ifbdev->helper, state); console_unlock(); + + intel_fbdev_hpd_set_suspend(ifbdev, state); } void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + bool send_hpd; if (!ifbdev) return; intel_fbdev_sync(ifbdev); - if (ifbdev->vma || ifbdev->helper.deferred_setup) + + mutex_lock(&ifbdev->hpd_lock); + send_hpd = !ifbdev->hpd_suspended; + ifbdev->hpd_waiting = true; + mutex_unlock(&ifbdev->hpd_lock); + + if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup)) drm_fb_helper_hotplug_event(&ifbdev->helper); } diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 77c123cc8817..f33de4be4b89 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -127,8 +127,8 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : - DE_PIPEB_FIFO_UNDERRUN; + u32 bit = (pipe == PIPE_A) ? + DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN; if (enable) ilk_enable_display_irq(dev_priv, bit); @@ -140,7 +140,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - uint32_t err_int = I915_READ(GEN7_ERR_INT); + u32 err_int = I915_READ(GEN7_ERR_INT); lockdep_assert_held(&dev_priv->irq_lock); @@ -193,8 +193,8 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t bit = (pch_transcoder == PIPE_A) ? - SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; + u32 bit = (pch_transcoder == PIPE_A) ? 
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; if (enable) ibx_enable_display_interrupt(dev_priv, bit); @@ -206,7 +206,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pch_transcoder = crtc->pipe; - uint32_t serr_int = I915_READ(SERR_INT); + u32 serr_int = I915_READ(SERR_INT); lockdep_assert_held(&dev_priv->irq_lock); @@ -258,11 +258,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, old = !crtc->cpu_fifo_underrun_disabled; crtc->cpu_fifo_underrun_disabled = !enable; - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); - else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) + else if (IS_GEN_RANGE(dev_priv, 5, 6)) ironlake_set_fifo_underrun_reporting(dev, pipe, enable); - else if (IS_GEN7(dev_priv)) + else if (IS_GEN(dev_priv, 7)) ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); else if (INTEL_GEN(dev_priv) >= 8) broadwell_set_fifo_underrun_reporting(dev, pipe, enable); @@ -369,7 +369,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, return; /* GMCH can't disable fifo underruns, filter them. */ - if (HAS_GMCH_DISPLAY(dev_priv) && + if (HAS_GMCH(dev_priv) && crtc->cpu_fifo_underrun_disabled) return; @@ -421,9 +421,9 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv) if (crtc->cpu_fifo_underrun_disabled) continue; - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) i9xx_check_fifo_underruns(crtc); - else if (IS_GEN7(dev_priv)) + else if (IS_GEN(dev_priv, 7)) ivybridge_check_fifo_underruns(crtc); } diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c index c3379bde266f..16f253deaf8d 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c @@ -60,7 +60,6 @@ * functions is deprecated and should be avoided. 
*/ -#include <drm/drmP.h> #include "intel_drv.h" #include "intel_frontbuffer.h" diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h index 105e2a9e874a..a34ece53a771 100644 --- a/drivers/gpu/drm/i915/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/intel_gpu_commands.h @@ -105,14 +105,18 @@ #define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ -#define MI_SEMAPHORE_POLL (1<<15) -#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) +#define MI_SEMAPHORE_POLL (1 << 15) +#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12) +#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12) +#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12) +#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12) +#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12) +#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2) #define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */ #define MI_USE_GGTT (1 << 22) /* g4x+ */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) -#define MI_STORE_DWORD_INDEX_SHIFT 2 /* * Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 8660af3fd755..3aabfa2d9198 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -54,7 +54,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc) BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); for (i = 0; i < guc->send_regs.count; i++) { - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, + fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore, guc_send_reg(guc, i), FW_REG_READ | FW_REG_WRITE); } @@ -203,11 +203,19 @@ int intel_guc_init(struct intel_guc *guc) goto err_log; GEM_BUG_ON(!guc->ads_vma); + if (HAS_GUC_CT(dev_priv)) { + ret = intel_guc_ct_init(&guc->ct); + if (ret) + goto err_ads; + } + /* We need to notify the guc whenever we change the GGTT */ i915_ggtt_enable_guc(dev_priv); return 0; +err_ads: + intel_guc_ads_destroy(guc); err_log: intel_guc_log_destroy(&guc->log); err_shared: @@ -222,6 +230,10 @@ void intel_guc_fini(struct intel_guc *guc) struct drm_i915_private *dev_priv = guc_to_i915(guc); i915_ggtt_disable_guc(dev_priv); + + if (HAS_GUC_CT(dev_priv)) + intel_guc_ct_fini(&guc->ct); + intel_guc_ads_destroy(guc); intel_guc_log_destroy(&guc->log); guc_shared_data_destroy(guc); @@ -357,14 +369,14 @@ void intel_guc_init_params(struct intel_guc *guc) * they are power context saved so it's ok to release forcewake * when we are done here and take it again at xfer time. 
*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER); I915_WRITE(SOFT_SCRATCH(0), 0); for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) I915_WRITE(SOFT_SCRATCH(1 + i), params[i]); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER); } int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, @@ -386,6 +398,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size) { struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = &dev_priv->uncore; u32 status; int i; int ret; @@ -402,12 +415,12 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER); mutex_lock(&guc->send_mutex); - intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains); + intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains); for (i = 0; i < len; i++) - I915_WRITE(guc_send_reg(guc, i), action[i]); + intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]); - POSTING_READ(guc_send_reg(guc, i - 1)); + intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1)); intel_guc_notify(guc); @@ -415,7 +428,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, * No GuC command should ever take longer than 10ms. * Fast commands should still complete in 10us. */ - ret = __intel_wait_for_register_fw(dev_priv, + ret = __intel_wait_for_register_fw(uncore, guc_send_reg(guc, 0), INTEL_GUC_MSG_TYPE_MASK, INTEL_GUC_MSG_TYPE_RESPONSE << @@ -442,7 +455,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, ret = INTEL_GUC_MSG_TO_DATA(status); out: - intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains); + intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains); mutex_unlock(&guc->send_mutex); return ret; @@ -472,17 +485,25 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc) spin_unlock(&guc->irq_lock); enable_rpm_wakeref_asserts(dev_priv); - intel_guc_to_host_process_recv_msg(guc, msg); + intel_guc_to_host_process_recv_msg(guc, &msg, 1); } -void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg) +int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, + const u32 *payload, u32 len) { + u32 msg; + + if (unlikely(!len)) + return -EPROTO; + /* Make sure to handle only enabled messages */ - msg &= guc->msg_enabled_mask; + msg = payload[0] & guc->msg_enabled_mask; if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) intel_guc_log_handle_flush_event(&guc->log); + + return 0; } int intel_guc_sample_forcewake(struct intel_guc *guc) @@ -544,7 +565,7 @@ static int guc_sleep_state_action(struct intel_guc *guc, if (ret) return ret; - ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14), + ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK, 0, 0, 10, &status); if (ret) diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 0f1c4f9ebfd8..2c59ff8d9f39 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -32,6 +32,7 @@ #include "intel_guc_log.h" #include "intel_guc_reg.h" #include "intel_uc_fw.h" +#include "i915_utils.h" #include "i915_vma.h" struct guc_preempt_work { @@ -164,7 +165,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 
len, void intel_guc_to_host_event_handler(struct intel_guc *guc); void intel_guc_to_host_event_handler_nop(struct intel_guc *guc); void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc); -void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg); +int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, + const u32 *payload, u32 len); int intel_guc_sample_forcewake(struct intel_guc *guc); int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); int intel_guc_suspend(struct intel_guc *guc); @@ -192,4 +194,7 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask) spin_unlock_irq(&guc->irq_lock); } +int intel_guc_reset_engine(struct intel_guc *guc, + struct intel_engine_cs *engine); + #endif diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c index f0db62887f50..bec62f34b15a 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/intel_guc_ads.c @@ -121,8 +121,7 @@ int intel_guc_ads_create(struct intel_guc *guc) * to find it. Note that we have to skip our header (1 page), * because our GuC shared data is there. */ - kernel_ctx_vma = to_intel_context(dev_priv->kernel_context, - dev_priv->engine[RCS])->state; + kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state; blob->ads.golden_context_lrca = intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset; diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c index a52883e9146f..dde1dc0d6e69 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/intel_guc_ct.c @@ -140,11 +140,6 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc, return err; } -static bool ctch_is_open(struct intel_guc_ct_channel *ctch) -{ - return ctch->vma != NULL; -} - static int ctch_init(struct intel_guc *guc, struct intel_guc_ct_channel *ctch) { @@ -214,25 +209,21 @@ err_out: static void ctch_fini(struct intel_guc *guc, struct intel_guc_ct_channel *ctch) { + GEM_BUG_ON(ctch->enabled); + i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP); } -static int ctch_open(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) +static int ctch_enable(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) { u32 base; int err; int i; - CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n", - ctch->owner, yesno(ctch_is_open(ctch))); + GEM_BUG_ON(!ctch->vma); - if (!ctch->vma) { - err = ctch_init(guc, ctch); - if (unlikely(err)) - goto err_out; - GEM_BUG_ON(!ctch->vma); - } + GEM_BUG_ON(ctch->enabled); /* vma should be already allocated and map'ed */ base = intel_guc_ggtt_offset(guc, ctch->vma); @@ -255,7 +246,7 @@ static int ctch_open(struct intel_guc *guc, base + PAGE_SIZE/4 * CTB_RECV, INTEL_GUC_CT_BUFFER_TYPE_RECV); if (unlikely(err)) - goto err_fini; + goto err_out; err = guc_action_register_ct_buffer(guc, base + PAGE_SIZE/4 * CTB_SEND, @@ -263,23 +254,25 @@ static int ctch_open(struct intel_guc *guc, if (unlikely(err)) goto err_deregister; + ctch->enabled = true; + return 0; err_deregister: guc_action_deregister_ct_buffer(guc, ctch->owner, INTEL_GUC_CT_BUFFER_TYPE_RECV); -err_fini: - ctch_fini(guc, ctch); err_out: DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err); return err; } -static void ctch_close(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) +static void ctch_disable(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) { - GEM_BUG_ON(!ctch_is_open(ctch)); + GEM_BUG_ON(!ctch->enabled); + + ctch->enabled = false; 
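	/*
	 * Note the split lifecycle introduced here: ctch_init()/ctch_fini()
	 * allocate and release the channel's backing vma (now called from
	 * intel_guc_ct_init()/intel_guc_ct_fini()), while ctch_enable()/
	 * ctch_disable() only register/deregister the CT buffers with the GuC
	 * and track that state in ctch->enabled. Disabling therefore leaves
	 * the vma allocated for a later re-enable.
	 */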
guc_action_deregister_ct_buffer(guc, ctch->owner, @@ -287,7 +280,6 @@ static void ctch_close(struct intel_guc *guc, guc_action_deregister_ct_buffer(guc, ctch->owner, INTEL_GUC_CT_BUFFER_TYPE_RECV); - ctch_fini(guc, ctch); } static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch) @@ -481,7 +473,7 @@ static int ctch_send(struct intel_guc_ct *ct, u32 fence; int err; - GEM_BUG_ON(!ctch_is_open(ctch)); + GEM_BUG_ON(!ctch->enabled); GEM_BUG_ON(!len); GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); GEM_BUG_ON(!response_buf && response_buf_size); @@ -709,14 +701,15 @@ static void ct_process_request(struct intel_guc_ct *ct, u32 action, u32 len, const u32 *payload) { struct intel_guc *guc = ct_to_guc(ct); + int ret; CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload); switch (action) { case INTEL_GUC_ACTION_DEFAULT: - if (unlikely(len < 1)) + ret = intel_guc_to_host_process_recv_msg(guc, payload, len); + if (unlikely(ret)) goto fail_unexpected; - intel_guc_to_host_process_recv_msg(guc, *payload); break; default: @@ -817,7 +810,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct) u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */ int err = 0; - if (!ctch_is_open(ctch)) + if (!ctch->enabled) return; do { @@ -849,6 +842,51 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) } /** + * intel_guc_ct_init - Init CT communication + * @ct: pointer to CT struct + * + * Allocate memory required for communication via + * the CT channel. + * + * Shall only be called for platforms with HAS_GUC_CT. + * + * Return: 0 on success, a negative errno code on failure. + */ +int intel_guc_ct_init(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + int err; + + err = ctch_init(guc, ctch); + if (unlikely(err)) { + DRM_ERROR("CT: can't open channel %d; err=%d\n", + ctch->owner, err); + return err; + } + + GEM_BUG_ON(!ctch->vma); + return 0; +} + +/** + * intel_guc_ct_fini - Fini CT communication + * @ct: pointer to CT struct + * + * Deallocate memory required for communication via + * the CT channel. + * + * Shall only be called for platforms with HAS_GUC_CT. + */ +void intel_guc_ct_fini(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + + ctch_fini(guc, ctch); +} + +/** * intel_guc_ct_enable - Enable buffer based command transport. * @ct: pointer to CT struct * @@ -865,7 +903,10 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct) GEM_BUG_ON(!HAS_GUC_CT(i915)); - err = ctch_open(guc, ctch); + if (ctch->enabled) + return 0; + + err = ctch_enable(guc, ctch); if (unlikely(err)) return err; @@ -890,10 +931,10 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct) GEM_BUG_ON(!HAS_GUC_CT(i915)); - if (!ctch_is_open(ctch)) + if (!ctch->enabled) return; - ctch_close(guc, ctch); + ctch_disable(guc, ctch); /* Disable send */ guc->send = intel_guc_send_nop; diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h index d774895ab143..f5e7f0663304 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/intel_guc_ct.h @@ -66,6 +66,7 @@ struct intel_guc_ct_channel { struct intel_guc_ct_buffer ctbs[2]; u32 owner; u32 next_fence; + bool enabled; }; /** Holds all command transport channels. 
@@ -90,6 +91,8 @@ struct intel_guc_ct { }; void intel_guc_ct_init_early(struct intel_guc_ct *ct); +int intel_guc_ct_init(struct intel_guc_ct *ct); +void intel_guc_ct_fini(struct intel_guc_ct *ct); int intel_guc_ct_enable(struct intel_guc_ct *ct); void intel_guc_ct_disable(struct intel_guc_ct *ct); diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index a67144ee5ceb..792a551450c7 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -77,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) guc_fw->path = I915_KBL_GUC_UCODE; guc_fw->major_ver_wanted = KBL_FW_MAJOR; guc_fw->minor_ver_wanted = KBL_FW_MINOR; - } else { - dev_info(dev_priv->drm.dev, - "%s: No firmware known for this platform!\n", - intel_uc_fw_type_repr(guc_fw->type)); } } @@ -115,7 +111,7 @@ static void guc_prepare_xfer(struct intel_guc *guc) else I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); - if (IS_GEN9(dev_priv)) { + if (IS_GEN(dev_priv, 9)) { /* DOP Clock Gating Enable for GuC clocks */ I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE | I915_READ(GEN7_MISCCPCTL))); @@ -245,7 +241,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); guc_prepare_xfer(guc); @@ -258,7 +254,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) ret = guc_xfer_ucode(guc, vma); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); return ret; } diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index d3ebdbc0182e..7146524264dd 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -140,6 +140,9 @@ static struct dentry *create_buf_file_callback(const char *filename, buf_file = debugfs_create_file(filename, mode, parent, buf, &relay_file_operations); + if (IS_ERR(buf_file)) + return NULL; + return buf_file; } @@ -436,6 +439,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); + intel_wakeref_t wakeref; guc_read_update_log_buffer(log); @@ -443,9 +447,8 @@ static void guc_log_capture_logs(struct intel_guc_log *log) * Generally device is expected to be active only at this * time, so get/put should be really quick. 
*/ - intel_runtime_pm_get(dev_priv); - guc_action_flush_log_complete(guc); - intel_runtime_pm_put(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) + guc_action_flush_log_complete(guc); } int intel_guc_log_create(struct intel_guc_log *log) @@ -505,7 +508,8 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); - int ret; + intel_wakeref_t wakeref; + int ret = 0; BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); GEM_BUG_ON(!log->vma); @@ -519,16 +523,14 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) mutex_lock(&dev_priv->drm.struct_mutex); - if (log->level == level) { - ret = 0; + if (log->level == level) goto out_unlock; - } - intel_runtime_pm_get(dev_priv); - ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level), - GUC_LOG_LEVEL_IS_ENABLED(level), - GUC_LOG_LEVEL_TO_VERBOSITY(level)); - intel_runtime_pm_put(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) + ret = guc_action_control_log(guc, + GUC_LOG_LEVEL_IS_VERBOSE(level), + GUC_LOG_LEVEL_IS_ENABLED(level), + GUC_LOG_LEVEL_TO_VERBOSITY(level)); if (ret) { DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); goto out_unlock; @@ -601,6 +603,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *i915 = guc_to_i915(guc); + intel_wakeref_t wakeref; /* * Before initiating the forceful flush, wait for any pending/ongoing @@ -608,9 +611,8 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) */ flush_work(&log->relay.flush_work); - intel_runtime_pm_get(i915); - guc_action_flush_log(guc); - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) + guc_action_flush_log(guc); /* GuC would have updated log buffer by now, so capture it */ guc_log_capture_logs(log); @@ -618,7 +620,12 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) void intel_guc_log_relay_close(struct intel_guc_log *log) { + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *i915 = guc_to_i915(guc); + guc_log_disable_flush_events(log); + synchronize_irq(i915->drm.irq); + flush_work(&log->relay.flush_work); mutex_lock(&log->relay.lock); diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 1570dcbe249c..c58922226d47 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -81,6 +81,12 @@ * */ +static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine) +{ + return (i915_ggtt_offset(engine->status_page.vma) + + I915_GEM_HWS_PREEMPT_ADDR); +} + static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); @@ -376,7 +382,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client) desc->db_id = client->doorbell_id; for_each_engine_masked(engine, dev_priv, client->engines, tmp) { - struct intel_context *ce = to_intel_context(ctx, engine); + struct intel_context *ce = intel_context_lookup(ctx, engine); u32 guc_engine_id = engine->guc_id; struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id]; @@ -387,7 +393,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client) * for now who owns a GuC client. But for future owner of GuC * client, need to make sure lrc is pinned prior to enter here. */ - if (!ce->state) + if (!ce || !ce->state) break; /* XXX: continue? 
*/ /* @@ -529,7 +535,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) spin_lock(&client->wq_lock); guc_wq_item_append(client, engine->guc_id, ctx_desc, - ring_tail, rq->global_seqno); + ring_tail, rq->fence.seqno); guc_ring_doorbell(client); client->submissions[engine->id] += 1; @@ -561,7 +567,7 @@ static void inject_preempt_context(struct work_struct *work) preempt_work[engine->id]); struct intel_guc_client *client = guc->preempt_client; struct guc_stage_desc *stage_desc = __get_stage_desc(client); - struct intel_context *ce = to_intel_context(client->owner, engine); + struct intel_context *ce = intel_context_lookup(client->owner, engine); u32 data[7]; if (!ce->ring->emit) { /* recreate upon load/resume */ @@ -569,14 +575,16 @@ static void inject_preempt_context(struct work_struct *work) u32 *cs; cs = ce->ring->vaddr; - if (engine->id == RCS) { + if (engine->class == RENDER_CLASS) { cs = gen8_emit_ggtt_write_rcs(cs, GUC_PREEMPT_FINISHED, - addr); + addr, + PIPE_CONTROL_CS_STALL); } else { cs = gen8_emit_ggtt_write(cs, GUC_PREEMPT_FINISHED, - addr); + addr, + 0); *cs++ = MI_NOOP; *cs++ = MI_NOOP; } @@ -622,6 +630,8 @@ static void inject_preempt_context(struct work_struct *work) EXECLISTS_ACTIVE_PREEMPT); tasklet_schedule(&engine->execlists.tasklet); } + + (void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++); } /* @@ -665,7 +675,7 @@ static void complete_preempt_context(struct intel_engine_cs *engine) execlists_unwind_incomplete_requests(execlists); wait_for_guc_preempt_report(engine); - intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0); + intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0); } /** @@ -711,7 +721,7 @@ static inline int rq_prio(const struct i915_request *rq) static inline int port_prio(const struct execlist_port *port) { - return rq_prio(port_request(port)); + return rq_prio(port_request(port)) | __NO_PREEMPTION; } static bool __guc_dequeue(struct intel_engine_cs *engine) @@ -730,7 +740,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) if (intel_engine_has_preemption(engine)) { struct guc_preempt_work *preempt_work = &engine->i915->guc.preempt_work[engine->id]; - int prio = execlists->queue_priority; + int prio = execlists->queue_priority_hint; if (__execlists_need_preempt(prio, port_prio(port))) { execlists_set_active(execlists, @@ -772,11 +782,11 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) } rb_erase_cached(&p->node, &execlists->queue); - if (p->priority != I915_PRIORITY_NORMAL) - kmem_cache_free(engine->i915->priorities, p); + i915_priolist_free(p); } done: - execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN; + execlists->queue_priority_hint = + rb ? 
to_priolist(rb)->priority : INT_MIN; if (submit) port_assign(port, last); if (last) @@ -823,7 +833,7 @@ static void guc_submission_tasklet(unsigned long data) } if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) && - intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) == + intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) == GUC_PREEMPT_FINISHED) complete_preempt_context(engine); @@ -833,8 +843,7 @@ static void guc_submission_tasklet(unsigned long data) spin_unlock_irqrestore(&engine->timeline.lock, flags); } -static struct i915_request * -guc_reset_prepare(struct intel_engine_cs *engine) +static void guc_reset_prepare(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -860,8 +869,6 @@ guc_reset_prepare(struct intel_engine_cs *engine) */ if (engine->i915->guc.preempt_wq) flush_workqueue(engine->i915->guc.preempt_wq); - - return i915_gem_find_active_request(engine); } /* @@ -1024,7 +1031,7 @@ static int guc_clients_create(struct intel_guc *guc) GEM_BUG_ON(guc->preempt_client); client = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->ring_mask, + INTEL_INFO(dev_priv)->engine_mask, GUC_CLIENT_PRIORITY_KMD_NORMAL, dev_priv->kernel_context); if (IS_ERR(client)) { @@ -1035,7 +1042,7 @@ static int guc_clients_create(struct intel_guc *guc) if (dev_priv->preempt_context) { client = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->ring_mask, + INTEL_INFO(dev_priv)->engine_mask, GUC_CLIENT_PRIORITY_KMD_HIGH, dev_priv->preempt_context); if (IS_ERR(client)) { @@ -1255,10 +1262,12 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv) static void guc_submission_park(struct intel_engine_cs *engine) { intel_engine_unpin_breadcrumbs_irq(engine); + engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; } static void guc_submission_unpark(struct intel_engine_cs *engine) { + engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; intel_engine_pin_breadcrumbs_irq(engine); } diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h index 169c54568340..aa5e6749c925 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/intel_guc_submission.h @@ -29,6 +29,7 @@ #include "i915_gem.h" #include "i915_selftest.h" +#include "intel_engine_types.h" struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index e26d05a46451..3d51ed1428d4 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -23,144 +23,18 @@ */ #include "i915_drv.h" +#include "i915_reset.h" -static bool -ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr) -{ - ipehr &= ~MI_SEMAPHORE_SYNC_MASK; - return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | - MI_SEMAPHORE_REGISTER); -} - -static struct intel_engine_cs * -semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, - u64 offset) -{ - struct drm_i915_private *dev_priv = engine->i915; - u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; - struct intel_engine_cs *signaller; - enum intel_engine_id id; - - for_each_engine(signaller, dev_priv, id) { - if (engine == signaller) - continue; - - if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id]) - return signaller; - } - - DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x\n", - engine->name, ipehr); - - return ERR_PTR(-ENODEV); -} - -static struct intel_engine_cs * -semaphore_waits_for(struct intel_engine_cs *engine, u32 
*seqno) -{ - struct drm_i915_private *dev_priv = engine->i915; - void __iomem *vaddr; - u32 cmd, ipehr, head; - u64 offset = 0; - int i, backwards; - - /* - * This function does not support execlist mode - any attempt to - * proceed further into this function will result in a kernel panic - * when dereferencing ring->buffer, which is not set up in execlist - * mode. - * - * The correct way of doing it would be to derive the currently - * executing ring buffer from the current context, which is derived - * from the currently running request. Unfortunately, to get the - * current request we would have to grab the struct_mutex before doing - * anything else, which would be ill-advised since some other thread - * might have grabbed it already and managed to hang itself, causing - * the hang checker to deadlock. - * - * Therefore, this function does not support execlist mode in its - * current form. Just return NULL and move on. - */ - if (engine->buffer == NULL) - return NULL; - - ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); - if (!ipehr_is_semaphore_wait(engine, ipehr)) - return NULL; - - /* - * HEAD is likely pointing to the dword after the actual command, - * so scan backwards until we find the MBOX. But limit it to just 3 - * or 4 dwords depending on the semaphore wait command size. - * Note that we don't care about ACTHD here since that might - * point at at batch, and semaphores are always emitted into the - * ringbuffer itself. - */ - head = I915_READ_HEAD(engine) & HEAD_ADDR; - backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4; - vaddr = (void __iomem *)engine->buffer->vaddr; - - for (i = backwards; i; --i) { - /* - * Be paranoid and presume the hw has gone off into the wild - - * our ring is smaller than what the hardware (and hence - * HEAD_ADDR) allows. Also handles wrap-around. 
- */ - head &= engine->buffer->size - 1; - - /* This here seems to blow up */ - cmd = ioread32(vaddr + head); - if (cmd == ipehr) - break; - - head -= 4; - } - - if (!i) - return NULL; - - *seqno = ioread32(vaddr + head + 4) + 1; - return semaphore_wait_to_signaller_ring(engine, ipehr, offset); -} - -static int semaphore_passed(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - struct intel_engine_cs *signaller; +struct hangcheck { + u64 acthd; u32 seqno; - - engine->hangcheck.deadlock++; - - signaller = semaphore_waits_for(engine, &seqno); - if (signaller == NULL) - return -1; - - if (IS_ERR(signaller)) - return 0; - - /* Prevent pathological recursion due to driver bugs */ - if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) - return -1; - - if (intel_engine_signaled(signaller, seqno)) - return 1; - - /* cursory check for an unkickable deadlock */ - if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && - semaphore_passed(signaller) < 0) - return -1; - - return 0; -} - -static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, dev_priv, id) - engine->hangcheck.deadlock = 0; -} + enum intel_engine_hangcheck_action action; + unsigned long action_timestamp; + int deadlock; + struct intel_instdone instdone; + bool wedged:1; + bool stalled:1; +}; static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) { @@ -182,7 +56,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) int slice; int subslice; - if (engine->id != RCS) + if (engine->id != RCS0) return true; intel_engine_get_instdone(engine, &instdone); @@ -236,7 +110,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) if (ha != ENGINE_DEAD) return ha; - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) return ENGINE_DEAD; /* Is the chip hanging on a WAIT_FOR_EVENT? @@ -244,64 +118,36 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) * and break the hang. This should work on * all but the second generation chipsets. */ - tmp = I915_READ_CTL(engine); + tmp = ENGINE_READ(engine, RING_CTL); if (tmp & RING_WAIT) { - i915_handle_error(dev_priv, BIT(engine->id), 0, + i915_handle_error(dev_priv, engine->mask, 0, "stuck wait on %s", engine->name); - I915_WRITE_CTL(engine, tmp); + ENGINE_WRITE(engine, RING_CTL, tmp); return ENGINE_WAIT_KICK; } - if (IS_GEN(dev_priv, 6, 7) && tmp & RING_WAIT_SEMAPHORE) { - switch (semaphore_passed(engine)) { - default: - return ENGINE_DEAD; - case 1: - i915_handle_error(dev_priv, ALL_ENGINES, 0, - "stuck semaphore on %s", - engine->name); - I915_WRITE_CTL(engine, tmp); - return ENGINE_WAIT_KICK; - case 0: - return ENGINE_WAIT; - } - } - return ENGINE_DEAD; } static void hangcheck_load_sample(struct intel_engine_cs *engine, - struct intel_engine_hangcheck *hc) + struct hangcheck *hc) { - /* We don't strictly need an irq-barrier here, as we are not - * serving an interrupt request, be paranoid in case the - * barrier has side-effects (such as preventing a broken - * cacheline snoop) and so be sure that we can see the seqno - * advance. If the seqno should stick, due to a stale - * cacheline, we would erroneously declare the GPU hung. 
- */ - if (engine->irq_seqno_barrier) - engine->irq_seqno_barrier(engine); - hc->acthd = intel_engine_get_active_head(engine); - hc->seqno = intel_engine_get_seqno(engine); + hc->seqno = intel_engine_get_hangcheck_seqno(engine); } static void hangcheck_store_sample(struct intel_engine_cs *engine, - const struct intel_engine_hangcheck *hc) + const struct hangcheck *hc) { engine->hangcheck.acthd = hc->acthd; - engine->hangcheck.seqno = hc->seqno; - engine->hangcheck.action = hc->action; - engine->hangcheck.stalled = hc->stalled; - engine->hangcheck.wedged = hc->wedged; + engine->hangcheck.last_seqno = hc->seqno; } static enum intel_engine_hangcheck_action hangcheck_get_action(struct intel_engine_cs *engine, - const struct intel_engine_hangcheck *hc) + const struct hangcheck *hc) { - if (engine->hangcheck.seqno != hc->seqno) + if (engine->hangcheck.last_seqno != hc->seqno) return ENGINE_ACTIVE_SEQNO; if (intel_engine_is_idle(engine)) @@ -311,7 +157,7 @@ hangcheck_get_action(struct intel_engine_cs *engine, } static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, - struct intel_engine_hangcheck *hc) + struct hangcheck *hc) { unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT; @@ -357,10 +203,6 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, break; case ENGINE_DEAD: - if (GEM_SHOW_DEBUG()) { - struct drm_printer p = drm_debug_printer("hangcheck"); - intel_engine_dump(engine, &p, "%s\n", engine->name); - } break; default: @@ -379,8 +221,8 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915, unsigned int stuck) { struct intel_engine_cs *engine; + intel_engine_mask_t tmp; char msg[80]; - unsigned int tmp; int len; /* If some rings hung but others were still busy, only @@ -421,32 +263,43 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (!READ_ONCE(dev_priv->gt.awake)) return; - if (i915_terminally_wedged(&dev_priv->gpu_error)) + if (i915_terminally_wedged(dev_priv)) return; /* As enabling the GPU requires fairly extensive mmio access, * periodically arm the mmio checker to see if we are triggering * any invalid access. 
*/ - intel_uncore_arm_unclaimed_mmio_detection(dev_priv); + intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); for_each_engine(engine, dev_priv, id) { - struct intel_engine_hangcheck hc; + struct hangcheck hc; - semaphore_clear_deadlocks(dev_priv); + intel_engine_signal_breadcrumbs(engine); hangcheck_load_sample(engine, &hc); hangcheck_accumulate_sample(engine, &hc); hangcheck_store_sample(engine, &hc); - if (engine->hangcheck.stalled) { - hung |= intel_engine_flag(engine); + if (hc.stalled) { + hung |= engine->mask; if (hc.action != ENGINE_DEAD) - stuck |= intel_engine_flag(engine); + stuck |= engine->mask; } - if (engine->hangcheck.wedged) - wedged |= intel_engine_flag(engine); + if (hc.wedged) + wedged |= engine->mask; + } + + if (GEM_SHOW_DEBUG() && (hung | stuck)) { + struct drm_printer p = drm_debug_printer("hangcheck"); + + for_each_engine(engine, dev_priv, id) { + if (intel_engine_is_idle(engine)) + continue; + + intel_engine_dump(engine, &p, "%s\n", engine->name); + } } if (wedged) { diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c index 1bf487f94254..86965fa37739 100644 --- a/drivers/gpu/drm/i915/intel_hdcp.c +++ b/drivers/gpu/drm/i915/intel_hdcp.c @@ -6,15 +6,18 @@ * Sean Paul <seanpaul@chromium.org> */ -#include <drm/drmP.h> #include <drm/drm_hdcp.h> +#include <drm/i915_component.h> #include <linux/i2c.h> #include <linux/random.h> +#include <linux/component.h> #include "intel_drv.h" #include "i915_reg.h" #define KEY_LOAD_TRIES 5 +#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50 +#define HDCP2_LC_RETRY_CNT 3 static bool intel_hdcp_is_ksv_valid(u8 *ksv) @@ -72,6 +75,52 @@ bool intel_hdcp_capable(struct intel_connector *connector) return capable; } +/* Is HDCP2.2 capable on Platform and Sink */ +static bool intel_hdcp2_capable(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + bool capable = false; + + /* I915 support for HDCP2.2 */ + if (!hdcp->hdcp2_supported) + return false; + + /* MEI interface is solid */ + mutex_lock(&dev_priv->hdcp_comp_mutex); + if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return false; + } + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + /* Sink's capability for HDCP2.2 */ + hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable); + + return capable; +} + +static inline bool intel_hdcp_in_use(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + enum port port = connector->encoder->port; + u32 reg; + + reg = I915_READ(PORT_HDCP_STATUS(port)); + return reg & HDCP_STATUS_ENC; +} + +static inline bool intel_hdcp2_in_use(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + enum port port = connector->encoder->port; + u32 reg; + + reg = I915_READ(HDCP2_STATUS_DDI(port)); + return reg & LINK_ENCRYPTION_STATUS; +} + static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, const struct intel_hdcp_shim *shim) { @@ -157,10 +206,11 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) /* * Initiate loading the HDCP key from fuses. * - * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL - * differ in the key load trigger process from other platforms. + * BXT+ platforms, HDCP key needs to be loaded by SW. 
Only Gen 9 + * platforms except BXT and GLK, differ in the key load trigger process + * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f. */ - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_GEN9_BC(dev_priv)) { mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1); @@ -175,7 +225,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) } /* Wait for the keys to load (500us) */ - ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS, + ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, 10, 1, &val); if (ret) @@ -193,7 +243,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text) { I915_WRITE(HDCP_SHA_TEXT, sha_text); - if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, + if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL, HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) { DRM_ERROR("Timed out waiting for SHA1 ready\n"); return -ETIMEDOUT; @@ -424,7 +474,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, /* Tell the HW we're done with the hash and wait for it to ACK */ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); - if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, + if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL, HDCP_SHA1_COMPLETE, HDCP_SHA1_COMPLETE, 1)) { DRM_ERROR("Timed out waiting for SHA1 complete\n"); @@ -554,7 +604,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN); /* Wait for An to be acquired */ - if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), + if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port), HDCP_STATUS_AN_READY, HDCP_STATUS_AN_READY, 1)) { DRM_ERROR("Timed out waiting for An\n"); @@ -635,8 +685,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, } /* Wait for encryption confirmation */ - if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), - HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) { + if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port), + HDCP_STATUS_ENC, HDCP_STATUS_ENC, + ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { DRM_ERROR("Timed out waiting for encryption\n"); return -ETIMEDOUT; } @@ -664,9 +715,11 @@ static int _intel_hdcp_disable(struct intel_connector *connector) DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n", connector->base.name, connector->base.base.id); + hdcp->hdcp_encrypted = false; I915_WRITE(PORT_HDCP_CONF(port), 0); - if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0, - 20)) { + if (intel_wait_for_register(&dev_priv->uncore, + PORT_HDCP_STATUS(port), ~0, 0, + ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { DRM_ERROR("Failed to disable HDCP, timeout clearing status\n"); return -ETIMEDOUT; } @@ -709,8 +762,10 @@ static int _intel_hdcp_enable(struct intel_connector *connector) /* Incase of authentication failures, HDCP spec expects reauth. 
*/ for (i = 0; i < tries; i++) { ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim); - if (!ret) + if (!ret) { + hdcp->hdcp_encrypted = true; return 0; + } DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret); @@ -728,16 +783,64 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) return container_of(hdcp, struct intel_connector, hdcp); } -static void intel_hdcp_check_work(struct work_struct *work) +/* Implements Part 3 of the HDCP authorization procedure */ +static int intel_hdcp_check_link(struct intel_connector *connector) { - struct intel_hdcp *hdcp = container_of(to_delayed_work(work), - struct intel_hdcp, - check_work); - struct intel_connector *connector = intel_hdcp_to_connector(hdcp); + struct intel_hdcp *hdcp = &connector->hdcp; + struct drm_i915_private *dev_priv = connector->base.dev->dev_private; + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + enum port port = intel_dig_port->base.port; + int ret = 0; - if (!intel_hdcp_check_link(connector)) - schedule_delayed_work(&hdcp->check_work, - DRM_HDCP_CHECK_PERIOD_MS); + mutex_lock(&hdcp->mutex); + + /* Check_link valid only when HDCP1.4 is enabled */ + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || + !hdcp->hdcp_encrypted) { + ret = -EINVAL; + goto out; + } + + if (WARN_ON(!intel_hdcp_in_use(connector))) { + DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n", + connector->base.name, connector->base.base.id, + I915_READ(PORT_HDCP_STATUS(port))); + ret = -ENXIO; + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + + if (hdcp->shim->check_link(intel_dig_port)) { + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&hdcp->prop_work); + } + goto out; + } + + DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n", + connector->base.name, connector->base.base.id); + + ret = _intel_hdcp_disable(connector); + if (ret) { + DRM_ERROR("Failed to disable hdcp (%d)\n", ret); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + + ret = _intel_hdcp_enable(connector); + if (ret) { + DRM_ERROR("Failed to enable hdcp (%d)\n", ret); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + +out: + mutex_unlock(&hdcp->mutex); + return ret; } static void intel_hdcp_prop_work(struct work_struct *work) @@ -768,18 +871,932 @@ static void intel_hdcp_prop_work(struct work_struct *work) bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) { /* PORT E doesn't have HDCP, and PORT F is disabled */ - return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) && - !IS_CHERRYVIEW(dev_priv) && port < PORT_E); + return INTEL_GEN(dev_priv) >= 9 && port < PORT_E; +} + +static int +hdcp2_prepare_ake_init(struct intel_connector *connector, + struct hdcp2_ake_init *ake_data) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data); + if (ret) + DRM_DEBUG_KMS("Prepare_ake_init failed. 
%d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, + struct hdcp2_ake_send_cert *rx_cert, + bool *paired, + struct hdcp2_ake_no_stored_km *ek_pub_km, + size_t *msg_sz) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data, + rx_cert, paired, + ek_pub_km, msg_sz); + if (ret < 0) + DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int hdcp2_verify_hprime(struct intel_connector *connector, + struct hdcp2_ake_send_hprime *rx_hprime) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime); + if (ret < 0) + DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_store_pairing_info(struct intel_connector *connector, + struct hdcp2_ake_send_pairing_info *pairing_info) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info); + if (ret < 0) + DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_prepare_lc_init(struct intel_connector *connector, + struct hdcp2_lc_init *lc_init) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init); + if (ret < 0) + DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_verify_lprime(struct intel_connector *connector, + struct hdcp2_lc_send_lprime *rx_lprime) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime); + if (ret < 0) + DRM_DEBUG_KMS("Verify L_Prime failed. 
%d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int hdcp2_prepare_skey(struct intel_connector *connector, + struct hdcp2_ske_send_eks *ske_data) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data); + if (ret < 0) + DRM_DEBUG_KMS("Get session key failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack *rep_send_ack) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data, + rep_topology, + rep_send_ack); + if (ret < 0) + DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_verify_mprime(struct intel_connector *connector, + struct hdcp2_rep_stream_ready *stream_ready) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready); + if (ret < 0) + DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int hdcp2_authenticate_port(struct intel_connector *connector) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data); + if (ret < 0) + DRM_DEBUG_KMS("Enable hdcp auth failed. 
%d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int hdcp2_close_mei_session(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->close_hdcp_session(comp->mei_dev, + &connector->hdcp.port_data); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int hdcp2_deauthenticate_port(struct intel_connector *connector) +{ + return hdcp2_close_mei_session(connector); +} + +/* Authentication flow starts from here */ +static int hdcp2_authentication_key_exchange(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + union { + struct hdcp2_ake_init ake_init; + struct hdcp2_ake_send_cert send_cert; + struct hdcp2_ake_no_stored_km no_stored_km; + struct hdcp2_ake_send_hprime send_hprime; + struct hdcp2_ake_send_pairing_info pairing_info; + } msgs; + const struct intel_hdcp_shim *shim = hdcp->shim; + size_t size; + int ret; + + /* Init for seq_num */ + hdcp->seq_num_v = 0; + hdcp->seq_num_m = 0; + + ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init); + if (ret < 0) + return ret; + + ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init, + sizeof(msgs.ake_init)); + if (ret < 0) + return ret; + + ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT, + &msgs.send_cert, sizeof(msgs.send_cert)); + if (ret < 0) + return ret; + + if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) + return -EINVAL; + + hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); + + /* + * Here msgs.no_stored_km will hold msgs corresponding to the km + * stored also. 
+ */ + ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert, + &hdcp->is_paired, + &msgs.no_stored_km, &size); + if (ret < 0) + return ret; + + ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size); + if (ret < 0) + return ret; + + ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME, + &msgs.send_hprime, sizeof(msgs.send_hprime)); + if (ret < 0) + return ret; + + ret = hdcp2_verify_hprime(connector, &msgs.send_hprime); + if (ret < 0) + return ret; + + if (!hdcp->is_paired) { + /* Pairing is required */ + ret = shim->read_2_2_msg(intel_dig_port, + HDCP_2_2_AKE_SEND_PAIRING_INFO, + &msgs.pairing_info, + sizeof(msgs.pairing_info)); + if (ret < 0) + return ret; + + ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info); + if (ret < 0) + return ret; + hdcp->is_paired = true; + } + + return 0; +} + +static int hdcp2_locality_check(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + union { + struct hdcp2_lc_init lc_init; + struct hdcp2_lc_send_lprime send_lprime; + } msgs; + const struct intel_hdcp_shim *shim = hdcp->shim; + int tries = HDCP2_LC_RETRY_CNT, ret, i; + + for (i = 0; i < tries; i++) { + ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init); + if (ret < 0) + continue; + + ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init, + sizeof(msgs.lc_init)); + if (ret < 0) + continue; + + ret = shim->read_2_2_msg(intel_dig_port, + HDCP_2_2_LC_SEND_LPRIME, + &msgs.send_lprime, + sizeof(msgs.send_lprime)); + if (ret < 0) + continue; + + ret = hdcp2_verify_lprime(connector, &msgs.send_lprime); + if (!ret) + break; + } + + return ret; +} + +static int hdcp2_session_key_exchange(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + struct hdcp2_ske_send_eks send_eks; + int ret; + + ret = hdcp2_prepare_skey(connector, &send_eks); + if (ret < 0) + return ret; + + ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks, + sizeof(send_eks)); + if (ret < 0) + return ret; + + return 0; +} + +static +int hdcp2_propagate_stream_management_info(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + union { + struct hdcp2_rep_stream_manage stream_manage; + struct hdcp2_rep_stream_ready stream_ready; + } msgs; + const struct intel_hdcp_shim *shim = hdcp->shim; + int ret; + + /* Prepare RepeaterAuth_Stream_Manage msg */ + msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE; + drm_hdcp2_u32_to_seq_num(msgs.stream_manage.seq_num_m, hdcp->seq_num_m); + + /* K no of streams is fixed as 1. Stored as big-endian. */ + msgs.stream_manage.k = cpu_to_be16(1); + + /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. 
*/ + msgs.stream_manage.streams[0].stream_id = 0; + msgs.stream_manage.streams[0].stream_type = hdcp->content_type; + + /* Send it to Repeater */ + ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage, + sizeof(msgs.stream_manage)); + if (ret < 0) + return ret; + + ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY, + &msgs.stream_ready, sizeof(msgs.stream_ready)); + if (ret < 0) + return ret; + + hdcp->port_data.seq_num_m = hdcp->seq_num_m; + hdcp->port_data.streams[0].stream_type = hdcp->content_type; + + ret = hdcp2_verify_mprime(connector, &msgs.stream_ready); + if (ret < 0) + return ret; + + hdcp->seq_num_m++; + + if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { + DRM_DEBUG_KMS("seq_num_m roll over.\n"); + return -1; + } + + return 0; +} + +static +int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + union { + struct hdcp2_rep_send_receiverid_list recvid_list; + struct hdcp2_rep_send_ack rep_ack; + } msgs; + const struct intel_hdcp_shim *shim = hdcp->shim; + u8 *rx_info; + u32 seq_num_v; + int ret; + + ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST, + &msgs.recvid_list, sizeof(msgs.recvid_list)); + if (ret < 0) + return ret; + + rx_info = msgs.recvid_list.rx_info; + + if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || + HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { + DRM_DEBUG_KMS("Topology Max Size Exceeded\n"); + return -EINVAL; + } + + /* Converting and Storing the seq_num_v to local variable as DWORD */ + seq_num_v = drm_hdcp2_seq_num_to_u32(msgs.recvid_list.seq_num_v); + + if (seq_num_v < hdcp->seq_num_v) { + /* Roll over of the seq_num_v from repeater. Reauthenticate. */ + DRM_DEBUG_KMS("Seq_num_v roll over.\n"); + return -EINVAL; + } + + ret = hdcp2_verify_rep_topology_prepare_ack(connector, + &msgs.recvid_list, + &msgs.rep_ack); + if (ret < 0) + return ret; + + hdcp->seq_num_v = seq_num_v; + ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack, + sizeof(msgs.rep_ack)); + if (ret < 0) + return ret; + + return 0; +} + +static int hdcp2_authenticate_repeater(struct intel_connector *connector) +{ + int ret; + + ret = hdcp2_authenticate_repeater_topology(connector); + if (ret < 0) + return ret; + + return hdcp2_propagate_stream_management_info(connector); +} + +static int hdcp2_authenticate_sink(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + const struct intel_hdcp_shim *shim = hdcp->shim; + int ret; + + ret = hdcp2_authentication_key_exchange(connector); + if (ret < 0) { + DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret); + return ret; + } + + ret = hdcp2_locality_check(connector); + if (ret < 0) { + DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret); + return ret; + } + + ret = hdcp2_session_key_exchange(connector); + if (ret < 0) { + DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret); + return ret; + } + + if (shim->config_stream_type) { + ret = shim->config_stream_type(intel_dig_port, + hdcp->is_repeater, + hdcp->content_type); + if (ret < 0) + return ret; + } + + if (hdcp->is_repeater) { + ret = hdcp2_authenticate_repeater(connector); + if (ret < 0) { + DRM_DEBUG_KMS("Repeater Auth Failed. 
Err: %d\n", ret); + return ret; + } + } + + hdcp->port_data.streams[0].stream_type = hdcp->content_type; + ret = hdcp2_authenticate_port(connector); + if (ret < 0) + return ret; + + return ret; +} + +static int hdcp2_enable_encryption(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_hdcp *hdcp = &connector->hdcp; + enum port port = connector->encoder->port; + int ret; + + WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS); + + if (hdcp->shim->toggle_signalling) { + ret = hdcp->shim->toggle_signalling(intel_dig_port, true); + if (ret) { + DRM_ERROR("Failed to enable HDCP signalling. %d\n", + ret); + return ret; + } + } + + if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) { + /* Link is Authenticated. Now set for Encryption */ + I915_WRITE(HDCP2_CTL_DDI(port), + I915_READ(HDCP2_CTL_DDI(port)) | + CTL_LINK_ENCRYPTION_REQ); + } + + ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port), + LINK_ENCRYPTION_STATUS, + LINK_ENCRYPTION_STATUS, + ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); + + return ret; +} + +static int hdcp2_disable_encryption(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_hdcp *hdcp = &connector->hdcp; + enum port port = connector->encoder->port; + int ret; + + WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS)); + + I915_WRITE(HDCP2_CTL_DDI(port), + I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ); + + ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port), + LINK_ENCRYPTION_STATUS, 0x0, + ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); + if (ret == -ETIMEDOUT) + DRM_DEBUG_KMS("Disable Encryption Timedout"); + + if (hdcp->shim->toggle_signalling) { + ret = hdcp->shim->toggle_signalling(intel_dig_port, false); + if (ret) { + DRM_ERROR("Failed to disable HDCP signalling. %d\n", + ret); + return ret; + } + } + + return ret; +} + +static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) +{ + int ret, i, tries = 3; + + for (i = 0; i < tries; i++) { + ret = hdcp2_authenticate_sink(connector); + if (!ret) + break; + + /* Clearing the mei hdcp session */ + DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n", + i + 1, tries, ret); + if (hdcp2_deauthenticate_port(connector) < 0) + DRM_DEBUG_KMS("Port deauth failed.\n"); + } + + if (i != tries) { + /* + * Ensuring the required 200mSec min time interval between + * Session Key Exchange and encryption. + */ + msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN); + ret = hdcp2_enable_encryption(connector); + if (ret < 0) { + DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret); + if (hdcp2_deauthenticate_port(connector) < 0) + DRM_DEBUG_KMS("Port deauth failed.\n"); + } + } + + return ret; +} + +static int _intel_hdcp2_enable(struct intel_connector *connector) +{ + struct intel_hdcp *hdcp = &connector->hdcp; + int ret; + + DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n", + connector->base.name, connector->base.base.id, + hdcp->content_type); + + ret = hdcp2_authenticate_and_encrypt(connector); + if (ret) { + DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n", + hdcp->content_type, ret); + return ret; + } + + DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. 
Type %d\n", + connector->base.name, connector->base.base.id, + hdcp->content_type); + + hdcp->hdcp2_encrypted = true; + return 0; +} + +static int _intel_hdcp2_disable(struct intel_connector *connector) +{ + int ret; + + DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n", + connector->base.name, connector->base.base.id); + + ret = hdcp2_disable_encryption(connector); + + if (hdcp2_deauthenticate_port(connector) < 0) + DRM_DEBUG_KMS("Port deauth failed.\n"); + + connector->hdcp.hdcp2_encrypted = false; + + return ret; +} + +/* Implements the Link Integrity Check for HDCP2.2 */ +static int intel_hdcp2_check_link(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_hdcp *hdcp = &connector->hdcp; + enum port port = connector->encoder->port; + int ret = 0; + + mutex_lock(&hdcp->mutex); + + /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || + !hdcp->hdcp2_encrypted) { + ret = -EINVAL; + goto out; + } + + if (WARN_ON(!intel_hdcp2_in_use(connector))) { + DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n", + I915_READ(HDCP2_STATUS_DDI(port))); + ret = -ENXIO; + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + + ret = hdcp->shim->check_2_2_link(intel_dig_port); + if (ret == HDCP_LINK_PROTECTED) { + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&hdcp->prop_work); + } + goto out; + } + + if (ret == HDCP_TOPOLOGY_CHANGE) { + if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + goto out; + + DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n"); + ret = hdcp2_authenticate_repeater_topology(connector); + if (!ret) { + hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&hdcp->prop_work); + goto out; + } + DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n", + connector->base.name, connector->base.base.id, + ret); + } else { + DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n", + connector->base.name, connector->base.base.id); + } + + ret = _intel_hdcp2_disable(connector); + if (ret) { + DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n", + connector->base.name, connector->base.base.id, ret); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + + ret = _intel_hdcp2_enable(connector); + if (ret) { + DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n", + connector->base.name, connector->base.base.id, + ret); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + +out: + mutex_unlock(&hdcp->mutex); + return ret; +} + +static void intel_hdcp_check_work(struct work_struct *work) +{ + struct intel_hdcp *hdcp = container_of(to_delayed_work(work), + struct intel_hdcp, + check_work); + struct intel_connector *connector = intel_hdcp_to_connector(hdcp); + + if (!intel_hdcp2_check_link(connector)) + schedule_delayed_work(&hdcp->check_work, + DRM_HDCP2_CHECK_PERIOD_MS); + else if (!intel_hdcp_check_link(connector)) + schedule_delayed_work(&hdcp->check_work, + DRM_HDCP_CHECK_PERIOD_MS); +} + +static int i915_hdcp_component_bind(struct device *i915_kdev, + struct device *mei_kdev, void *data) +{ + struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); + + DRM_DEBUG("I915 HDCP comp bind\n"); + 
mutex_lock(&dev_priv->hdcp_comp_mutex); + dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data; + dev_priv->hdcp_master->mei_dev = mei_kdev; + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return 0; +} + +static void i915_hdcp_component_unbind(struct device *i915_kdev, + struct device *mei_kdev, void *data) +{ + struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); + + DRM_DEBUG("I915 HDCP comp unbind\n"); + mutex_lock(&dev_priv->hdcp_comp_mutex); + dev_priv->hdcp_master = NULL; + mutex_unlock(&dev_priv->hdcp_comp_mutex); +} + +static const struct component_ops i915_hdcp_component_ops = { + .bind = i915_hdcp_component_bind, + .unbind = i915_hdcp_component_unbind, +}; + +static inline int initialize_hdcp_port_data(struct intel_connector *connector) +{ + struct intel_hdcp *hdcp = &connector->hdcp; + struct hdcp_port_data *data = &hdcp->port_data; + + data->port = connector->encoder->port; + data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; + data->protocol = (u8)hdcp->shim->protocol; + + data->k = 1; + if (!data->streams) + data->streams = kcalloc(data->k, + sizeof(struct hdcp2_streamid_type), + GFP_KERNEL); + if (!data->streams) { + DRM_ERROR("Out of Memory\n"); + return -ENOMEM; + } + + data->streams[0].stream_id = 0; + data->streams[0].stream_type = hdcp->content_type; + + return 0; +} + +static bool is_hdcp2_supported(struct drm_i915_private *dev_priv) +{ + if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) + return false; + + return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || + IS_KABYLAKE(dev_priv)); +} + +void intel_hdcp_component_init(struct drm_i915_private *dev_priv) +{ + int ret; + + if (!is_hdcp2_supported(dev_priv)) + return; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + WARN_ON(dev_priv->hdcp_comp_added); + + dev_priv->hdcp_comp_added = true; + mutex_unlock(&dev_priv->hdcp_comp_mutex); + ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops, + I915_COMPONENT_HDCP); + if (ret < 0) { + DRM_DEBUG_KMS("Failed at component add(%d)\n", ret); + mutex_lock(&dev_priv->hdcp_comp_mutex); + dev_priv->hdcp_comp_added = false; + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return; + } +} + +static void intel_hdcp2_init(struct intel_connector *connector) +{ + struct intel_hdcp *hdcp = &connector->hdcp; + int ret; + + ret = initialize_hdcp_port_data(connector); + if (ret) { + DRM_DEBUG_KMS("Mei hdcp data init failed\n"); + return; + } + + hdcp->hdcp2_supported = true; } int intel_hdcp_init(struct intel_connector *connector, const struct intel_hdcp_shim *shim) { + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; - ret = drm_connector_attach_content_protection_property( - &connector->base); + if (!shim) + return -EINVAL; + + ret = drm_connector_attach_content_protection_property(&connector->base); if (ret) return ret; @@ -787,28 +1804,47 @@ int intel_hdcp_init(struct intel_connector *connector, mutex_init(&hdcp->mutex); INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); + + if (is_hdcp2_supported(dev_priv)) + intel_hdcp2_init(connector); + init_waitqueue_head(&hdcp->cp_irq_queue); + return 0; } int intel_hdcp_enable(struct intel_connector *connector) { struct intel_hdcp *hdcp = &connector->hdcp; - int ret; + unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; + int ret = -EINVAL; if (!hdcp->shim) return -ENOENT; mutex_lock(&hdcp->mutex); + WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); - ret = 
_intel_hdcp_enable(connector); - if (ret) - goto out; + /* + * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup + * is capable of HDCP2.2, it is preferred to use HDCP2.2. + */ + if (intel_hdcp2_capable(connector)) { + ret = _intel_hdcp2_enable(connector); + if (!ret) + check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS; + } + + /* When HDCP2.2 fails, HDCP1.4 will be attempted */ + if (ret && intel_hdcp_capable(connector)) { + ret = _intel_hdcp_enable(connector); + } + + if (!ret) { + schedule_delayed_work(&hdcp->check_work, check_link_interval); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&hdcp->prop_work); + } - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); - schedule_delayed_work(&hdcp->check_work, - DRM_HDCP_CHECK_PERIOD_MS); -out: mutex_unlock(&hdcp->mutex); return ret; } @@ -825,7 +1861,10 @@ int intel_hdcp_disable(struct intel_connector *connector) if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; - ret = _intel_hdcp_disable(connector); + if (hdcp->hdcp2_encrypted) + ret = _intel_hdcp2_disable(connector); + else if (hdcp->hdcp_encrypted) + ret = _intel_hdcp_disable(connector); } mutex_unlock(&hdcp->mutex); @@ -833,12 +1872,36 @@ int intel_hdcp_disable(struct intel_connector *connector) return ret; } +void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) +{ + mutex_lock(&dev_priv->hdcp_comp_mutex); + if (!dev_priv->hdcp_comp_added) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return; + } + + dev_priv->hdcp_comp_added = false; + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + component_del(dev_priv->drm.dev, &i915_hdcp_component_ops); +} + +void intel_hdcp_cleanup(struct intel_connector *connector) +{ + if (!connector->hdcp.shim) + return; + + mutex_lock(&connector->hdcp.mutex); + kfree(connector->hdcp.port_data.streams); + mutex_unlock(&connector->hdcp.mutex); +} + void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state) { - uint64_t old_cp = old_state->content_protection; - uint64_t new_cp = new_state->content_protection; + u64 old_cp = old_state->content_protection; + u64 new_cp = new_state->content_protection; struct drm_crtc_state *crtc_state; if (!new_state->crtc) { @@ -866,61 +1929,16 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, crtc_state->mode_changed = true; } -/* Implements Part 3 of the HDCP authorization procedure */ -int intel_hdcp_check_link(struct intel_connector *connector) +/* Handles the CP_IRQ raised from the DP HDCP sink */ +void intel_hdcp_handle_cp_irq(struct intel_connector *connector) { struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_i915_private *dev_priv = connector->base.dev->dev_private; - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); - enum port port = intel_dig_port->base.port; - int ret = 0; if (!hdcp->shim) - return -ENOENT; - - mutex_lock(&hdcp->mutex); - - if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) - goto out; - - if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) { - DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n", - connector->base.name, connector->base.base.id, - I915_READ(PORT_HDCP_STATUS(port))); - ret = -ENXIO; - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); - goto out; - } - - if (hdcp->shim->check_link(intel_dig_port)) { - if (hdcp->value != 
DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); - } - goto out; - } - - DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n", - connector->base.name, connector->base.base.id); + return; - ret = _intel_hdcp_disable(connector); - if (ret) { - DRM_ERROR("Failed to disable hdcp (%d)\n", ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); - goto out; - } + atomic_inc(&connector->hdcp.cp_irq_count); + wake_up_all(&connector->hdcp.cp_irq_queue); - ret = _intel_hdcp_enable(connector); - if (ret) { - DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); - goto out; - } - -out: - mutex_unlock(&hdcp->mutex); - return ret; + schedule_delayed_work(&hdcp->check_work, 0); } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 07e803a604bd..26767785f14a 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -30,7 +30,6 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/hdmi.h> -#include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> @@ -83,6 +82,8 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) static u32 g4x_infoframe_index(unsigned int type) { switch (type) { + case HDMI_PACKET_TYPE_GAMUT_METADATA: + return VIDEO_DIP_SELECT_GAMUT; case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_SELECT_AVI; case HDMI_INFOFRAME_TYPE_SPD: @@ -98,6 +99,12 @@ static u32 g4x_infoframe_index(unsigned int type) static u32 g4x_infoframe_enable(unsigned int type) { switch (type) { + case HDMI_PACKET_TYPE_GENERAL_CONTROL: + return VIDEO_DIP_ENABLE_GCP; + case HDMI_PACKET_TYPE_GAMUT_METADATA: + return VIDEO_DIP_ENABLE_GAMUT; + case DP_SDP_VSC: + return 0; case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_ENABLE_AVI; case HDMI_INFOFRAME_TYPE_SPD: @@ -113,6 +120,10 @@ static u32 g4x_infoframe_enable(unsigned int type) static u32 hsw_infoframe_enable(unsigned int type) { switch (type) { + case HDMI_PACKET_TYPE_GENERAL_CONTROL: + return VIDEO_DIP_ENABLE_GCP_HSW; + case HDMI_PACKET_TYPE_GAMUT_METADATA: + return VIDEO_DIP_ENABLE_GMP_HSW; case DP_SDP_VSC: return VIDEO_DIP_ENABLE_VSC_HSW; case DP_SDP_PPS: @@ -136,6 +147,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, int i) { switch (type) { + case HDMI_PACKET_TYPE_GAMUT_METADATA: + return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i); case DP_SDP_VSC: return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i); case DP_SDP_PPS: @@ -201,17 +214,37 @@ static void g4x_write_infoframe(struct intel_encoder *encoder, POSTING_READ(VIDEO_DIP_CTL); } -static bool g4x_infoframe_enabled(struct intel_encoder *encoder, +static void g4x_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 val, *data = frame; + int i; + + val = I915_READ(VIDEO_DIP_CTL); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(type); + + I915_WRITE(VIDEO_DIP_CTL, val); + + for (i = 0; i < len; i += 4) + *data++ = I915_READ(VIDEO_DIP_DATA); +} + +static u32 g4x_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val = 
I915_READ(VIDEO_DIP_CTL); if ((val & VIDEO_DIP_ENABLE) == 0) - return false; + return 0; if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) - return false; + return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); @@ -256,7 +289,28 @@ static void ibx_write_infoframe(struct intel_encoder *encoder, POSTING_READ(reg); } -static bool ibx_infoframe_enabled(struct intel_encoder *encoder, +static void ibx_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + u32 val, *data = frame; + int i; + + val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe)); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(type); + + I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val); + + for (i = 0; i < len; i += 4) + *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe)); +} + +static u32 ibx_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -265,10 +319,10 @@ static bool ibx_infoframe_enabled(struct intel_encoder *encoder, u32 val = I915_READ(reg); if ((val & VIDEO_DIP_ENABLE) == 0) - return false; + return 0; if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) - return false; + return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | @@ -317,7 +371,28 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, POSTING_READ(reg); } -static bool cpt_infoframe_enabled(struct intel_encoder *encoder, +static void cpt_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + u32 val, *data = frame; + int i; + + val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe)); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(type); + + I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val); + + for (i = 0; i < len; i += 4) + *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe)); +} + +static u32 cpt_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -325,7 +400,7 @@ static bool cpt_infoframe_enabled(struct intel_encoder *encoder, u32 val = I915_READ(TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) - return false; + return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | @@ -371,7 +446,28 @@ static void vlv_write_infoframe(struct intel_encoder *encoder, POSTING_READ(reg); } -static bool vlv_infoframe_enabled(struct intel_encoder *encoder, +static void vlv_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + u32 val, *data = frame; + int i; + + val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe)); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(type); + + I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val); 
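/*
 * Illustrative sketch, not part of this patch: the *_read_infoframe() hooks
 * added in this file share one access pattern -- select the DIP buffer via
 * the control register, then drain the payload one 32-bit data-register read
 * at a time.  The standalone C below shows why the copy loop steps in units
 * of 4 and why the destination buffer is sized for whole words.  The size
 * constant and the fake register are illustrative stand-ins only.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <sys/types.h>

#define DIP_BUFFER_SIZE 32	/* illustrative: one DIP payload, in bytes */

/* stand-in for reading the DIP data register: returns the next 32-bit word */
static uint32_t fake_dip_words[DIP_BUFFER_SIZE / 4];
static unsigned int fake_idx;
static uint32_t read_dip_data(void) { return fake_dip_words[fake_idx++]; }

static void read_infoframe(void *frame, ssize_t len)
{
	uint32_t *data = frame;
	ssize_t i;

	/* len bytes are fetched as one register read per 4-byte word */
	for (i = 0; i < len; i += 4)
		*data++ = read_dip_data();
}

int main(void)
{
	uint8_t buffer[DIP_BUFFER_SIZE];	/* always a whole number of words */

	memset(fake_dip_words, 0xab, sizeof(fake_dip_words));
	read_infoframe(buffer, sizeof(buffer));
	printf("first byte: 0x%02x\n", buffer[0]);
	return 0;
}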
+ + for (i = 0; i < len; i += 4) + *data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe)); +} + +static u32 vlv_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -379,10 +475,10 @@ static bool vlv_infoframe_enabled(struct intel_encoder *encoder, u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) - return false; + return 0; if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) - return false; + return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | @@ -424,7 +520,24 @@ static void hsw_write_infoframe(struct intel_encoder *encoder, POSTING_READ(ctl_reg); } -static bool hsw_infoframe_enabled(struct intel_encoder *encoder, +static void hsw_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 val, *data = frame; + int i; + + val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder)); + + for (i = 0; i < len; i += 4) + *data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder, + type, i >> 2)); +} + +static u32 hsw_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -435,6 +548,53 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder, VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW); } +static const u8 infoframe_type_to_idx[] = { + HDMI_PACKET_TYPE_GENERAL_CONTROL, + HDMI_PACKET_TYPE_GAMUT_METADATA, + DP_SDP_VSC, + HDMI_INFOFRAME_TYPE_AVI, + HDMI_INFOFRAME_TYPE_SPD, + HDMI_INFOFRAME_TYPE_VENDOR, +}; + +u32 intel_hdmi_infoframe_enable(unsigned int type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) { + if (infoframe_type_to_idx[i] == type) + return BIT(i); + } + + return 0; +} + +u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + u32 val, ret = 0; + int i; + + val = dig_port->infoframes_enabled(encoder, crtc_state); + + /* map from hardware bits to dip idx */ + for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) { + unsigned int type = infoframe_type_to_idx[i]; + + if (HAS_DDI(dev_priv)) { + if (val & hsw_infoframe_enable(type)) + ret |= BIT(i); + } else { + if (val & g4x_infoframe_enable(type)) + ret |= BIT(i); + } + } + + return ret; +} + /* * The data we write to the DIP data buffer registers is 1 byte bigger than the * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting @@ -454,15 +614,23 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder, */ static void intel_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, - union hdmi_infoframe *frame) + enum hdmi_infoframe_type type, + const union hdmi_infoframe *frame) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); u8 buffer[VIDEO_DIP_DATA_SIZE]; ssize_t len; + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(type)) == 0) + return; + + if (WARN_ON(frame->any.type != type)) + return; + /* see comment above for the reason for this offset */ - len = 
hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1); - if (len < 0) + len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1); + if (WARN_ON(len < 0)) return; /* Insert the 'hole' (see big comment above) at position 3 */ @@ -470,88 +638,143 @@ static void intel_write_infoframe(struct intel_encoder *encoder, buffer[3] = 0; len++; - intel_dig_port->write_infoframe(encoder, - crtc_state, - frame->any.type, buffer, len); + intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len); } -static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +void intel_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + enum hdmi_infoframe_type type, + union hdmi_infoframe *frame) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; - struct drm_connector *connector = &intel_hdmi->attached_connector->base; - bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported || - connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420; - union hdmi_infoframe frame; + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + u8 buffer[VIDEO_DIP_DATA_SIZE]; int ret; - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - adjusted_mode, - is_hdmi2_sink); - if (ret < 0) { - DRM_ERROR("couldn't fill AVI infoframe\n"); + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(type)) == 0) + return; + + intel_dig_port->read_infoframe(encoder, crtc_state, + type, buffer, sizeof(buffer)); + + /* Fill the 'hole' (see big comment above) at position 3 */ + memmove(&buffer[1], &buffer[0], 3); + + /* see comment above for the reason for this offset */ + ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1); + if (ret) { + DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type); return; } + if (frame->any.type != type) + DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n", + frame->any.type, type); +} + +static bool +intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi; + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + struct drm_connector *connector = conn_state->connector; + int ret; + + if (!crtc_state->has_infoframe) + return true; + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); + + ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, + adjusted_mode); + if (ret) + return false; + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) - frame.avi.colorspace = HDMI_COLORSPACE_YUV420; + frame->colorspace = HDMI_COLORSPACE_YUV420; else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) - frame.avi.colorspace = HDMI_COLORSPACE_YUV444; + frame->colorspace = HDMI_COLORSPACE_YUV444; else - frame.avi.colorspace = HDMI_COLORSPACE_RGB; + frame->colorspace = HDMI_COLORSPACE_RGB; + + drm_hdmi_avi_infoframe_colorspace(frame, conn_state); - drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode, + drm_hdmi_avi_infoframe_quant_range(frame, connector, + adjusted_mode, crtc_state->limited_color_range ? 
HDMI_QUANTIZATION_RANGE_LIMITED : - HDMI_QUANTIZATION_RANGE_FULL, - intel_hdmi->rgb_quant_range_selectable, - is_hdmi2_sink); + HDMI_QUANTIZATION_RANGE_FULL); - drm_hdmi_avi_infoframe_content_type(&frame.avi, - conn_state); + drm_hdmi_avi_infoframe_content_type(frame, conn_state); /* TODO: handle pixel repetition for YCBCR420 outputs */ - intel_write_infoframe(encoder, crtc_state, - &frame); + + ret = hdmi_avi_infoframe_check(frame); + if (WARN_ON(ret)) + return false; + + return true; } -static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static bool +intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { - union hdmi_infoframe frame; + struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd; int ret; - ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx"); - if (ret < 0) { - DRM_ERROR("couldn't fill SPD infoframe\n"); - return; - } + if (!crtc_state->has_infoframe) + return true; - frame.spd.sdi = HDMI_SPD_SDI_PC; + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD); - intel_write_infoframe(encoder, crtc_state, - &frame); + ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx"); + if (WARN_ON(ret)) + return false; + + frame->sdi = HDMI_SPD_SDI_PC; + + ret = hdmi_spd_infoframe_check(frame); + if (WARN_ON(ret)) + return false; + + return true; } -static void -intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - union hdmi_infoframe frame; +static bool +intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct hdmi_vendor_infoframe *frame = + &crtc_state->infoframes.hdmi.vendor.hdmi; + const struct drm_display_info *info = + &conn_state->connector->display_info; int ret; - ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, + if (!crtc_state->has_infoframe || !info->has_hdmi_infoframe) + return true; + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR); + + ret = drm_hdmi_vendor_infoframe_from_display_mode(frame, conn_state->connector, &crtc_state->base.adjusted_mode); - if (ret < 0) - return; + if (WARN_ON(ret)) + return false; - intel_write_infoframe(encoder, crtc_state, - &frame); + ret = hdmi_vendor_infoframe_check(frame); + if (WARN_ON(ret)) + return false; + + return true; } static void g4x_set_infoframes(struct intel_encoder *encoder, @@ -611,9 +834,15 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); - intel_hdmi_set_spd_infoframe(encoder, crtc_state); - intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_AVI, + &crtc_state->infoframes.avi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_SPD, + &crtc_state->infoframes.spd); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_VENDOR, + &crtc_state->infoframes.hdmi); } static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state) @@ -679,7 +908,10 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct 
intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); i915_reg_t reg; - u32 val = 0; + + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0) + return false; if (HAS_DDI(dev_priv)) reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder); @@ -690,18 +922,54 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, else return false; + I915_WRITE(reg, crtc_state->infoframes.gcp); + + return true; +} + +void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + i915_reg_t reg; + + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0) + return; + + if (HAS_DDI(dev_priv)) + reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); + else if (HAS_PCH_SPLIT(dev_priv)) + reg = TVIDEO_DIP_GCP(crtc->pipe); + else + return; + + crtc_state->infoframes.gcp = I915_READ(reg); +} + +static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (IS_G4X(dev_priv) || !crtc_state->has_infoframe) + return; + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL); + /* Indicate color depth whenever the sink supports deep color */ if (hdmi_sink_is_deep_color(conn_state)) - val |= GCP_COLOR_INDICATION; + crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION; /* Enable default_phase whenever the display mode is suitably aligned */ if (gcp_default_phase_possible(crtc_state->pipe_bpp, &crtc_state->base.adjusted_mode)) - val |= GCP_DEFAULT_PHASE_ENABLE; - - I915_WRITE(reg, val); - - return val != 0; + crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE; } static void ibx_set_infoframes(struct intel_encoder *encoder, @@ -752,9 +1020,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); - intel_hdmi_set_spd_infoframe(encoder, crtc_state); - intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_AVI, + &crtc_state->infoframes.avi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_SPD, + &crtc_state->infoframes.spd); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_VENDOR, + &crtc_state->infoframes.hdmi); } static void cpt_set_infoframes(struct intel_encoder *encoder, @@ -795,9 +1069,15 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); - intel_hdmi_set_spd_infoframe(encoder, crtc_state); - intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_AVI, + &crtc_state->infoframes.avi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_SPD, + &crtc_state->infoframes.spd); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_VENDOR, + &crtc_state->infoframes.hdmi); } static void vlv_set_infoframes(struct intel_encoder *encoder, @@ -847,9 +1127,15 @@ static void 
vlv_set_infoframes(struct intel_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); - intel_hdmi_set_spd_infoframe(encoder, crtc_state); - intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_AVI, + &crtc_state->infoframes.avi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_SPD, + &crtc_state->infoframes.spd); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_VENDOR, + &crtc_state->infoframes.hdmi); } static void hsw_set_infoframes(struct intel_encoder *encoder, @@ -880,9 +1166,15 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); - intel_hdmi_set_spd_infoframe(encoder, crtc_state); - intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_AVI, + &crtc_state->infoframes.avi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_SPD, + &crtc_state->infoframes.spd); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_VENDOR, + &crtc_state->infoframes.hdmi); } void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) @@ -1088,10 +1380,44 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, return ret; } +static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct drm_crtc *crtc = connector->base.state->crtc; + struct intel_crtc *intel_crtc = container_of(crtc, + struct intel_crtc, base); + u32 scanline; + int ret; + + for (;;) { + scanline = I915_READ(PIPEDSL(intel_crtc->pipe)); + if (scanline > 100 && scanline < 200) + break; + usleep_range(25, 50); + } + + ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false); + if (ret) { + DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret); + return ret; + } + ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true); + if (ret) { + DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret); + return ret; + } + + return 0; +} + static int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, bool enable) { + struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct intel_connector *connector = hdmi->attached_connector; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); int ret; if (!enable) @@ -1103,6 +1429,14 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, enable ? "Enable" : "Disable", ret); return ret; } + + /* + * WA: To fix incorrect positioning of the window of + * opportunity and enc_en signalling in KABYLAKE. 
+ */ + if (IS_KABYLAKE(dev_priv) && enable) + return kbl_repositioning_enc_en_signal(connector); + return 0; } @@ -1134,6 +1468,190 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) return true; } +static struct hdcp2_hdmi_msg_data { + u8 msg_id; + u32 timeout; + u32 timeout2; + } hdcp2_msg_data[] = { + {HDCP_2_2_AKE_INIT, 0, 0}, + {HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0}, + {HDCP_2_2_AKE_NO_STORED_KM, 0, 0}, + {HDCP_2_2_AKE_STORED_KM, 0, 0}, + {HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, + HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS}, + {HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, + 0}, + {HDCP_2_2_LC_INIT, 0, 0}, + {HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0}, + {HDCP_2_2_SKE_SEND_EKS, 0, 0}, + {HDCP_2_2_REP_SEND_RECVID_LIST, + HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0}, + {HDCP_2_2_REP_SEND_ACK, 0, 0}, + {HDCP_2_2_REP_STREAM_MANAGE, 0, 0}, + {HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, + 0}, + }; + +static +int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, + u8 *rx_status) +{ + return intel_hdmi_hdcp_read(intel_dig_port, + HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET, + rx_status, + HDCP_2_2_HDMI_RXSTATUS_LEN); +} + +static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++) + if (hdcp2_msg_data[i].msg_id == msg_id && + (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired)) + return hdcp2_msg_data[i].timeout; + else if (hdcp2_msg_data[i].msg_id == msg_id) + return hdcp2_msg_data[i].timeout2; + + return -EINVAL; +} + +static inline +int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port, + u8 msg_id, bool *msg_ready, + ssize_t *msg_sz) +{ + u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; + int ret; + + ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status); + if (ret < 0) { + DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret); + return ret; + } + + *msg_sz = ((HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8) | + rx_status[0]); + + if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) + *msg_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]) && + *msg_sz); + else + *msg_ready = *msg_sz; + + return 0; +} + +static ssize_t +intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, + u8 msg_id, bool paired) +{ + bool msg_ready = false; + int timeout, ret; + ssize_t msg_sz = 0; + + timeout = get_hdcp2_msg_timeout(msg_id, paired); + if (timeout < 0) + return timeout; + + ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port, + msg_id, &msg_ready, + &msg_sz), + !ret && msg_ready && msg_sz, timeout * 1000, + 1000, 5 * 1000); + if (ret) + DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n", + msg_id, ret, timeout); + + return ret ? ret : msg_sz; +} + +static +int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, + void *buf, size_t size) +{ + unsigned int offset; + + offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET; + return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size); +} + +static +int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, + u8 msg_id, void *buf, size_t size) +{ + struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp; + unsigned int offset; + ssize_t ret; + + ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id, + hdcp->is_paired); + if (ret < 0) + return ret; + + /* + * Available msg size should be equal to or lesser than the + * available buffer. 
+ */ + if (ret > size) { + DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n", + ret, size); + return -1; + } + + offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET; + ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret); + if (ret) + DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret); + + return ret; +} + +static +int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port) +{ + u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; + int ret; + + ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status); + if (ret) + return ret; + + /* + * Re-auth request and Link Integrity Failures are represented by + * same bit. i.e reauth_req. + */ + if (HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(rx_status[1])) + ret = HDCP_REAUTH_REQUEST; + else if (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1])) + ret = HDCP_TOPOLOGY_CHANGE; + + return ret; +} + +static +int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port, + bool *capable) +{ + u8 hdcp2_version; + int ret; + + *capable = false; + ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET, + &hdcp2_version, sizeof(hdcp2_version)); + if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK) + *capable = true; + + return ret; +} + +static inline +enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void) +{ + return HDCP_PROTOCOL_HDMI; +} + static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = { .write_an_aksv = intel_hdmi_hdcp_write_an_aksv, .read_bksv = intel_hdmi_hdcp_read_bksv, @@ -1145,6 +1663,11 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = { .read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part, .toggle_signalling = intel_hdmi_hdcp_toggle_signalling, .check_link = intel_hdmi_hdcp_check_link, + .write_2_2_msg = intel_hdmi_hdcp2_write_msg, + .read_2_2_msg = intel_hdmi_hdcp2_read_msg, + .check_2_2_link = intel_hdmi_hdcp2_check_link, + .hdcp_2_2_capable = intel_hdmi_hdcp2_capable, + .protocol = HDCP_PROTOCOL_HDMI, }; static void intel_hdmi_prepare(struct intel_encoder *encoder, @@ -1191,15 +1714,17 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + intel_wakeref_t wakeref; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe); - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } @@ -1208,7 +1733,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp, flags = 0; @@ -1231,7 +1755,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, if (tmp & HDMI_MODE_SELECT_HDMI) pipe_config->has_hdmi_sink = true; - if (intel_dig_port->infoframe_enabled(encoder, pipe_config)) + pipe_config->infoframes.enable |= + intel_hdmi_infoframes_enabled(encoder, pipe_config); + + if (pipe_config->infoframes.enable) pipe_config->has_infoframe = true; if (tmp & SDVO_AUDIO_ENABLE) @@ -1254,6 +1781,18 @@ static void intel_hdmi_get_config(struct intel_encoder 
*encoder, pipe_config->base.adjusted_mode.crtc_clock = dotclock; pipe_config->lane_count = 4; + + intel_hdmi_read_gcp_infoframe(encoder, pipe_config); + + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_AVI, + &pipe_config->infoframes.avi); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_SPD, + &pipe_config->infoframes.spd); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_VENDOR, + &pipe_config->infoframes.hdmi); } static void intel_enable_hdmi_audio(struct intel_encoder *encoder, @@ -1591,7 +2130,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector, if (hdmi->has_hdmi_sink && !force_dvi) { /* if we can't do 8bpc we may still be able to do 12bpc */ - if (status != MODE_OK && !HAS_GMCH_DISPLAY(dev_priv)) + if (status != MODE_OK && !HAS_GMCH(dev_priv)) status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true, force_dvi); @@ -1616,7 +2155,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, &crtc_state->base.adjusted_mode; int i; - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH(dev_priv)) return false; if (bpc == 10 && INTEL_GEN(dev_priv) < 11) @@ -1667,7 +2206,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, /* Display Wa_1405510057:icl */ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && - bpc == 10 && IS_ICELAKE(dev_priv) && + bpc == 10 && INTEL_GEN(dev_priv) >= 11 && (adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start) % 8 == 2) return false; @@ -1707,9 +2246,9 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, return true; } -bool intel_hdmi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +int intel_hdmi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -1725,7 +2264,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; @@ -1756,7 +2295,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, &clock_12bpc, &clock_10bpc, &clock_8bpc)) { DRM_ERROR("Can't support YCBCR420 output\n"); - return false; + return -EINVAL; } } @@ -1806,7 +2345,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock, false, force_dvi) != MODE_OK) { DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n"); - return false; + return -EINVAL; } /* Set user selected PAR to incoming mode's member */ @@ -1825,7 +2364,24 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, } } - return true; + intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state); + + if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) { + DRM_DEBUG_KMS("bad AVI infoframe\n"); + return -EINVAL; + } + + if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) { + DRM_DEBUG_KMS("bad SPD infoframe\n"); + return -EINVAL; + } + + if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) { + DRM_DEBUG_KMS("bad HDMI infoframe\n"); + return -EINVAL; + } + + 
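/*
 * Illustrative sketch, not part of this patch: the compute helpers above all
 * record which packets will be transmitted by OR-ing a per-type bit into
 * crtc_state->infoframes.enable, and intel_write_infoframe() /
 * intel_read_infoframe() later test the same bit before touching hardware.
 * The standalone C below, with hypothetical type codes rather than the spec
 * values the driver uses, shows the table-indexed type -> BIT(i) mapping.
 */
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* illustrative packet/infoframe type codes (not the HDMI/DP spec values) */
enum { PKT_GCP = 1, PKT_GAMUT = 2, PKT_AVI = 3, PKT_SPD = 4, PKT_VENDOR = 5 };

static const uint8_t type_to_idx[] = { PKT_GCP, PKT_GAMUT, PKT_AVI, PKT_SPD, PKT_VENDOR };

/* same shape as intel_hdmi_infoframe_enable(): unknown types map to 0 */
static uint32_t infoframe_enable(unsigned int type)
{
	unsigned int i;

	for (i = 0; i < sizeof(type_to_idx) / sizeof(type_to_idx[0]); i++)
		if (type_to_idx[i] == type)
			return BIT(i);
	return 0;
}

int main(void)
{
	uint32_t enable = 0;

	enable |= infoframe_enable(PKT_AVI);	/* set at compute/atomic-check time */
	enable |= infoframe_enable(PKT_SPD);

	/* later, at modeset or readout time, the same helper gates each access */
	printf("AVI enabled:    %s\n", enable & infoframe_enable(PKT_AVI) ? "yes" : "no");
	printf("VENDOR enabled: %s\n", enable & infoframe_enable(PKT_VENDOR) ? "yes" : "no");
	return 0;
}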
return 0; } static void @@ -1835,7 +2391,6 @@ intel_hdmi_unset_edid(struct drm_connector *connector) intel_hdmi->has_hdmi_sink = false; intel_hdmi->has_audio = false; - intel_hdmi->rgb_quant_range_selectable = false; intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE; intel_hdmi->dp_dual_mode.max_tmds_clock = 0; @@ -1896,11 +2451,12 @@ intel_hdmi_set_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + intel_wakeref_t wakeref; struct edid *edid; bool connected = false; struct i2c_adapter *i2c; - intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); @@ -1915,13 +2471,10 @@ intel_hdmi_set_edid(struct drm_connector *connector) intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); to_intel_connector(connector)->detect_edid = edid; if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { - intel_hdmi->rgb_quant_range_selectable = - drm_rgb_quant_range_selectable(edid); - intel_hdmi->has_audio = drm_detect_monitor_audio(edid); intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); @@ -1940,13 +2493,14 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base; + intel_wakeref_t wakeref; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); - intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - if (IS_ICELAKE(dev_priv) && + if (INTEL_GEN(dev_priv) >= 11 && !intel_digital_port_connected(encoder)) goto out; @@ -1956,7 +2510,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) status = connector_status_connected; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); if (status != connector_status_connected) cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier); @@ -2148,14 +2702,25 @@ static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_digital_port *intel_dig_port = + hdmi_to_dig_port(intel_hdmi); intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); intel_attach_aspect_ratio_property(connector); + + /* + * Attach Colorspace property for Non LSPCON based device + * ToDo: This needs to be extended for LSPCON implementation + * as well. Will be implemented separately. 
+ */ + if (!intel_dig_port->lspcon.active) + intel_attach_colorspace_property(connector); + drm_connector_attach_content_type_property(connector); connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; - if (!HAS_GMCH_DISPLAY(dev_priv)) + if (!HAS_GMCH(dev_priv)) drm_connector_attach_max_bpc_property(connector, 8, 12); } @@ -2336,14 +2901,14 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, return info->alternate_ddc_pin; } - if (IS_CHERRYVIEW(dev_priv)) - ddc_pin = chv_port_to_ddc_pin(dev_priv, port); - else if (IS_GEN9_LP(dev_priv)) - ddc_pin = bxt_port_to_ddc_pin(dev_priv, port); + if (HAS_PCH_ICP(dev_priv)) + ddc_pin = icl_port_to_ddc_pin(dev_priv, port); else if (HAS_PCH_CNP(dev_priv)) ddc_pin = cnp_port_to_ddc_pin(dev_priv, port); - else if (HAS_PCH_ICP(dev_priv)) - ddc_pin = icl_port_to_ddc_pin(dev_priv, port); + else if (IS_GEN9_LP(dev_priv)) + ddc_pin = bxt_port_to_ddc_pin(dev_priv, port); + else if (IS_CHERRYVIEW(dev_priv)) + ddc_pin = chv_port_to_ddc_pin(dev_priv, port); else ddc_pin = g4x_port_to_ddc_pin(dev_priv, port); @@ -2360,33 +2925,36 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { intel_dig_port->write_infoframe = vlv_write_infoframe; + intel_dig_port->read_infoframe = vlv_read_infoframe; intel_dig_port->set_infoframes = vlv_set_infoframes; - intel_dig_port->infoframe_enabled = vlv_infoframe_enabled; + intel_dig_port->infoframes_enabled = vlv_infoframes_enabled; } else if (IS_G4X(dev_priv)) { intel_dig_port->write_infoframe = g4x_write_infoframe; + intel_dig_port->read_infoframe = g4x_read_infoframe; intel_dig_port->set_infoframes = g4x_set_infoframes; - intel_dig_port->infoframe_enabled = g4x_infoframe_enabled; + intel_dig_port->infoframes_enabled = g4x_infoframes_enabled; } else if (HAS_DDI(dev_priv)) { if (intel_dig_port->lspcon.active) { - intel_dig_port->write_infoframe = - lspcon_write_infoframe; + intel_dig_port->write_infoframe = lspcon_write_infoframe; + intel_dig_port->read_infoframe = lspcon_read_infoframe; intel_dig_port->set_infoframes = lspcon_set_infoframes; - intel_dig_port->infoframe_enabled = - lspcon_infoframe_enabled; + intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled; } else { - intel_dig_port->set_infoframes = hsw_set_infoframes; - intel_dig_port->infoframe_enabled = - hsw_infoframe_enabled; intel_dig_port->write_infoframe = hsw_write_infoframe; + intel_dig_port->read_infoframe = hsw_read_infoframe; + intel_dig_port->set_infoframes = hsw_set_infoframes; + intel_dig_port->infoframes_enabled = hsw_infoframes_enabled; } } else if (HAS_PCH_IBX(dev_priv)) { intel_dig_port->write_infoframe = ibx_write_infoframe; + intel_dig_port->read_infoframe = ibx_read_infoframe; intel_dig_port->set_infoframes = ibx_set_infoframes; - intel_dig_port->infoframe_enabled = ibx_infoframe_enabled; + intel_dig_port->infoframes_enabled = ibx_infoframes_enabled; } else { intel_dig_port->write_infoframe = cpt_write_infoframe; + intel_dig_port->read_infoframe = cpt_read_infoframe; intel_dig_port->set_infoframes = cpt_set_infoframes; - intel_dig_port->infoframe_enabled = cpt_infoframe_enabled; + intel_dig_port->infoframes_enabled = cpt_infoframes_enabled; } } @@ -2432,6 +3000,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi_add_properties(intel_hdmi, connector); + intel_connector_attach_encoder(intel_connector, intel_encoder); + intel_hdmi->attached_connector = intel_connector; + if (is_hdcp_supported(dev_priv, 
port)) { int ret = intel_hdcp_init(intel_connector, &intel_hdmi_hdcp_shim); @@ -2439,9 +3010,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, DRM_DEBUG_KMS("HDCP init failed, skipping.\n"); } - intel_connector_attach_encoder(intel_connector, intel_encoder); - intel_hdmi->attached_connector = intel_connector; - /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index e24174d08fed..b8937c788f03 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -23,7 +23,6 @@ #include <linux/kernel.h> -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -227,9 +226,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) container_of(work, typeof(*dev_priv), hotplug.reenable_work.work); struct drm_device *dev = &dev_priv->drm; + intel_wakeref_t wakeref; enum hpd_pin pin; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); spin_lock_irq(&dev_priv->irq_lock); for_each_hpd_pin(pin) { @@ -262,7 +262,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); } bool intel_encoder_hotplug(struct intel_encoder *encoder, @@ -470,7 +470,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * hotplug bits itself. So only WARN about unexpected * interrupts on saner platforms. */ - WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv), + WARN_ONCE(!HAS_GMCH(dev_priv), "Received HPD interrupt on pin %d although disabled\n", pin); continue; } diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index bc27b691d824..94c04f16a2ad 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -79,7 +79,7 @@ int intel_huc_auth(struct intel_huc *huc) } /* Check authentication status, it should be done by now */ - ret = __intel_wait_for_register(i915, + ret = __intel_wait_for_register(&i915->uncore, HUC_STATUS2, HUC_FW_VERIFIED, HUC_FW_VERIFIED, @@ -115,14 +115,14 @@ fail: int intel_huc_check_status(struct intel_huc *huc) { struct drm_i915_private *dev_priv = huc_to_i915(huc); - bool status; + intel_wakeref_t wakeref; + bool status = false; if (!HAS_HUC(dev_priv)) return -ENODEV; - intel_runtime_pm_get(dev_priv); - status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; - intel_runtime_pm_put(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) + status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; return status; } diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c index f93d2384d482..68d47c105939 100644 --- a/drivers/gpu/drm/i915/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/intel_huc_fw.c @@ -23,8 +23,8 @@ */ #define BXT_HUC_FW_MAJOR 01 -#define BXT_HUC_FW_MINOR 07 -#define BXT_BLD_NUM 1398 +#define BXT_HUC_FW_MINOR 8 +#define BXT_BLD_NUM 2893 #define SKL_HUC_FW_MAJOR 01 #define SKL_HUC_FW_MINOR 07 @@ -76,9 +76,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw) huc_fw->path = I915_KBL_HUC_UCODE; huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR; huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR; - } else { - DRM_WARN("%s: No firmware known for this platform!\n", - intel_uc_fw_type_repr(huc_fw->type)); } } @@ -109,41 
+106,46 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma) { struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); struct drm_i915_private *dev_priv = huc_to_i915(huc); + struct intel_uncore *uncore = &dev_priv->uncore; unsigned long offset = 0; u32 size; int ret; GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); /* Set the source address for the uCode */ offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) + huc_fw->header_offset; - I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); - I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); + intel_uncore_write(uncore, DMA_ADDR_0_LOW, + lower_32_bits(offset)); + intel_uncore_write(uncore, DMA_ADDR_0_HIGH, + upper_32_bits(offset) & 0xFFFF); - /* Hardware doesn't look at destination address for HuC. Set it to 0, + /* + * Hardware doesn't look at destination address for HuC. Set it to 0, * but still program the correct address space. */ - I915_WRITE(DMA_ADDR_1_LOW, 0); - I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0); + intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); size = huc_fw->header_size + huc_fw->ucode_size; - I915_WRITE(DMA_COPY_SIZE, size); + intel_uncore_write(uncore, DMA_COPY_SIZE, size); /* Start the DMA */ - I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)); + intel_uncore_write(uncore, DMA_CTRL, + _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)); /* Wait for DMA to finish */ - ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100); + ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret); /* Disable the bits once DMA is over */ - I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL)); + intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL)); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); return ret; } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 802d0394ccc4..422685d120e9 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -29,7 +29,6 @@ #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/export.h> -#include <drm/drmP.h> #include <drm/drm_hdcp.h> #include "intel_drv.h" #include <drm/i915_drm.h> @@ -349,7 +348,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); I915_WRITE_FW(GMBUS4, irq_enable); - ret = intel_wait_for_register_fw(dev_priv, + ret = intel_wait_for_register_fw(&dev_priv->uncore, GMBUS2, GMBUS_ACTIVE, 0, 10); @@ -698,12 +697,13 @@ out: static int gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { - struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, - adapter); + struct intel_gmbus *bus = + container_of(adapter, struct intel_gmbus, adapter); struct drm_i915_private *dev_priv = bus->dev_priv; + intel_wakeref_t wakeref; int ret; - intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); if (bus->force_bit) { ret = i2c_bit_algo.master_xfer(adapter, msgs, num); @@ -715,17 +715,16 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) bus->force_bit |= GMBUS_FORCE_BIT_RETRY; } - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + 
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); return ret; } int intel_gmbus_output_aksv(struct i2c_adapter *adapter) { - struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, - adapter); + struct intel_gmbus *bus = + container_of(adapter, struct intel_gmbus, adapter); struct drm_i915_private *dev_priv = bus->dev_priv; - int ret; u8 cmd = DRM_HDCP_DDC_AKSV; u8 buf[DRM_HDCP_KSV_LEN] = { 0 }; struct i2c_msg msgs[] = { @@ -742,8 +741,10 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) .buf = buf, } }; + intel_wakeref_t wakeref; + int ret; - intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); mutex_lock(&dev_priv->gmbus_mutex); /* @@ -754,7 +755,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT); mutex_unlock(&dev_priv->gmbus_mutex); - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); return ret; } @@ -822,7 +823,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; - else if (!HAS_GMCH_DISPLAY(dev_priv)) + else if (!HAS_GMCH(dev_priv)) /* * Broxton uses the same PCH offsets for South Display Engine, * even though it doesn't have a PCH. diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 5d5336fbe7b0..f8239bca3820 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -65,6 +65,7 @@ #include <linux/irq.h> #include <linux/pci.h> #include <linux/pm_runtime.h> +#include <linux/platform_device.h> #include "i915_drv.h" #include <linux/delay.h> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index d7fa301b5ec7..bec232acc8d7 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -133,10 +133,10 @@ */ #include <linux/interrupt.h> -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_gem_render_state.h" +#include "i915_reset.h" #include "i915_vgpu.h" #include "intel_lrc_reg.h" #include "intel_mocs.h" @@ -164,11 +164,12 @@ #define WA_TAIL_DWORDS 2 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) -static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - struct intel_context *ce); +#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE) + +static int execlists_context_deferred_alloc(struct intel_context *ce, + struct intel_engine_cs *engine); static void execlists_init_reg_state(u32 *reg_state, - struct i915_gem_context *ctx, + struct intel_context *ce, struct intel_engine_cs *engine, struct intel_ring *ring); @@ -182,13 +183,118 @@ static inline int rq_prio(const struct i915_request *rq) return rq->sched.attr.priority; } +static int effective_prio(const struct i915_request *rq) +{ + int prio = rq_prio(rq); + + /* + * On unwinding the active request, we give it a priority bump + * equivalent to a freshly submitted request. This protects it from + * being gazumped again, but it would be preferable if we didn't + * let it be gazumped in the first place! + * + * See __unwind_incomplete_requests() + */ + if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) { + /* + * After preemption, we insert the active request at the + * end of the new priority level. 
This means that we will be + * _lower_ priority than the preemptee all things equal (and + * so the preemption is valid), so adjust our comparison + * accordingly. + */ + prio |= ACTIVE_PRIORITY; + prio--; + } + + /* Restrict mere WAIT boosts from triggering preemption */ + return prio | __NO_PREEMPTION; +} + +static int queue_prio(const struct intel_engine_execlists *execlists) +{ + struct i915_priolist *p; + struct rb_node *rb; + + rb = rb_first_cached(&execlists->queue); + if (!rb) + return INT_MIN; + + /* + * As the priolist[] are inverted, with the highest priority in [0], + * we have to flip the index value to become priority. + */ + p = to_priolist(rb); + return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used); +} + static inline bool need_preempt(const struct intel_engine_cs *engine, - const struct i915_request *last, - int prio) + const struct i915_request *rq) +{ + int last_prio; + + if (!intel_engine_has_preemption(engine)) + return false; + + if (i915_request_completed(rq)) + return false; + + /* + * Check if the current priority hint merits a preemption attempt. + * + * We record the highest value priority we saw during rescheduling + * prior to this dequeue, therefore we know that if it is strictly + * less than the current tail of ESLP[0], we do not need to force + * a preempt-to-idle cycle. + * + * However, the priority hint is a mere hint that we may need to + * preempt. If that hint is stale or we may be trying to preempt + * ourselves, ignore the request. + */ + last_prio = effective_prio(rq); + if (!__execlists_need_preempt(engine->execlists.queue_priority_hint, + last_prio)) + return false; + + /* + * Check against the first request in ELSP[1], it will, thanks to the + * power of PI, be the highest priority of that context. + */ + if (!list_is_last(&rq->link, &engine->timeline.requests) && + rq_prio(list_next_entry(rq, link)) > last_prio) + return true; + + /* + * If the inflight context did not trigger the preemption, then maybe + * it was the set of queued requests? Pick the highest priority in + * the queue (the first active priolist) and see if it deserves to be + * running instead of ELSP[0]. + * + * The highest priority request in the queue can not be either + * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same + * context, it's priority would not exceed ELSP[0] aka last_prio. + */ + return queue_prio(&engine->execlists) > last_prio; +} + +__maybe_unused static inline bool +assert_priority_queue(const struct i915_request *prev, + const struct i915_request *next) { - return (intel_engine_has_preemption(engine) && - __execlists_need_preempt(prio, rq_prio(last)) && - !i915_request_completed(last)); + const struct intel_engine_execlists *execlists = + &prev->engine->execlists; + + /* + * Without preemption, the prev may refer to the still active element + * which we refuse to let go. + * + * Even with preemption, there are times when we think it is better not + * to preempt and leave an ostensibly lower priority request in flight. + */ + if (port_request(execlists->port) == prev) + return true; + + return rq_prio(prev) >= rq_prio(next); } /* @@ -217,11 +323,10 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, * engine info, SW context ID and SW counter need to form a unique number * (Context ID) per lrc. 
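For readers following the new priority plumbing above: queue_prio() converts the first populated priolist bucket back into a priority value by flipping the inverted bucket index. A self-contained arithmetic sketch, assuming a shift of 2 purely for illustration (EXAMPLE_SHIFT and example_queue_prio are stand-ins; the driver uses I915_USER_PRIORITY_SHIFT):

#include <strings.h>	/* ffs() */

#define EXAMPLE_SHIFT 2	/* pretend there are 4 internal priority levels */

static int example_queue_prio(int user_prio, unsigned int used)
{
	/* Bucket 0 holds the highest internal level, so ffs(used) == 1 must
	 * yield the largest result and the last bucket the smallest.
	 */
	return ((user_prio + 1) << EXAMPLE_SHIFT) - ffs(used);
}

/*
 * example_queue_prio(1, 0x1) == (2 << 2) - 1 == 7   best bucket of prio 1
 * example_queue_prio(1, 0x8) == (2 << 2) - 4 == 4   worst bucket of prio 1
 */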
*/ -static void -intel_lr_context_descriptor_update(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - struct intel_context *ce) +static u64 +lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) { + struct i915_gem_context *ctx = ce->gem_context; u64 desc; BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH))); @@ -239,7 +344,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, * Consider updating oa_get_render_ctx_id in i915_perf.c when changing * anything below. */ - if (INTEL_GEN(ctx->i915) >= 11) { + if (INTEL_GEN(engine->i915) >= 11) { GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH)); desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT; /* bits 37-47 */ @@ -256,7 +361,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ } - ce->lrc_desc = desc; + return desc; } static void unwind_wa_tail(struct i915_request *rq) @@ -265,11 +370,12 @@ static void unwind_wa_tail(struct i915_request *rq) assert_ring_tail_valid(rq->ring, rq->tail); } -static void __unwind_incomplete_requests(struct intel_engine_cs *engine) +static struct i915_request * +__unwind_incomplete_requests(struct intel_engine_cs *engine) { struct i915_request *rq, *rn, *active = NULL; struct list_head *uninitialized_var(pl); - int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT; + int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY; lockdep_assert_held(&engine->timeline.lock); @@ -300,12 +406,27 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine) * The active request is now effectively the start of a new client * stream, so give it the equivalent small priority bump to prevent * it being gazumped a second time by another peer. + * + * Note we have to be careful not to apply a priority boost to a request + * still spinning on its semaphores. If the request hasn't started, that + * means it is still waiting for its dependencies to be signaled, and + * if we apply a priority boost to this request, we will boost it past + * its signalers and so break PI. + * + * One consequence of this preemption boost is that we may jump + * over lesser priorities (such as I915_PRIORITY_WAIT), effectively + * making those priorities non-preemptible. They will be moved forward + * in the priority queue, but they will not gain immediate access to + * the GPU. */ - if (!(prio & I915_PRIORITY_NEWCLIENT)) { - prio |= I915_PRIORITY_NEWCLIENT; + if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) { + prio |= ACTIVE_PRIORITY; + active->sched.attr.priority = prio; list_move_tail(&active->sched.link, i915_sched_lookup_priolist(engine, prio)); } + + return active; } void @@ -363,31 +484,12 @@ execlists_context_schedule_out(struct i915_request *rq, unsigned long status) trace_i915_request_out(rq); } -static void -execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) -{ - ASSIGN_CTX_PDP(ppgtt, reg_state, 3); - ASSIGN_CTX_PDP(ppgtt, reg_state, 2); - ASSIGN_CTX_PDP(ppgtt, reg_state, 1); - ASSIGN_CTX_PDP(ppgtt, reg_state, 0); -} - static u64 execlists_update_context(struct i915_request *rq) { - struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; struct intel_context *ce = rq->hw_context; - u32 *reg_state = ce->lrc_reg_state; - reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); - - /* - * True 32b PPGTT with dynamic page allocation: update PDP - * registers and point the unallocated PDPs to scratch page. 
- * PML4 is allocated during ppgtt init, so this is not needed - * in 48-bit mode. - */ - if (!i915_vm_is_48bit(&ppgtt->vm)) - execlists_update_context_pdps(ppgtt, reg_state); + ce->lrc_reg_state[CTX_RING_TAIL + 1] = + intel_ring_set_tail(rq->ring, rq->tail); /* * Make sure the context image is complete before we submit it to HW. @@ -398,8 +500,13 @@ static u64 execlists_update_context(struct i915_request *rq) * may not be visible to the HW prior to the completion of the UC * register write and that we may begin execution from the context * before its image is complete leading to invalid PD chasing. + * + * Furthermore, Braswell, at least, wants a full mb to be sure that + * the writes are coherent in memory (visible to the GPU) prior to + * execution, and not just visible to other CPUs (as is the result of + * wmb). */ - wmb(); + mb(); return ce->lrc_desc; } @@ -450,12 +557,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) desc = execlists_update_context(rq); GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc)); - GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n", + GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n", engine->name, n, port[n].context_id, count, - rq->global_seqno, rq->fence.context, rq->fence.seqno, - intel_engine_get_seqno(engine), + hwsp_seqno(rq), rq_prio(rq)); } else { GEM_BUG_ON(!n); @@ -490,6 +596,17 @@ static bool can_merge_ctx(const struct intel_context *prev, return true; } +static bool can_merge_rq(const struct i915_request *prev, + const struct i915_request *next) +{ + GEM_BUG_ON(!assert_priority_queue(prev, next)); + + if (!can_merge_ctx(prev->hw_context, next->hw_context)) + return false; + + return true; +} + static void port_assign(struct execlist_port *port, struct i915_request *rq) { GEM_BUG_ON(rq == port_request(port)); @@ -503,8 +620,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq) static void inject_preempt_context(struct intel_engine_cs *engine) { struct intel_engine_execlists *execlists = &engine->execlists; - struct intel_context *ce = - to_intel_context(engine->i915->preempt_context, engine); + struct intel_context *ce = engine->preempt_context; unsigned int n; GEM_BUG_ON(execlists->preempt_complete_status != @@ -526,6 +642,8 @@ static void inject_preempt_context(struct intel_engine_cs *engine) execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT); + + (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); } static void complete_preempt_context(struct intel_engine_execlists *execlists) @@ -594,7 +712,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) return; - if (need_preempt(engine, last, execlists->queue_priority)) { + if (need_preempt(engine, last)) { inject_preempt_context(engine); return; } @@ -627,7 +745,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * WaIdleLiteRestore:bdw,skl * Apply the wa NOOPs to prevent * ring:HEAD == rq:TAIL as we resubmit the - * request. See gen8_emit_breadcrumb() for + * request. See gen8_emit_fini_breadcrumb() for * where we prepare the padding after the * end of the request. */ @@ -651,8 +769,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * second request, and so we never need to tell the * hardware about the first. 
*/ - if (last && - !can_merge_ctx(rq->hw_context, last->hw_context)) { + if (last && !can_merge_rq(last, rq)) { /* * If we are on the second port and cannot * combine this request with the last, then we @@ -662,6 +779,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine) goto done; /* + * We must not populate both ELSP[] with the + * same LRCA, i.e. we must submit 2 different + * contexts if we submit 2 ELSP. + */ + if (last->hw_context == rq->hw_context) + goto done; + + /* * If GVT overrides us we only ever submit * port[0], leaving port[1] empty. Note that we * also have to be careful that we don't queue @@ -672,7 +797,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ctx_single_port_submission(rq->hw_context)) goto done; - GEM_BUG_ON(last->hw_context == rq->hw_context); if (submit) port_assign(port, last); @@ -691,29 +815,27 @@ static void execlists_dequeue(struct intel_engine_cs *engine) } rb_erase_cached(&p->node, &execlists->queue); - if (p->priority != I915_PRIORITY_NORMAL) - kmem_cache_free(engine->i915->priorities, p); + i915_priolist_free(p); } done: /* * Here be a bit of magic! Or sleight-of-hand, whichever you prefer. * - * We choose queue_priority such that if we add a request of greater + * We choose the priority hint such that if we add a request of greater * priority than this, we kick the submission tasklet to decide on * the right order of submitting the requests to hardware. We must * also be prepared to reorder requests as they are in-flight on the - * HW. We derive the queue_priority then as the first "hole" in + * HW. We derive the priority hint then as the first "hole" in * the HW submission ports and if there are no available slots, * the priority of the lowest executing request, i.e. last. * * When we do receive a higher priority request ready to run from the - * user, see queue_request(), the queue_priority is bumped to that + * user, see queue_request(), the priority hint is bumped to that * request triggering preemption on the next dequeue (or subsequent * interrupt for secondary ports). */ - execlists->queue_priority = - port != execlists->port ? 
rq_prio(last) : INT_MIN; + execlists->queue_priority_hint = queue_prio(execlists); if (submit) { port_assign(port, last); @@ -743,12 +865,11 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) while (num_ports-- && port_isset(port)) { struct i915_request *rq = port_request(port); - GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n", + GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n", rq->engine->name, (unsigned int)(port - execlists->port), - rq->global_seqno, rq->fence.context, rq->fence.seqno, - intel_engine_get_seqno(rq->engine)); + hwsp_seqno(rq)); GEM_BUG_ON(!execlists->active); execlists_context_schedule_out(rq, @@ -765,6 +886,13 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) execlists_clear_all_active(execlists); } +static inline void +invalidate_csb_entries(const u32 *first, const u32 *last) +{ + clflush((void *)first); + clflush((void *)last); +} + static void reset_csb_pointers(struct intel_engine_execlists *execlists) { const unsigned int reset_value = GEN8_CSB_ENTRIES - 1; @@ -780,6 +908,9 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists) */ execlists->csb_head = reset_value; WRITE_ONCE(*execlists->csb_write, reset_value); + + invalidate_csb_entries(&execlists->csb_status[0], + &execlists->csb_status[GEN8_CSB_ENTRIES - 1]); } static void nop_submission_tasklet(unsigned long data) @@ -794,8 +925,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) struct rb_node *rb; unsigned long flags; - GEM_TRACE("%s current %d\n", - engine->name, intel_engine_get_seqno(engine)); + GEM_TRACE("%s\n", engine->name); /* * Before we call engine->cancel_requests(), we should have exclusive @@ -819,12 +949,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) /* Mark all executing requests as skipped. */ list_for_each_entry(rq, &engine->timeline.requests, link) { - GEM_BUG_ON(!rq->global_seqno); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) - continue; + if (!i915_request_signaled(rq)) + dma_fence_set_error(&rq->fence, -EIO); - dma_fence_set_error(&rq->fence, -EIO); + i915_request_mark_complete(rq); } /* Flush the queued requests to the timeline list (for retiring). */ @@ -834,23 +962,18 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) priolist_for_each_request_consume(rq, rn, p, i) { list_del_init(&rq->sched.link); - - dma_fence_set_error(&rq->fence, -EIO); __i915_request_submit(rq); + dma_fence_set_error(&rq->fence, -EIO); + i915_request_mark_complete(rq); } rb_erase_cached(&p->node, &execlists->queue); - if (p->priority != I915_PRIORITY_NORMAL) - kmem_cache_free(engine->i915->priorities, p); + i915_priolist_free(p); } - intel_write_status_page(engine, - I915_GEM_HWS_INDEX, - intel_engine_last_submit(engine)); - /* Remaining _unready_ requests will be nop'ed when submitted */ - execlists->queue_priority = INT_MIN; + execlists->queue_priority_hint = INT_MIN; execlists->queue = RB_ROOT_CACHED; GEM_BUG_ON(port_isset(execlists->port)); @@ -873,6 +996,8 @@ static void process_csb(struct intel_engine_cs *engine) const u32 * const buf = execlists->csb_status; u8 head, tail; + lockdep_assert_held(&engine->timeline.lock); + /* * Note that csb_write, csb_status may be either in HWSP or mmio. 
* When reading from the csb_write mmio register, we have to be @@ -961,13 +1086,12 @@ static void process_csb(struct intel_engine_cs *engine) EXECLISTS_ACTIVE_USER)); rq = port_unpack(port, &count); - GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n", + GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n", engine->name, port->context_id, count, - rq ? rq->global_seqno : 0, rq ? rq->fence.context : 0, rq ? rq->fence.seqno : 0, - intel_engine_get_seqno(engine), + rq ? hwsp_seqno(rq) : 0, rq ? rq_prio(rq) : 0); /* Check the context/desc id for this event matches */ @@ -1015,6 +1139,19 @@ static void process_csb(struct intel_engine_cs *engine) } while (head != tail); execlists->csb_head = head; + + /* + * Gen11 has proven to fail wrt global observation point between + * entry and tail update, failing on the ordering and thus + * we see an old entry in the context status buffer. + * + * Forcibly evict out entries for the next gpu csb update, + * to increase the odds that we get a fresh entries with non + * working hardware. The cost for doing so comes out mostly with + * the wash as hardware, working or not, will need to do the + * invalidation before. + */ + invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]); } static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) @@ -1037,7 +1174,7 @@ static void execlists_submission_tasklet(unsigned long data) GEM_TRACE("%s awake?=%d, active=%x\n", engine->name, - engine->i915->gt.awake, + !!engine->i915->gt.awake, engine->execlists.active); spin_lock_irqsave(&engine->timeline.lock, flags); @@ -1067,8 +1204,8 @@ static void __submit_queue_imm(struct intel_engine_cs *engine) static void submit_queue(struct intel_engine_cs *engine, int prio) { - if (prio > engine->execlists.queue_priority) { - engine->execlists.queue_priority = prio; + if (prio > engine->execlists.queue_priority_hint) { + engine->execlists.queue_priority_hint = prio; __submit_queue_imm(engine); } } @@ -1091,19 +1228,50 @@ static void execlists_submit_request(struct i915_request *request) spin_unlock_irqrestore(&engine->timeline.lock, flags); } -static void execlists_context_destroy(struct intel_context *ce) +static void __execlists_context_fini(struct intel_context *ce) { - GEM_BUG_ON(ce->pin_count); - - if (!ce->state) - return; - - intel_ring_free(ce->ring); + intel_ring_put(ce->ring); GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); i915_gem_object_put(ce->state->obj); } +static void execlists_context_destroy(struct kref *kref) +{ + struct intel_context *ce = container_of(kref, typeof(*ce), ref); + + GEM_BUG_ON(intel_context_is_pinned(ce)); + + if (ce->state) + __execlists_context_fini(ce); + + intel_context_free(ce); +} + +static int __context_pin(struct i915_vma *vma) +{ + unsigned int flags; + int err; + + flags = PIN_GLOBAL | PIN_HIGH; + flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + + err = i915_vma_pin(vma, 0, 0, flags); + if (err) + return err; + + vma->obj->pin_global++; + vma->obj->mm.dirty = true; + + return 0; +} + +static void __context_unpin(struct i915_vma *vma) +{ + vma->obj->pin_global--; + __i915_vma_unpin(vma); +} + static void execlists_context_unpin(struct intel_context *ce) { struct intel_engine_cs *engine; @@ -1132,54 +1300,50 @@ static void execlists_context_unpin(struct intel_context *ce) intel_ring_unpin(ce->ring); - ce->state->obj->pin_global--; i915_gem_object_unpin_map(ce->state->obj); - i915_vma_unpin(ce->state); - - i915_gem_context_put(ce->gem_context); + 
__context_unpin(ce->state); } -static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma) +static void +__execlists_update_reg_state(struct intel_context *ce, + struct intel_engine_cs *engine) { - unsigned int flags; - int err; + struct intel_ring *ring = ce->ring; + u32 *regs = ce->lrc_reg_state; - /* - * Clear this page out of any CPU caches for coherent swap-in/out. - * We only want to do this on the first bind so that we do not stall - * on an active context (which by nature is already on the GPU). - */ - if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { - err = i915_gem_object_set_to_wc_domain(vma->obj, true); - if (err) - return err; - } + GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); + GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); - flags = PIN_GLOBAL | PIN_HIGH; - flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma); + regs[CTX_RING_HEAD + 1] = ring->head; + regs[CTX_RING_TAIL + 1] = ring->tail; - return i915_vma_pin(vma, 0, 0, flags); + /* RPCS */ + if (engine->class == RENDER_CLASS) + regs[CTX_R_PWR_CLK_STATE + 1] = + gen8_make_rpcs(engine->i915, &ce->sseu); } -static struct intel_context * -__execlists_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx, - struct intel_context *ce) +static int +__execlists_context_pin(struct intel_context *ce, + struct intel_engine_cs *engine) { void *vaddr; int ret; - ret = execlists_context_deferred_alloc(ctx, engine, ce); + GEM_BUG_ON(!ce->gem_context->ppgtt); + + ret = execlists_context_deferred_alloc(ce, engine); if (ret) goto err; GEM_BUG_ON(!ce->state); - ret = __context_pin(ctx, ce->state); + ret = __context_pin(ce->state); if (ret) goto err; vaddr = i915_gem_object_pin_map(ce->state->obj, - i915_coherent_map_type(ctx->i915) | + i915_coherent_map_type(engine->i915) | I915_MAP_OVERRIDE); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); @@ -1190,81 +1354,151 @@ __execlists_context_pin(struct intel_engine_cs *engine, if (ret) goto unpin_map; - ret = i915_gem_context_pin_hw_id(ctx); + ret = i915_gem_context_pin_hw_id(ce->gem_context); if (ret) goto unpin_ring; - intel_lr_context_descriptor_update(ctx, engine, ce); - - GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head)); - + ce->lrc_desc = lrc_descriptor(ce, engine); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; - ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = - i915_ggtt_offset(ce->ring->vma); - ce->lrc_reg_state[CTX_RING_HEAD + 1] = ce->ring->head; - ce->lrc_reg_state[CTX_RING_TAIL + 1] = ce->ring->tail; + __execlists_update_reg_state(ce, engine); - ce->state->obj->pin_global++; - i915_gem_context_get(ctx); - return ce; + return 0; unpin_ring: intel_ring_unpin(ce->ring); unpin_map: i915_gem_object_unpin_map(ce->state->obj); unpin_vma: - __i915_vma_unpin(ce->state); + __context_unpin(ce->state); err: - ce->pin_count = 0; - return ERR_PTR(ret); + return ret; +} + +static int execlists_context_pin(struct intel_context *ce) +{ + return __execlists_context_pin(ce, ce->engine); } static const struct intel_context_ops execlists_context_ops = { + .pin = execlists_context_pin, .unpin = execlists_context_unpin, .destroy = execlists_context_destroy, }; -static struct intel_context * -execlists_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static int gen8_emit_init_breadcrumb(struct i915_request *rq) { - struct intel_context *ce = to_intel_context(ctx, engine); + u32 *cs; + + GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb); + + cs = 
intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * Check if we have been preempted before we even get started. + * + * After this point i915_request_started() reports true, even if + * we get preempted and so are no longer running. + */ + *cs++ = MI_ARB_CHECK; + *cs++ = MI_NOOP; - lockdep_assert_held(&ctx->i915->drm.struct_mutex); - GEM_BUG_ON(!ctx->ppgtt); + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = rq->timeline->hwsp_offset; + *cs++ = 0; + *cs++ = rq->fence.seqno - 1; - if (likely(ce->pin_count++)) - return ce; - GEM_BUG_ON(!ce->pin_count); /* no overflow please! */ + intel_ring_advance(rq, cs); - ce->ops = &execlists_context_ops; + /* Record the updated position of the request's payload */ + rq->infix = intel_ring_offset(rq, cs); - return __execlists_context_pin(engine, ctx, ce); + return 0; +} + +static int emit_pdps(struct i915_request *rq) +{ + const struct intel_engine_cs * const engine = rq->engine; + struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt; + int err, i; + u32 *cs; + + GEM_BUG_ON(intel_vgpu_active(rq->i915)); + + /* + * Beware ye of the dragons, this sequence is magic! + * + * Small changes to this sequence can cause anything from + * GPU hangs to forcewake errors and machine lockups! + */ + + /* Flush any residual operations from the context load */ + err = engine->emit_flush(rq, EMIT_FLUSH); + if (err) + return err; + + /* Magic required to prevent forcewake errors! */ + err = engine->emit_flush(rq, EMIT_INVALIDATE); + if (err) + return err; + + cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* Ensure the LRI have landed before we invalidate & continue */ + *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; + for (i = GEN8_3LVL_PDPES; i--; ) { + const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); + + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i)); + *cs++ = upper_32_bits(pd_daddr); + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i)); + *cs++ = lower_32_bits(pd_daddr); + } + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + /* Be doubly sure the LRI have landed before proceeding */ + err = engine->emit_flush(rq, EMIT_FLUSH); + if (err) + return err; + + /* Re-invalidate the TLB for luck */ + return engine->emit_flush(rq, EMIT_INVALIDATE); } static int execlists_request_alloc(struct i915_request *request) { int ret; - GEM_BUG_ON(!request->hw_context->pin_count); + GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); - /* Flush enough space to reduce the likelihood of waiting after + /* + * Flush enough space to reduce the likelihood of waiting after * we start building the request - in which case we will just * have to repeat work. */ request->reserved_space += EXECLISTS_REQUEST_SIZE; - ret = intel_ring_wait_for_space(request->ring, request->reserved_space); - if (ret) - return ret; - - /* Note that after this point, we have committed to using + /* + * Note that after this point, we have committed to using * this request as it is being used to both track the * state of engine initialisation and liveness of the * golden renderstate above. Think twice before you try * to cancel/unwind this request now. */ + /* Unconditionally invalidate GPU caches and TLBs. 
*/ + if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm)) + ret = request->engine->emit_flush(request, EMIT_INVALIDATE); + else + ret = emit_pdps(request); + if (ret) + return ret; + request->reserved_space -= EXECLISTS_REQUEST_SIZE; return 0; } @@ -1525,7 +1759,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) unsigned int i; int ret; - if (GEM_DEBUG_WARN_ON(engine->id != RCS)) + if (GEM_DEBUG_WARN_ON(engine->id != RCS0)) return -EINVAL; switch (INTEL_GEN(engine->i915)) { @@ -1587,7 +1821,7 @@ static void enable_execlists(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); + intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ /* * Make sure we're not enabling the new 12-deep CSB @@ -1608,7 +1842,7 @@ static void enable_execlists(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(STOP_RING)); I915_WRITE(RING_HWS_PGA(engine->mmio_base), - engine->status_page.ggtt_offset); + i915_ggtt_offset(engine->status_page.vma)); POSTING_READ(RING_HWS_PGA(engine->mmio_base)); } @@ -1628,6 +1862,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine) static int gen8_init_common_ring(struct intel_engine_cs *engine) { intel_engine_apply_workarounds(engine); + intel_engine_apply_whitelist(engine); intel_mocs_init_engine(engine); @@ -1644,48 +1879,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) return 0; } -static int gen8_init_render_ring(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - int ret; - - ret = gen8_init_common_ring(engine); - if (ret) - return ret; - - intel_engine_apply_whitelist(engine); - - /* We need to disable the AsyncFlip performance optimisations in order - * to use MI_WAIT_FOR_EVENT within the CS. It should already be - * programmed to '1' on all products. - * - * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv - */ - I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); - - I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - - return 0; -} - -static int gen9_init_render_ring(struct intel_engine_cs *engine) -{ - int ret; - - ret = gen8_init_common_ring(engine); - if (ret) - return ret; - - intel_engine_apply_whitelist(engine); - - return 0; -} - -static struct i915_request * -execlists_reset_prepare(struct intel_engine_cs *engine) +static void execlists_reset_prepare(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request *request, *active; unsigned long flags; GEM_TRACE("%s: depth<-%d\n", engine->name, @@ -1701,59 +1897,40 @@ execlists_reset_prepare(struct intel_engine_cs *engine) * prevents the race. */ __tasklet_disable_sync_once(&execlists->tasklet); + GEM_BUG_ON(!reset_in_progress(execlists)); - spin_lock_irqsave(&engine->timeline.lock, flags); + intel_engine_stop_cs(engine); - /* - * We want to flush the pending context switches, having disabled - * the tasklet above, we can assume exclusive access to the execlists. - * For this allows us to catch up with an inflight preemption event, - * and avoid blaming an innocent request if the stall was due to the - * preemption itself. - */ - process_csb(engine); + /* And flush any current direct submission. 
*/ + spin_lock_irqsave(&engine->timeline.lock, flags); + process_csb(engine); /* drain preemption events */ + spin_unlock_irqrestore(&engine->timeline.lock, flags); +} - /* - * The last active request can then be no later than the last request - * now in ELSP[0]. So search backwards from there, so that if the GPU - * has advanced beyond the last CSB update, it will be pardoned. - */ - active = NULL; - request = port_request(execlists->port); - if (request) { - /* - * Prevent the breadcrumb from advancing before we decide - * which request is currently active. - */ - intel_engine_stop_cs(engine); +static bool lrc_regs_ok(const struct i915_request *rq) +{ + const struct intel_ring *ring = rq->ring; + const u32 *regs = rq->hw_context->lrc_reg_state; - list_for_each_entry_from_reverse(request, - &engine->timeline.requests, - link) { - if (__i915_request_completed(request, - request->global_seqno)) - break; + /* Quick spot check for the common signs of context corruption */ - active = request; - } - } + if (regs[CTX_RING_BUFFER_CONTROL + 1] != + (RING_CTL_SIZE(ring->size) | RING_VALID)) + return false; - spin_unlock_irqrestore(&engine->timeline.lock, flags); + if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma)) + return false; - return active; + return true; } -static void execlists_reset(struct intel_engine_cs *engine, - struct i915_request *request) +static void execlists_reset(struct intel_engine_cs *engine, bool stalled) { struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request *rq; unsigned long flags; u32 *regs; - GEM_TRACE("%s request global=%d, current=%d\n", - engine->name, request ? request->global_seqno : 0, - intel_engine_get_seqno(engine)); - spin_lock_irqsave(&engine->timeline.lock, flags); /* @@ -1768,12 +1945,28 @@ static void execlists_reset(struct intel_engine_cs *engine, execlists_cancel_port_requests(execlists); /* Push back any incomplete requests for replay after the reset. */ - __unwind_incomplete_requests(engine); + rq = __unwind_incomplete_requests(engine); /* Following the reset, we need to reload the CSB read/write pointers */ reset_csb_pointers(&engine->execlists); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + if (!rq) + goto out_unlock; + + /* + * If this request hasn't started yet, e.g. it is waiting on a + * semaphore, we need to avoid skipping the request or else we + * break the signaling chain. However, if the context is corrupt + * the request will not restart and we will be stuck with a wedged + * device. It is quite often the case that if we issue a reset + * while the GPU is loading the context image, that the context + * image becomes corrupt. + * + * Otherwise, if we have not started yet, the request should replay + * perfectly and we do not need to flag the result as being erroneous. + */ + if (!i915_request_started(rq) && lrc_regs_ok(rq)) + goto out_unlock; /* * If the request was innocent, we leave the request in the ELSP @@ -1786,8 +1979,9 @@ static void execlists_reset(struct intel_engine_cs *engine, * and have to at least restore the RING register in the context * image back to the expected values to skip over the guilty request. */ - if (!request || request->fence.error != -EIO) - return; + i915_reset_request(rq, stalled); + if (!stalled && lrc_regs_ok(rq)) + goto out_unlock; /* * We want a simple context + ring to execute the breadcrumb update. 
@@ -1797,25 +1991,22 @@ static void execlists_reset(struct intel_engine_cs *engine, * future request will be after userspace has had the opportunity * to recreate its own state. */ - regs = request->hw_context->lrc_reg_state; + regs = rq->hw_context->lrc_reg_state; if (engine->pinned_default_state) { memcpy(regs, /* skip restoring the vanilla PPHWSP */ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, engine->context_size - PAGE_SIZE); } - execlists_init_reg_state(regs, - request->gem_context, engine, request->ring); - - /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */ - regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma); - request->ring->head = intel_ring_wrap(request->ring, request->postfix); - regs[CTX_RING_HEAD + 1] = request->ring->head; + /* Rerun the request; its payload has been neutered (if guilty). */ + rq->ring->head = intel_ring_wrap(rq->ring, rq->head); + intel_ring_update_space(rq->ring); - intel_ring_update_space(request->ring); + execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring); + __execlists_update_reg_state(rq->hw_context, engine); - /* Reset WaIdleLiteRestore:bdw,skl as well */ - unwind_wa_tail(request); +out_unlock: + spin_unlock_irqrestore(&engine->timeline.lock, flags); } static void execlists_reset_finish(struct intel_engine_cs *engine) @@ -1826,66 +2017,23 @@ static void execlists_reset_finish(struct intel_engine_cs *engine) * After a GPU reset, we may have requests to replay. Do so now while * we still have the forcewake to be sure that the GPU is not allowed * to sleep before we restart and reload a context. - * */ + GEM_BUG_ON(!reset_in_progress(execlists)); if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) execlists->tasklet.func(execlists->tasklet.data); - tasklet_enable(&execlists->tasklet); + if (__tasklet_enable(&execlists->tasklet)) + /* And kick in case we missed a new request submission. */ + tasklet_hi_schedule(&execlists->tasklet); GEM_TRACE("%s: depth->%d\n", engine->name, atomic_read(&execlists->tasklet.count)); } -static int intel_logical_ring_emit_pdps(struct i915_request *rq) -{ - struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; - struct intel_engine_cs *engine = rq->engine; - const int num_lri_cmds = GEN8_3LVL_PDPES * 2; - u32 *cs; - int i; - - cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds); - for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) { - const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i)); - *cs++ = upper_32_bits(pd_daddr); - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i)); - *cs++ = lower_32_bits(pd_daddr); - } - - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - - return 0; -} - static int gen8_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) { u32 *cs; - int ret; - - /* Don't rely in hw updating PDPs, specially in lite-restore. - * Ideally, we should set Force PD Restore in ctx descriptor, - * but we can't. Force Restore would be a second option, but - * it is unsafe in case of lite-restore (because the ctx is - * not idle). 
PML4 is allocated during ppgtt init so this is - * not needed in 48-bit.*/ - if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) && - !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) && - !intel_vgpu_active(rq->i915)) { - ret = intel_logical_ring_emit_pdps(rq); - if (ret) - return ret; - - rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine); - } cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) @@ -1918,6 +2066,7 @@ static int gen8_emit_bb_start(struct i915_request *rq, *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); return 0; @@ -1925,16 +2074,14 @@ static int gen8_emit_bb_start(struct i915_request *rq, static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - I915_WRITE_IMR(engine, - ~(engine->irq_enable_mask | engine->irq_keep_mask)); - POSTING_READ_FW(RING_IMR(engine->mmio_base)); + ENGINE_WRITE(engine, RING_IMR, + ~(engine->irq_enable_mask | engine->irq_keep_mask)); + ENGINE_POSTING_READ(engine, RING_IMR); } static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - I915_WRITE_IMR(engine, ~engine->irq_keep_mask); + ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); } static int gen8_emit_flush(struct i915_request *request, u32 mode) @@ -2002,7 +2149,7 @@ static int gen8_emit_flush_render(struct i915_request *request, * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL * pipe control. */ - if (IS_GEN9(request->i915)) + if (IS_GEN(request->i915, 9)) vf_flush_wa = true; /* WaForGAMHang:kbl */ @@ -2044,45 +2191,62 @@ static int gen8_emit_flush_render(struct i915_request *request, * used as a workaround for not being allowed to do lite * restore with HEAD==TAIL (WaIdleLiteRestore). */ -static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs) +static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) { /* Ensure there's always at least one preemption point per-request. */ *cs++ = MI_ARB_CHECK; *cs++ = MI_NOOP; request->wa_tail = intel_ring_offset(request, cs); + + return cs; } -static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs) +static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) { - /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ - BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); + cs = gen8_emit_ggtt_write(cs, + request->fence.seqno, + request->timeline->hwsp_offset, + 0); + + cs = gen8_emit_ggtt_write(cs, + intel_engine_next_hangcheck_seqno(request->engine), + I915_GEM_HWS_HANGCHECK_ADDR, + MI_FLUSH_DW_STORE_INDEX); + - cs = gen8_emit_ggtt_write(cs, request->global_seqno, - intel_hws_seqno_address(request->engine)); *cs++ = MI_USER_INTERRUPT; *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + request->tail = intel_ring_offset(request, cs); assert_ring_tail_valid(request->ring, request->tail); - gen8_emit_wa_tail(request, cs); + return gen8_emit_wa_tail(request, cs); } -static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS; -static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs) +static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { - /* We're using qword write, seqno should be aligned to 8 bytes. 
*/ - BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); + cs = gen8_emit_ggtt_write_rcs(cs, + request->fence.seqno, + request->timeline->hwsp_offset, + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_CS_STALL); + + cs = gen8_emit_ggtt_write_rcs(cs, + intel_engine_next_hangcheck_seqno(request->engine), + I915_GEM_HWS_HANGCHECK_ADDR, + PIPE_CONTROL_STORE_DATA_INDEX); - cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno, - intel_hws_seqno_address(request->engine)); *cs++ = MI_USER_INTERRUPT; *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + request->tail = intel_ring_offset(request, cs); assert_ring_tail_valid(request->ring, request->tail); - gen8_emit_wa_tail(request, cs); + return gen8_emit_wa_tail(request, cs); } -static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS; static int gen8_init_rcs_context(struct i915_request *rq) { @@ -2122,7 +2286,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) dev_priv = engine->i915; if (engine->buffer) { - WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); + WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); } if (engine->cleanup) @@ -2150,14 +2314,10 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) engine->unpark = NULL; engine->flags |= I915_ENGINE_SUPPORTS_STATS; - if (engine->i915->preempt_context) + if (!intel_vgpu_active(engine->i915)) + engine->flags |= I915_ENGINE_HAS_SEMAPHORES; + if (engine->preempt_context) engine->flags |= I915_ENGINE_HAS_PREEMPTION; - - engine->i915->caps.scheduler = - I915_SCHEDULER_CAP_ENABLED | - I915_SCHEDULER_CAP_PRIORITY; - if (intel_engine_has_preemption(engine)) - engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION; } static void @@ -2170,12 +2330,12 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->reset.reset = execlists_reset; engine->reset.finish = execlists_reset_finish; - engine->context_pin = execlists_context_pin; + engine->cops = &execlists_context_ops; engine->request_alloc = execlists_request_alloc; engine->emit_flush = gen8_emit_flush; - engine->emit_breadcrumb = gen8_emit_breadcrumb; - engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz; + engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; + engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb; engine->set_default_submission = intel_execlists_set_default_submission; @@ -2200,11 +2360,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) if (INTEL_GEN(engine->i915) < 11) { const u8 irq_shifts[] = { - [RCS] = GEN8_RCS_IRQ_SHIFT, - [BCS] = GEN8_BCS_IRQ_SHIFT, - [VCS] = GEN8_VCS1_IRQ_SHIFT, - [VCS2] = GEN8_VCS2_IRQ_SHIFT, - [VECS] = GEN8_VECS_IRQ_SHIFT, + [RCS0] = GEN8_RCS_IRQ_SHIFT, + [BCS0] = GEN8_BCS_IRQ_SHIFT, + [VCS0] = GEN8_VCS0_IRQ_SHIFT, + [VCS1] = GEN8_VCS1_IRQ_SHIFT, + [VECS0] = GEN8_VECS_IRQ_SHIFT, }; shift = irq_shifts[engine->id]; @@ -2214,10 +2374,14 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; } -static void +static int logical_ring_setup(struct intel_engine_cs *engine) { - intel_engine_setup_common(engine); + int err; + + err = intel_engine_setup_common(engine); + if (err) + return err; /* Intentionally left blank. 
*/ engine->buffer = NULL; @@ -2227,42 +2391,43 @@ logical_ring_setup(struct intel_engine_cs *engine) logical_ring_default_vfuncs(engine); logical_ring_default_irqs(engine); + + return 0; } static int logical_ring_init(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; struct intel_engine_execlists * const execlists = &engine->execlists; + u32 base = engine->mmio_base; int ret; ret = intel_engine_init_common(engine); if (ret) return ret; + intel_engine_init_workarounds(engine); + if (HAS_LOGICAL_RING_ELSQ(i915)) { - execlists->submit_reg = i915->regs + - i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); - execlists->ctrl_reg = i915->regs + - i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine)); + execlists->submit_reg = i915->uncore.regs + + i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); + execlists->ctrl_reg = i915->uncore.regs + + i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); } else { - execlists->submit_reg = i915->regs + - i915_mmio_reg_offset(RING_ELSP(engine)); + execlists->submit_reg = i915->uncore.regs + + i915_mmio_reg_offset(RING_ELSP(base)); } execlists->preempt_complete_status = ~0u; - if (i915->preempt_context) { - struct intel_context *ce = - to_intel_context(i915->preempt_context, engine); - + if (engine->preempt_context) execlists->preempt_complete_status = - upper_32_bits(ce->lrc_desc); - } + upper_32_bits(engine->preempt_context->lrc_desc); execlists->csb_status = - &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; execlists->csb_write = - &engine->status_page.page_addr[intel_hws_csb_write_index(i915)]; + &engine->status_page.addr[intel_hws_csb_write_index(i915)]; reset_csb_pointers(execlists); @@ -2271,23 +2436,16 @@ static int logical_ring_init(struct intel_engine_cs *engine) int logical_render_ring_init(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; int ret; - logical_ring_setup(engine); - - if (HAS_L3_DPF(dev_priv)) - engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + ret = logical_ring_setup(engine); + if (ret) + return ret; /* Override some for render ring. 
*/ - if (INTEL_GEN(dev_priv) >= 9) - engine->init_hw = gen9_init_render_ring; - else - engine->init_hw = gen8_init_render_ring; engine->init_context = gen8_init_rcs_context; engine->emit_flush = gen8_emit_flush_render; - engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs; - engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz; + engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; ret = logical_ring_init(engine); if (ret) @@ -2305,34 +2463,65 @@ int logical_render_ring_init(struct intel_engine_cs *engine) } intel_engine_init_whitelist(engine); - intel_engine_init_workarounds(engine); return 0; } int logical_xcs_ring_init(struct intel_engine_cs *engine) { - logical_ring_setup(engine); + int err; + + err = logical_ring_setup(engine); + if (err) + return err; return logical_ring_init(engine); } -static u32 -make_rpcs(struct drm_i915_private *dev_priv) +u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *req_sseu) { - bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg; - u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask); - u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]); + const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + bool subslice_pg = sseu->has_subslice_pg; + struct intel_sseu ctx_sseu; + u8 slices, subslices; u32 rpcs = 0; /* * No explicit RPCS request is needed to ensure full * slice/subslice/EU enablement prior to Gen9. */ - if (INTEL_GEN(dev_priv) < 9) + if (INTEL_GEN(i915) < 9) return 0; /* + * If i915/perf is active, we want a stable powergating configuration + * on the system. + * + * We could choose full enablement, but on ICL we know there are use + * cases which disable slices for functional, apart for performance + * reasons. So in this case we select a known stable subset. + */ + if (!i915->perf.oa.exclusive_stream) { + ctx_sseu = *req_sseu; + } else { + ctx_sseu = intel_device_default_sseu(i915); + + if (IS_GEN(i915, 11)) { + /* + * We only need subslice count so it doesn't matter + * which ones we select - just turn off low bits in the + * amount of half of all available subslices per slice. + */ + ctx_sseu.subslice_mask = + ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2)); + ctx_sseu.slice_mask = 0x1; + } + } + + slices = hweight8(ctx_sseu.slice_mask); + subslices = hweight8(ctx_sseu.subslice_mask); + + /* * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits * wide and Icelake has up to eight subslices, specfial programming is * needed in order to correctly enable all subslices. @@ -2357,7 +2546,9 @@ make_rpcs(struct drm_i915_private *dev_priv) * subslices are enabled, or a count between one and four on the first * slice. */ - if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) { + if (IS_GEN(i915, 11) && + slices == 1 && + subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) { GEM_BUG_ON(subslices & 1); subslice_pg = false; @@ -2370,10 +2561,10 @@ make_rpcs(struct drm_i915_private *dev_priv) * must make an explicit request through RPCS for full * enablement. 
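The Icelake branch of gen8_make_rpcs() above, taken while i915/perf holds an exclusive OA stream, halves the subslice mask. A worked sketch of what that expression computes (example_halved_subslice_mask is hypothetical and __builtin_popcount stands in for the driver's hweight8(); illustrative only):

static unsigned char example_halved_subslice_mask(unsigned char subslice_mask)
{
	unsigned int half = __builtin_popcount(subslice_mask) / 2;

	/* ~(~0u << half) sets the low 'half' bits: 0xff -> 0x0f, 0x3f -> 0x07 */
	return ~(~0u << half);
}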
*/ - if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) { + if (sseu->has_slice_pg) { u32 mask, val = slices; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(i915) >= 11) { mask = GEN11_RPCS_S_CNT_MASK; val <<= GEN11_RPCS_S_CNT_SHIFT; } else { @@ -2398,18 +2589,16 @@ make_rpcs(struct drm_i915_private *dev_priv) rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val; } - if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) { + if (sseu->has_eu_pg) { u32 val; - val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice << - GEN8_RPCS_EU_MIN_SHIFT; + val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT; GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK); val &= GEN8_RPCS_EU_MIN_MASK; rpcs |= val; - val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice << - GEN8_RPCS_EU_MAX_SHIFT; + val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT; GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK); val &= GEN8_RPCS_EU_MAX_MASK; @@ -2451,13 +2640,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) } static void execlists_init_reg_state(u32 *regs, - struct i915_gem_context *ctx, + struct intel_context *ce, struct intel_engine_cs *engine, struct intel_ring *ring) { - struct drm_i915_private *dev_priv = engine->i915; - u32 base = engine->mmio_base; + struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt; bool rcs = engine->class == RENDER_CLASS; + u32 base = engine->mmio_base; /* A context is actually a big batch buffer with several * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The @@ -2469,10 +2658,10 @@ static void execlists_init_reg_state(u32 *regs, regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) | MI_LRI_FORCE_POSTED; - CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine), + CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base), _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH)); - if (INTEL_GEN(dev_priv) < 11) { + if (INTEL_GEN(engine->i915) < 11) { regs[CTX_CONTEXT_CONTROL + 1] |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | CTX_CTRL_RS_CTX_ENABLE); @@ -2527,29 +2716,33 @@ static void execlists_init_reg_state(u32 *regs, CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0); CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0); - if (i915_vm_is_48bit(&ctx->ppgtt->vm)) { + if (i915_vm_is_4lvl(&ppgtt->vm)) { /* 64b PPGTT (48bit canonical) * PDP0_DESCRIPTOR contains the base address to PML4 and * other PDP Descriptors are ignored. 
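As the comment in the hunk above notes, the context image is a series of MI_LOAD_REGISTER_IMM commands followed by (offset, value) pairs, so each CTX_* index names the slot of such a pair. A minimal sketch of that write pattern with hypothetical names (not the driver's CTX_REG macro):

#include <stdint.h>

/* Hypothetical helper: store a register offset at regs[idx] and its
 * value at regs[idx + 1], mirroring how the LRI-based register state
 * is filled in. */
static void ctx_reg(uint32_t *regs, unsigned int idx,
                    uint32_t reg_offset, uint32_t value)
{
        regs[idx + 0] = reg_offset;
        regs[idx + 1] = value;
}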
*/ - ASSIGN_CTX_PML4(ctx->ppgtt, regs); + ASSIGN_CTX_PML4(ppgtt, regs); + } else { + ASSIGN_CTX_PDP(ppgtt, regs, 3); + ASSIGN_CTX_PDP(ppgtt, regs, 2); + ASSIGN_CTX_PDP(ppgtt, regs, 1); + ASSIGN_CTX_PDP(ppgtt, regs, 0); } if (rcs) { regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); - CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, - make_rpcs(dev_priv)); + CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); - i915_oa_init_reg_state(engine, ctx, regs); + i915_oa_init_reg_state(engine, ce, regs); } regs[CTX_END] = MI_BATCH_BUFFER_END; - if (INTEL_GEN(dev_priv) >= 10) + if (INTEL_GEN(engine->i915) >= 10) regs[CTX_END] |= BIT(0); } static int -populate_lr_context(struct i915_gem_context *ctx, +populate_lr_context(struct intel_context *ce, struct drm_i915_gem_object *ctx_obj, struct intel_engine_cs *engine, struct intel_ring *ring) @@ -2558,19 +2751,12 @@ populate_lr_context(struct i915_gem_context *ctx, u32 *regs; int ret; - ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true); - if (ret) { - DRM_DEBUG_DRIVER("Could not set to CPU domain\n"); - return ret; - } - vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); return ret; } - ctx_obj->mm.dirty = true; if (engine->default_state) { /* @@ -2595,27 +2781,39 @@ populate_lr_context(struct i915_gem_context *ctx, /* The second page of the context object contains some fields which must * be set up prior to the first execution. */ regs = vaddr + LRC_STATE_PN * PAGE_SIZE; - execlists_init_reg_state(regs, ctx, engine, ring); + execlists_init_reg_state(regs, ce, engine, ring); if (!engine->default_state) regs[CTX_CONTEXT_CONTROL + 1] |= _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); - if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11) + if (ce->gem_context == engine->i915->preempt_context && + INTEL_GEN(engine->i915) < 11) regs[CTX_CONTEXT_CONTROL + 1] |= _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT); + ret = 0; err_unpin_ctx: + __i915_gem_object_flush_map(ctx_obj, + LRC_HEADER_PAGES * PAGE_SIZE, + engine->context_size); i915_gem_object_unpin_map(ctx_obj); return ret; } -static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - struct intel_context *ce) +static struct i915_timeline *get_timeline(struct i915_gem_context *ctx) +{ + if (ctx->timeline) + return i915_timeline_get(ctx->timeline); + else + return i915_timeline_create(ctx->i915, NULL); +} + +static int execlists_context_deferred_alloc(struct intel_context *ce, + struct intel_engine_cs *engine) { struct drm_i915_gem_object *ctx_obj; struct i915_vma *vma; - uint32_t context_size; + u32 context_size; struct intel_ring *ring; struct i915_timeline *timeline; int ret; @@ -2631,30 +2829,32 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, */ context_size += LRC_HEADER_PAGES * PAGE_SIZE; - ctx_obj = i915_gem_object_create(ctx->i915, context_size); + ctx_obj = i915_gem_object_create(engine->i915, context_size); if (IS_ERR(ctx_obj)) return PTR_ERR(ctx_obj); - vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL); + vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto error_deref_obj; } - timeline = i915_timeline_create(ctx->i915, ctx->name); + timeline = get_timeline(ce->gem_context); if (IS_ERR(timeline)) { ret = PTR_ERR(timeline); goto error_deref_obj; } - ring = 
intel_engine_create_ring(engine, timeline, ctx->ring_size); + ring = intel_engine_create_ring(engine, + timeline, + ce->gem_context->ring_size); i915_timeline_put(timeline); if (IS_ERR(ring)) { ret = PTR_ERR(ring); goto error_deref_obj; } - ret = populate_lr_context(ctx, ctx_obj, engine, ring); + ret = populate_lr_context(ce, ctx_obj, engine, ring); if (ret) { DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); goto error_ring_free; @@ -2666,7 +2866,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, return 0; error_ring_free: - intel_ring_free(ring); + intel_ring_put(ring); error_deref_obj: i915_gem_object_put(ctx_obj); return ret; @@ -2674,9 +2874,8 @@ error_deref_obj: void intel_lr_context_resume(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; struct i915_gem_context *ctx; - enum intel_engine_id id; + struct intel_context *ce; /* * Because we emit WA_TAIL_DWORDS there may be a disparity @@ -2690,23 +2889,72 @@ void intel_lr_context_resume(struct drm_i915_private *i915) * simplicity, we just zero everything out. */ list_for_each_entry(ctx, &i915->contexts.list, link) { - for_each_engine(engine, i915, id) { - struct intel_context *ce = - to_intel_context(ctx, engine); + list_for_each_entry(ce, &ctx->active_engines, active_link) { + GEM_BUG_ON(!ce->ring); + intel_ring_reset(ce->ring, 0); + __execlists_update_reg_state(ce, ce->engine); + } + } +} - if (!ce->state) - continue; +void intel_execlists_show_requests(struct intel_engine_cs *engine, + struct drm_printer *m, + void (*show_request)(struct drm_printer *m, + struct i915_request *rq, + const char *prefix), + unsigned int max) +{ + const struct intel_engine_execlists *execlists = &engine->execlists; + struct i915_request *rq, *last; + unsigned long flags; + unsigned int count; + struct rb_node *rb; - intel_ring_reset(ce->ring, 0); + spin_lock_irqsave(&engine->timeline.lock, flags); - if (ce->pin_count) { /* otherwise done in context_pin */ - u32 *regs = ce->lrc_reg_state; + last = NULL; + count = 0; + list_for_each_entry(rq, &engine->timeline.requests, link) { + if (count++ < max - 1) + show_request(m, rq, "\t\tE "); + else + last = rq; + } + if (last) { + if (count > max) { + drm_printf(m, + "\t\t...skipping %d executing requests...\n", + count - max); + } + show_request(m, last, "\t\tE "); + } - regs[CTX_RING_HEAD + 1] = ce->ring->head; - regs[CTX_RING_TAIL + 1] = ce->ring->tail; - } + last = NULL; + count = 0; + if (execlists->queue_priority_hint != INT_MIN) + drm_printf(m, "\t\tQueue priority hint: %d\n", + execlists->queue_priority_hint); + for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { + struct i915_priolist *p = rb_entry(rb, typeof(*p), node); + int i; + + priolist_for_each_request(rq, p, i) { + if (count++ < max - 1) + show_request(m, rq, "\t\tQ "); + else + last = rq; } } + if (last) { + if (count > max) { + drm_printf(m, + "\t\t...skipping %d queued requests...\n", + count - max); + } + show_request(m, last, "\t\tQ "); + } + + spin_unlock_irqrestore(&engine->timeline.lock, flags); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index f5a5502ecf70..92642ab91472 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -28,20 +28,20 @@ #include "i915_gem_context.h" /* Execlists regs */ -#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230) -#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234) -#define 
RING_EXECLIST_STATUS_HI(engine) _MMIO((engine)->mmio_base + 0x234 + 4) -#define RING_CONTEXT_CONTROL(engine) _MMIO((engine)->mmio_base + 0x244) +#define RING_ELSP(base) _MMIO((base) + 0x230) +#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234) +#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4) +#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244) #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) -#define CTX_CTRL_RS_CTX_ENABLE (1 << 1) +#define CTX_CTRL_RS_CTX_ENABLE (1 << 1) #define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2) -#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370) -#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8) -#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4) -#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0) -#define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510) -#define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550) +#define RING_CONTEXT_STATUS_BUF_BASE(base) _MMIO((base) + 0x370) +#define RING_CONTEXT_STATUS_BUF_LO(base, i) _MMIO((base) + 0x370 + (i) * 8) +#define RING_CONTEXT_STATUS_BUF_HI(base, i) _MMIO((base) + 0x370 + (i) * 8 + 4) +#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) +#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510) +#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550) #define EL_CTRL_LOAD (1 << 0) /* The docs specify that the write pointer wraps around after 5h, "After status @@ -97,11 +97,21 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine); */ #define LRC_HEADER_PAGES LRC_PPHWSP_PN +struct drm_printer; + struct drm_i915_private; struct i915_gem_context; void intel_lr_context_resume(struct drm_i915_private *dev_priv); - void intel_execlists_set_default_submission(struct intel_engine_cs *engine); +void intel_execlists_show_requests(struct intel_engine_cs *engine, + struct drm_printer *m, + void (*show_request)(struct drm_printer *m, + struct i915_request *rq, + const char *prefix), + unsigned int max); + +u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *ctx_sseu); + #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 96a8d9524b0c..8d202b13e24f 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -288,12 +288,12 @@ static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) } static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, - uint8_t *avi_buf) + u8 *avi_buf) { u8 avi_if_ctrl; u8 block_count = 0; u8 *data; - uint16_t reg; + u16 reg; ssize_t ret; while (block_count < 4) { @@ -335,10 +335,10 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, } static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux, - const uint8_t *frame, + const u8 *frame, ssize_t len) { - uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, }; + u8 avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, }; /* * Parade's frames contains 32 bytes of data, divided @@ -367,13 +367,13 @@ static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux, } static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, - const uint8_t *buffer, ssize_t len) + const u8 *buffer, ssize_t len) { int ret; - uint32_t val = 0; - uint32_t retry; - uint16_t reg; - const uint8_t *data = buffer; + u32 val = 0; + u32 retry; + 
u16 reg; + const u8 *data = buffer; reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET; while (val < len) { @@ -452,6 +452,14 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n"); } +void lspcon_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + /* FIXME implement this */ +} + void lspcon_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, @@ -459,21 +467,22 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, { ssize_t ret; union hdmi_infoframe frame; - uint8_t buf[VIDEO_DIP_DATA_SIZE]; + u8 buf[VIDEO_DIP_DATA_SIZE]; struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); struct intel_lspcon *lspcon = &dig_port->lspcon; - struct intel_dp *intel_dp = &dig_port->dp; - struct drm_connector *connector = &intel_dp->attached_connector->base; - const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode; - bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported; + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; if (!lspcon->active) { DRM_ERROR("Writing infoframes while LSPCON disabled ?\n"); return; } + /* FIXME precompute infoframes */ + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - mode, is_hdmi2_sink); + conn_state->connector, + adjusted_mode); if (ret < 0) { DRM_ERROR("couldn't fill AVI infoframe\n"); return; @@ -488,11 +497,12 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, frame.avi.colorspace = HDMI_COLORSPACE_RGB; } - drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode, + drm_hdmi_avi_infoframe_quant_range(&frame.avi, + conn_state->connector, + adjusted_mode, crtc_state->limited_color_range ? 
HDMI_QUANTIZATION_RANGE_LIMITED : - HDMI_QUANTIZATION_RANGE_FULL, - false, is_hdmi2_sink); + HDMI_QUANTIZATION_RANGE_FULL); ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf)); if (ret < 0) { @@ -504,9 +514,10 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, buf, ret); } -bool lspcon_infoframe_enabled(struct intel_encoder *encoder, +u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { + /* FIXME actually read this from the hw */ return enc_to_intel_lspcon(&encoder->base)->active; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index e6c5d985ea0a..34dd2d71814b 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -32,7 +32,6 @@ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/vga_switcheroo.h> -#include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> @@ -95,15 +94,17 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + intel_wakeref_t wakeref; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe); - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } @@ -151,24 +152,17 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET; val = I915_READ(PP_ON_DELAYS(0)); - pps->port = (val & PANEL_PORT_SELECT_MASK) >> - PANEL_PORT_SELECT_SHIFT; - pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >> - PANEL_POWER_UP_DELAY_SHIFT; - pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >> - PANEL_LIGHT_ON_DELAY_SHIFT; + pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val); + pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val); + pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val); val = I915_READ(PP_OFF_DELAYS(0)); - pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >> - PANEL_POWER_DOWN_DELAY_SHIFT; - pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >> - PANEL_LIGHT_OFF_DELAY_SHIFT; + pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val); + pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val); val = I915_READ(PP_DIVISOR(0)); - pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >> - PP_REFERENCE_DIVIDER_SHIFT; - val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >> - PANEL_POWER_CYCLE_DELAY_SHIFT; + pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val); + val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val); /* * Remove the BSpec specified +1 (100ms) offset that accounts for a * too short power-cycle delay due to the asynchronous programming of @@ -208,16 +202,19 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, val |= PANEL_POWER_RESET; I915_WRITE(PP_CONTROL(0), val); - I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) | - (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) | - (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT)); - I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) | - (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT)); + I915_WRITE(PP_ON_DELAYS(0), + REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | + 
REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | + REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); - val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT; - val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) << - PANEL_POWER_CYCLE_DELAY_SHIFT; - I915_WRITE(PP_DIVISOR(0), val); + I915_WRITE(PP_OFF_DELAYS(0), + REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | + REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); + + I915_WRITE(PP_DIVISOR(0), + REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | + REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, + DIV_ROUND_UP(pps->t4, 1000) + 1)); } static void intel_pre_enable_lvds(struct intel_encoder *encoder, @@ -279,7 +276,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder, * special lvds dither control bit on pch-split platforms, dithering is * only controlled through the PIPECONF reg. */ - if (IS_GEN4(dev_priv)) { + if (IS_GEN(dev_priv, 4)) { /* * Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. @@ -314,7 +311,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder, I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON); POSTING_READ(lvds_encoder->reg); - if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000)) + if (intel_wait_for_register(&dev_priv->uncore, + PP_STATUS(0), PP_ON, PP_ON, 5000)) DRM_ERROR("timed out waiting for panel to power on\n"); intel_panel_enable_backlight(pipe_config, conn_state); @@ -328,7 +326,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON); - if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000)) + if (intel_wait_for_register(&dev_priv->uncore, + PP_STATUS(0), PP_ON, 0, 1000)) DRM_ERROR("timed out waiting for panel to power off\n"); I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); @@ -379,9 +378,9 @@ intel_lvds_mode_valid(struct drm_connector *connector, return MODE_OK; } -static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = @@ -395,7 +394,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, /* Should never happen!! */ if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) { DRM_ERROR("Can't support LVDS on pipe A\n"); - return false; + return -EINVAL; } if (lvds_encoder->a3_power == LVDS_A3_POWER_UP) @@ -421,7 +420,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, adjusted_mode); if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; if (HAS_PCH_SPLIT(dev_priv)) { pipe_config->has_pch_encoder = true; @@ -440,7 +439,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, * user's requested refresh rate. 
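The PPS hunks above replace open-coded shift/mask pairs with REG_FIELD_GET()/REG_FIELD_PREP(), where the mask alone identifies the field. A simplified, self-contained sketch of FIELD_GET/FIELD_PREP-style helpers (not the kernel's implementation; DELAY_MASK is a made-up field for illustration):

#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT(mask)    (__builtin_ctz(mask))
#define FIELD_GET(mask, val) (((val) & (mask)) >> FIELD_SHIFT(mask))
#define FIELD_PREP(mask, v)  (((v) << FIELD_SHIFT(mask)) & (mask))

int main(void)
{
        const uint32_t DELAY_MASK = 0x1fff0000;  /* hypothetical field */
        uint32_t reg = FIELD_PREP(DELAY_MASK, 200);

        /* prints 0x00c80000 200 */
        printf("0x%08x %u\n", (unsigned int)reg,
               (unsigned int)FIELD_GET(DELAY_MASK, reg));
        return 0;
}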
*/ - return true; + return 0; } static enum drm_connector_status @@ -745,20 +744,21 @@ static const struct dmi_system_id intel_dual_link_lvds[] = { { } /* terminating entry */ }; -struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev) +struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv) { - struct intel_encoder *intel_encoder; + struct intel_encoder *encoder; - for_each_intel_encoder(dev, intel_encoder) - if (intel_encoder->type == INTEL_OUTPUT_LVDS) - return intel_encoder; + for_each_intel_encoder(&dev_priv->drm, encoder) { + if (encoder->type == INTEL_OUTPUT_LVDS) + return encoder; + } return NULL; } -bool intel_is_dual_link_lvds(struct drm_device *dev) +bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv) { - struct intel_encoder *encoder = intel_get_lvds_encoder(dev); + struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv); return encoder && to_lvds_encoder(&encoder->base)->is_dual_link; } @@ -797,26 +797,6 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; } -static bool intel_lvds_supported(struct drm_i915_private *dev_priv) -{ - /* - * With the introduction of the PCH we gained a dedicated - * LVDS presence pin, use it. - */ - if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) - return true; - - /* - * Otherwise LVDS was only attached to mobile products, - * except for the inglorious 830gm - */ - if (INTEL_GEN(dev_priv) <= 4 && - IS_MOBILE(dev_priv) && !IS_I830(dev_priv)) - return true; - - return false; -} - /** * intel_lvds_init - setup LVDS connectors on this device * @dev_priv: i915 device @@ -832,7 +812,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; - struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_display_mode *fixed_mode = NULL; struct drm_display_mode *downclock_mode = NULL; struct edid *edid; @@ -841,9 +820,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) u8 pin; u32 allowed_scalers; - if (!intel_lvds_supported(dev_priv)) - return; - /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) { WARN(!dev_priv->vbt.int_lvds_support, @@ -909,6 +885,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) } intel_encoder->get_hw_state = intel_lvds_get_hw_state; intel_encoder->get_config = intel_lvds_get_config; + intel_encoder->update_pipe = intel_panel_update_backlight; intel_connector->get_hw_state = intel_connector_get_hw_state; intel_connector_attach_encoder(intel_connector, intel_encoder); @@ -919,7 +896,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) intel_encoder->cloneable = 0; if (HAS_PCH_SPLIT(dev_priv)) intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - else if (IS_GEN4(dev_priv)) + else if (IS_GEN(dev_priv, 4)) intel_encoder->crtc_mask = (1 << 0) | (1 << 1); else intel_encoder->crtc_mask = (1 << 1); @@ -973,30 +950,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) } intel_connector->edid = edid; - list_for_each_entry(scan, &connector->probed_modes, head) { - if (scan->type & DRM_MODE_TYPE_PREFERRED) { - DRM_DEBUG_KMS("using preferred mode from EDID: "); - drm_mode_debug_printmodeline(scan); - - fixed_mode = drm_mode_duplicate(dev, scan); - if (fixed_mode) - goto out; - } - } + fixed_mode = intel_panel_edid_fixed_mode(intel_connector); + if (fixed_mode) + goto out; /* Failed to get EDID, what 
about VBT? */ - if (dev_priv->vbt.lfp_lvds_vbt_mode) { - DRM_DEBUG_KMS("using mode from VBT: "); - drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode); - - fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); - if (fixed_mode) { - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; - connector->display_info.width_mm = fixed_mode->width_mm; - connector->display_info.height_mm = fixed_mode->height_mm; - goto out; - } - } + fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); + if (fixed_mode) + goto out; /* * If we didn't get EDID, try checking if the panel is already turned diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 77e9871a8c9a..274ba78500c0 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -28,48 +28,60 @@ struct drm_i915_mocs_entry { u32 control_value; u16 l3cc_value; + u16 used; }; struct drm_i915_mocs_table { - u32 size; + unsigned int size; + unsigned int n_entries; const struct drm_i915_mocs_entry *table; }; /* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */ -#define LE_CACHEABILITY(value) ((value) << 0) -#define LE_TGT_CACHE(value) ((value) << 2) +#define _LE_CACHEABILITY(value) ((value) << 0) +#define _LE_TGT_CACHE(value) ((value) << 2) #define LE_LRUM(value) ((value) << 4) #define LE_AOM(value) ((value) << 6) #define LE_RSC(value) ((value) << 7) #define LE_SCC(value) ((value) << 8) #define LE_PFM(value) ((value) << 11) #define LE_SCF(value) ((value) << 14) +#define LE_COS(value) ((value) << 15) +#define LE_SSE(value) ((value) << 17) /* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */ #define L3_ESC(value) ((value) << 0) #define L3_SCC(value) ((value) << 1) -#define L3_CACHEABILITY(value) ((value) << 4) +#define _L3_CACHEABILITY(value) ((value) << 4) /* Helper defines */ #define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */ +#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */ /* (e)LLC caching options */ -#define LE_PAGETABLE 0 -#define LE_UC 1 -#define LE_WT 2 -#define LE_WB 3 - -/* L3 caching options */ -#define L3_DIRECT 0 -#define L3_UC 1 -#define L3_RESERVED 2 -#define L3_WB 3 +#define LE_0_PAGETABLE _LE_CACHEABILITY(0) +#define LE_1_UC _LE_CACHEABILITY(1) +#define LE_2_WT _LE_CACHEABILITY(2) +#define LE_3_WB _LE_CACHEABILITY(3) /* Target cache */ -#define LE_TC_PAGETABLE 0 -#define LE_TC_LLC 1 -#define LE_TC_LLC_ELLC 2 -#define LE_TC_LLC_ELLC_ALT 3 +#define LE_TC_0_PAGETABLE _LE_TGT_CACHE(0) +#define LE_TC_1_LLC _LE_TGT_CACHE(1) +#define LE_TC_2_LLC_ELLC _LE_TGT_CACHE(2) +#define LE_TC_3_LLC_ELLC_ALT _LE_TGT_CACHE(3) + +/* L3 caching options */ +#define L3_0_DIRECT _L3_CACHEABILITY(0) +#define L3_1_UC _L3_CACHEABILITY(1) +#define L3_2_RESERVED _L3_CACHEABILITY(2) +#define L3_3_WB _L3_CACHEABILITY(3) + +#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \ + [__idx] = { \ + .control_value = __control_value, \ + .l3cc_value = __l3cc_value, \ + .used = 1, \ + } /* * MOCS tables @@ -80,85 +92,147 @@ struct drm_i915_mocs_table { * LNCFCMOCS0 - LNCFCMOCS32 registers. * * These tables are intended to be kept reasonably consistent across - * platforms. However some of the fields are not applicable to all of - * them. + * HW platforms, and for ICL+, be identical across OSes. To achieve + * that, for Icelake and above, list of entries is published as part + * of bspec. * * Entries not part of the following tables are undefined as far as * userspace is concerned and shouldn't be relied upon. 
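The MOCS_ENTRY() macro above builds a sparse table with designated array initializers, so rows can be listed by index and any gap stays zero-initialized (and therefore unused). A tiny standalone example of the same idiom (generic names, not the MOCS definitions):

#include <stdio.h>

struct entry { unsigned int value; unsigned int used; };

#define ENTRY(idx, v) [idx] = { .value = (v), .used = 1 }

static const struct entry table[] = {
        ENTRY(0, 0x9),
        ENTRY(2, 0x3b),   /* index 1 is deliberately left out */
};

int main(void)
{
        printf("%u %u\n", table[1].used, table[2].value);  /* prints 0 59 */
        return 0;
}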
For the time - * being they will be implicitly initialized to the strictest caching - * configuration (uncached) to guarantee forwards compatibility with - * userspace programs written against more recent kernels providing - * additional MOCS entries. + * being they will be initialized to PTE. * - * NOTE: These tables MUST start with being uncached and the length - * MUST be less than 63 as the last two registers are reserved - * by the hardware. These tables are part of the kernel ABI and - * may only be updated incrementally by adding entries at the - * end. + * The last two entries are reserved by the hardware. For ICL+ they + * should be initialized according to bspec and never used, for older + * platforms they should never be written to. + * + * NOTE: These tables are part of bspec and defined as part of hardware + * interface for ICL+. For older platforms, they are part of kernel + * ABI. It is expected that, for specific hardware platform, existing + * entries will remain constant and the table will only be updated by + * adding new entries, filling unused positions. */ +#define GEN9_MOCS_ENTRIES \ + MOCS_ENTRY(I915_MOCS_UNCACHED, \ + LE_1_UC | LE_TC_2_LLC_ELLC, \ + L3_1_UC), \ + MOCS_ENTRY(I915_MOCS_PTE, \ + LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \ + L3_3_WB) + static const struct drm_i915_mocs_entry skylake_mocs_table[] = { - [I915_MOCS_UNCACHED] = { - /* 0x00000009 */ - .control_value = LE_CACHEABILITY(LE_UC) | - LE_TGT_CACHE(LE_TC_LLC_ELLC) | - LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | - LE_PFM(0) | LE_SCF(0), - - /* 0x0010 */ - .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC), - }, - [I915_MOCS_PTE] = { - /* 0x00000038 */ - .control_value = LE_CACHEABILITY(LE_PAGETABLE) | - LE_TGT_CACHE(LE_TC_LLC_ELLC) | - LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | - LE_PFM(0) | LE_SCF(0), - /* 0x0030 */ - .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), - }, - [I915_MOCS_CACHED] = { - /* 0x0000003b */ - .control_value = LE_CACHEABILITY(LE_WB) | - LE_TGT_CACHE(LE_TC_LLC_ELLC) | - LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | - LE_PFM(0) | LE_SCF(0), - /* 0x0030 */ - .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), - }, + GEN9_MOCS_ENTRIES, + MOCS_ENTRY(I915_MOCS_CACHED, + LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3), + L3_3_WB) }; /* NOTE: the LE_TGT_CACHE is not used on Broxton */ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { - [I915_MOCS_UNCACHED] = { - /* 0x00000009 */ - .control_value = LE_CACHEABILITY(LE_UC) | - LE_TGT_CACHE(LE_TC_LLC_ELLC) | - LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | - LE_PFM(0) | LE_SCF(0), - - /* 0x0010 */ - .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC), - }, - [I915_MOCS_PTE] = { - /* 0x00000038 */ - .control_value = LE_CACHEABILITY(LE_PAGETABLE) | - LE_TGT_CACHE(LE_TC_LLC_ELLC) | - LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | - LE_PFM(0) | LE_SCF(0), - - /* 0x0030 */ - .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), - }, - [I915_MOCS_CACHED] = { - /* 0x00000039 */ - .control_value = LE_CACHEABILITY(LE_UC) | - LE_TGT_CACHE(LE_TC_LLC_ELLC) | - LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | - LE_PFM(0) | LE_SCF(0), - - /* 0x0030 */ - .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), - }, + GEN9_MOCS_ENTRIES, + MOCS_ENTRY(I915_MOCS_CACHED, + LE_1_UC | LE_TC_2_LLC_ELLC | LE_LRUM(3), + L3_3_WB) +}; + +#define GEN11_MOCS_ENTRIES \ + /* Base - Uncached (Deprecated) */ \ + MOCS_ENTRY(I915_MOCS_UNCACHED, \ + LE_1_UC | LE_TC_1_LLC, \ + L3_1_UC), \ 
+ /* Base - L3 + LeCC:PAT (Deprecated) */ \ + MOCS_ENTRY(I915_MOCS_PTE, \ + LE_0_PAGETABLE | LE_TC_1_LLC, \ + L3_3_WB), \ + /* Base - L3 + LLC */ \ + MOCS_ENTRY(2, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ + L3_3_WB), \ + /* Base - Uncached */ \ + MOCS_ENTRY(3, \ + LE_1_UC | LE_TC_1_LLC, \ + L3_1_UC), \ + /* Base - L3 */ \ + MOCS_ENTRY(4, \ + LE_1_UC | LE_TC_1_LLC, \ + L3_3_WB), \ + /* Base - LLC */ \ + MOCS_ENTRY(5, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ + L3_1_UC), \ + /* Age 0 - LLC */ \ + MOCS_ENTRY(6, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \ + L3_1_UC), \ + /* Age 0 - L3 + LLC */ \ + MOCS_ENTRY(7, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \ + L3_3_WB), \ + /* Age: Don't Chg. - LLC */ \ + MOCS_ENTRY(8, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \ + L3_1_UC), \ + /* Age: Don't Chg. - L3 + LLC */ \ + MOCS_ENTRY(9, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \ + L3_3_WB), \ + /* No AOM - LLC */ \ + MOCS_ENTRY(10, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \ + L3_1_UC), \ + /* No AOM - L3 + LLC */ \ + MOCS_ENTRY(11, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \ + L3_3_WB), \ + /* No AOM; Age 0 - LLC */ \ + MOCS_ENTRY(12, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \ + L3_1_UC), \ + /* No AOM; Age 0 - L3 + LLC */ \ + MOCS_ENTRY(13, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \ + L3_3_WB), \ + /* No AOM; Age:DC - LLC */ \ + MOCS_ENTRY(14, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \ + L3_1_UC), \ + /* No AOM; Age:DC - L3 + LLC */ \ + MOCS_ENTRY(15, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \ + L3_3_WB), \ + /* Self-Snoop - L3 + LLC */ \ + MOCS_ENTRY(18, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \ + L3_3_WB), \ + /* Skip Caching - L3 + LLC(12.5%) */ \ + MOCS_ENTRY(19, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7), \ + L3_3_WB), \ + /* Skip Caching - L3 + LLC(25%) */ \ + MOCS_ENTRY(20, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3), \ + L3_3_WB), \ + /* Skip Caching - L3 + LLC(50%) */ \ + MOCS_ENTRY(21, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1), \ + L3_3_WB), \ + /* Skip Caching - L3 + LLC(75%) */ \ + MOCS_ENTRY(22, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3), \ + L3_3_WB), \ + /* Skip Caching - L3 + LLC(87.5%) */ \ + MOCS_ENTRY(23, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7), \ + L3_3_WB), \ + /* HW Reserved - SW program but never use */ \ + MOCS_ENTRY(62, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ + L3_1_UC), \ + /* HW Reserved - SW program but never use */ \ + MOCS_ENTRY(63, \ + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ + L3_1_UC) + +static const struct drm_i915_mocs_entry icelake_mocs_table[] = { + GEN11_MOCS_ENTRIES }; /** @@ -178,13 +252,19 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, { bool result = false; - if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv) || - IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { + table->size = ARRAY_SIZE(icelake_mocs_table); + table->table = icelake_mocs_table; + table->n_entries = GEN11_NUM_MOCS_ENTRIES; + result = true; + } else if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { table->size = ARRAY_SIZE(skylake_mocs_table); + table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->table = skylake_mocs_table; result = true; } else if (IS_GEN9_LP(dev_priv)) { table->size = ARRAY_SIZE(broxton_mocs_table); + table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->table = broxton_mocs_table; result = true; } else { @@ -193,7 +273,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, } /* 
WaDisableSkipCaching:skl,bxt,kbl,glk */ - if (IS_GEN9(dev_priv)) { + if (IS_GEN(dev_priv, 9)) { int i; for (i = 0; i < table->size; i++) @@ -208,17 +288,17 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index) { switch (engine_id) { - case RCS: + case RCS0: return GEN9_GFX_MOCS(index); - case VCS: + case VCS0: return GEN9_MFX0_MOCS(index); - case BCS: + case BCS0: return GEN9_BLT_MOCS(index); - case VECS: + case VECS0: return GEN9_VEBOX_MOCS(index); - case VCS2: + case VCS1: return GEN9_MFX1_MOCS(index); - case VCS3: + case VCS2: return GEN11_MFX2_MOCS(index); default: MISSING_CASE(engine_id); @@ -226,6 +306,19 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index) } } +/* + * Get control_value from MOCS entry taking into account when it's not used: + * I915_MOCS_PTE's value is returned in this case. + */ +static u32 get_entry_control(const struct drm_i915_mocs_table *table, + unsigned int index) +{ + if (table->table[index].used) + return table->table[index].control_value; + + return table->table[I915_MOCS_PTE].control_value; +} + /** * intel_mocs_init_engine() - emit the mocs control table * @engine: The engine for whom to emit the registers. @@ -238,27 +331,23 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_mocs_table table; unsigned int index; + u32 unused_value; if (!get_mocs_settings(dev_priv, &table)) return; - GEM_BUG_ON(table.size > GEN9_NUM_MOCS_ENTRIES); - - for (index = 0; index < table.size; index++) - I915_WRITE(mocs_register(engine->id, index), - table.table[index].control_value); - - /* - * Ok, now set the unused entries to uncached. These entries - * are officially undefined and no contract for the contents - * and settings is given for these entries. - * - * Entry 0 in the table is uncached - so we are just writing - * that value to all the used entries. - */ - for (; index < GEN9_NUM_MOCS_ENTRIES; index++) - I915_WRITE(mocs_register(engine->id, index), - table.table[0].control_value); + /* Set unused values to PTE */ + unused_value = table.table[I915_MOCS_PTE].control_value; + + for (index = 0; index < table.size; index++) { + u32 value = get_entry_control(&table, index); + + I915_WRITE(mocs_register(engine->id, index), value); + } + + /* All remaining entries are also unused */ + for (; index < table.n_entries; index++) + I915_WRITE(mocs_register(engine->id, index), unused_value); } /** @@ -276,33 +365,32 @@ static int emit_mocs_control_table(struct i915_request *rq, { enum intel_engine_id engine = rq->engine->id; unsigned int index; + u32 unused_value; u32 *cs; - if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) + if (GEM_WARN_ON(table->size > table->n_entries)) return -ENODEV; - cs = intel_ring_begin(rq, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); + /* Set unused values to PTE */ + unused_value = table->table[I915_MOCS_PTE].control_value; + + cs = intel_ring_begin(rq, 2 + 2 * table->n_entries); if (IS_ERR(cs)) return PTR_ERR(cs); - *cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES); + *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries); for (index = 0; index < table->size; index++) { + u32 value = get_entry_control(table, index); + *cs++ = i915_mmio_reg_offset(mocs_register(engine, index)); - *cs++ = table->table[index].control_value; + *cs++ = value; } - /* - * Ok, now set the unused entries to uncached. 
These entries - * are officially undefined and no contract for the contents - * and settings is given for these entries. - * - * Entry 0 in the table is uncached - so we are just writing - * that value to all the used entries. - */ - for (; index < GEN9_NUM_MOCS_ENTRIES; index++) { + /* All remaining entries are also unused */ + for (; index < table->n_entries; index++) { *cs++ = i915_mmio_reg_offset(mocs_register(engine, index)); - *cs++ = table->table[0].control_value; + *cs++ = unused_value; } *cs++ = MI_NOOP; @@ -311,12 +399,24 @@ static int emit_mocs_control_table(struct i915_request *rq, return 0; } +/* + * Get l3cc_value from MOCS entry taking into account when it's not used: + * I915_MOCS_PTE's value is returned in this case. + */ +static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table, + unsigned int index) +{ + if (table->table[index].used) + return table->table[index].l3cc_value; + + return table->table[I915_MOCS_PTE].l3cc_value; +} + static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table, u16 low, u16 high) { - return table->table[low].l3cc_value | - table->table[high].l3cc_value << 16; + return low | high << 16; } /** @@ -333,38 +433,43 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table, static int emit_mocs_l3cc_table(struct i915_request *rq, const struct drm_i915_mocs_table *table) { + u16 unused_value; unsigned int i; u32 *cs; - if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) + if (GEM_WARN_ON(table->size > table->n_entries)) return -ENODEV; - cs = intel_ring_begin(rq, 2 + GEN9_NUM_MOCS_ENTRIES); + /* Set unused values to PTE */ + unused_value = table->table[I915_MOCS_PTE].l3cc_value; + + cs = intel_ring_begin(rq, 2 + table->n_entries); if (IS_ERR(cs)) return PTR_ERR(cs); - *cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2); + *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries / 2); + + for (i = 0; i < table->size / 2; i++) { + u16 low = get_entry_l3cc(table, 2 * i); + u16 high = get_entry_l3cc(table, 2 * i + 1); - for (i = 0; i < table->size/2; i++) { *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i)); - *cs++ = l3cc_combine(table, 2 * i, 2 * i + 1); + *cs++ = l3cc_combine(table, low, high); } + /* Odd table size - 1 left over */ if (table->size & 0x01) { - /* Odd table size - 1 left over */ + u16 low = get_entry_l3cc(table, 2 * i); + *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i)); - *cs++ = l3cc_combine(table, 2 * i, 0); + *cs++ = l3cc_combine(table, low, unused_value); i++; } - /* - * Now set the rest of the table to uncached - use entry 0 as - * this will be uncached. Leave the last pair uninitialised as - * they are reserved by the hardware. 
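Two ideas recur in the reworked MOCS programming above: indices the table does not define fall back to the I915_MOCS_PTE entry, and pairs of 16-bit L3CC values are packed two per LNCFCMOCS register. A simplified standalone sketch of both (types and names reduced for illustration):

#include <stdint.h>

struct mocs_entry {
        uint32_t control_value;
        uint16_t l3cc_value;
        uint16_t used;
};

/* Fall back to the PTE entry for indices the table does not define. */
static uint32_t entry_control(const struct mocs_entry *table,
                              unsigned int index, unsigned int pte_index)
{
        return table[index].used ? table[index].control_value
                                 : table[pte_index].control_value;
}

/* Two 16-bit L3CC values share one 32-bit register: the even index
 * goes in the low half, the odd index in the high half. */
static uint32_t l3cc_pack(uint16_t low, uint16_t high)
{
        return (uint32_t)low | ((uint32_t)high << 16);
}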
- */ - for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) { + /* All remaining entries are also unused */ + for (; i < table->n_entries / 2; i++) { *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i)); - *cs++ = l3cc_combine(table, 0, 0); + *cs++ = l3cc_combine(table, unused_value, unused_value); } *cs++ = MI_NOOP; @@ -391,26 +496,35 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv) { struct drm_i915_mocs_table table; unsigned int i; + u16 unused_value; if (!get_mocs_settings(dev_priv, &table)) return; - for (i = 0; i < table.size/2; i++) - I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 2*i+1)); + /* Set unused values to PTE */ + unused_value = table.table[I915_MOCS_PTE].l3cc_value; + + for (i = 0; i < table.size / 2; i++) { + u16 low = get_entry_l3cc(&table, 2 * i); + u16 high = get_entry_l3cc(&table, 2 * i + 1); + + I915_WRITE(GEN9_LNCFCMOCS(i), + l3cc_combine(&table, low, high)); + } /* Odd table size - 1 left over */ if (table.size & 0x01) { - I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 0)); + u16 low = get_entry_l3cc(&table, 2 * i); + + I915_WRITE(GEN9_LNCFCMOCS(i), + l3cc_combine(&table, low, unused_value)); i++; } - /* - * Now set the rest of the table to uncached - use entry 0 as - * this will be uncached. Leave the last pair as initialised as - * they are reserved by the hardware. - */ - for (; i < (GEN9_NUM_MOCS_ENTRIES / 2); i++) - I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 0, 0)); + /* All remaining entries are also unused */ + for (; i < table.n_entries / 2; i++) + I915_WRITE(GEN9_LNCFCMOCS(i), + l3cc_combine(&table, unused_value, unused_value)); } /** diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h index d89080d75b80..3d99d1271b2b 100644 --- a/drivers/gpu/drm/i915/intel_mocs.h +++ b/drivers/gpu/drm/i915/intel_mocs.h @@ -49,7 +49,6 @@ * context handling keep the MOCS in step. */ -#include <drm/drmP.h> #include "i915_drv.h" int intel_rcs_context_init_mocs(struct i915_request *rq); diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b8f106d9ecf8..5e00ee9270b5 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -30,7 +30,6 @@ #include <linux/firmware.h> #include <acpi/video.h> -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "intel_opregion.h" @@ -55,7 +54,12 @@ struct opregion_header { u8 signature[16]; u32 size; - u32 opregion_ver; + struct { + u8 rsvd; + u8 revision; + u8 minor; + u8 major; + } __packed over; u8 bios_ver[32]; u8 vbios_ver[16]; u8 driver_ver[16]; @@ -119,7 +123,8 @@ struct opregion_asle { u64 fdss; u32 fdsp; u32 stat; - u64 rvda; /* Physical address of raw vbt data */ + u64 rvda; /* Physical (2.0) or relative from opregion (2.1+) + * address of raw VBT data. 
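The OpRegion hunk below distinguishes the two RVDA encodings: version 2.0 publishes an absolute physical address, while 2.1+ publishes an unsigned offset relative to the OpRegion base (asls). A hedged sketch of that resolution, assuming the version fields and base address are already in hand:

#include <stdint.h>

/* Resolve the raw VBT address from RVDA; only reached when the
 * OpRegion major version is at least 2. */
static uint64_t resolve_rvda(uint8_t major, uint8_t minor,
                             uint64_t rvda, uint64_t asls)
{
        if (major > 2 || (major == 2 && minor >= 1))
                return asls + rvda;  /* 2.1+: offset from OpRegion base */

        return rvda;                 /* 2.0: already a physical address */
}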
*/ u32 rvds; /* Size of raw vbt data */ u8 rsvd[58]; } __packed; @@ -925,6 +930,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) opregion->header = base; opregion->lid_state = base + ACPI_CLID; + DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n", + opregion->header->over.major, + opregion->header->over.minor, + opregion->header->over.revision); + mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); @@ -953,11 +963,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) if (dmi_check_system(intel_no_opregion_vbt)) goto out; - if (opregion->header->opregion_ver >= 2 && opregion->asle && + if (opregion->header->over.major >= 2 && opregion->asle && opregion->asle->rvda && opregion->asle->rvds) { - opregion->rvda = memremap(opregion->asle->rvda, - opregion->asle->rvds, + resource_size_t rvda = opregion->asle->rvda; + + /* + * opregion 2.0: rvda is the physical VBT address. + * + * opregion 2.1+: rvda is unsigned, relative offset from + * opregion base, and should never point within opregion. + */ + if (opregion->header->over.major > 2 || + opregion->header->over.minor >= 1) { + WARN_ON(rvda < OPREGION_SIZE); + + rvda += asls; + } + + opregion->rvda = memremap(rvda, opregion->asle->rvds, MEMREMAP_WB); + vbt = opregion->rvda; vbt_size = opregion->asle->rvds; if (intel_bios_is_valid_vbt(vbt, vbt_size)) { @@ -967,6 +992,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) goto out; } else { DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); + memunmap(opregion->rvda); + opregion->rvda = NULL; } } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 20ea7c99d13a..a882b8d42bd9 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -25,8 +25,9 @@ * * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c */ -#include <drm/drmP.h> #include <drm/i915_drm.h> +#include <drm/drm_fourcc.h> + #include "i915_drv.h" #include "i915_reg.h" #include "intel_drv.h" @@ -185,7 +186,7 @@ struct intel_overlay { struct overlay_registers __iomem *regs; u32 flip_addr; /* flip handling */ - struct i915_gem_active last_flip; + struct i915_active_request last_flip; }; static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv, @@ -213,29 +214,29 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv, static void intel_overlay_submit_request(struct intel_overlay *overlay, struct i915_request *rq, - i915_gem_retire_fn retire) + i915_active_retire_fn retire) { - GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip, - &overlay->i915->drm.struct_mutex)); - i915_gem_active_set_retire_fn(&overlay->last_flip, retire, - &overlay->i915->drm.struct_mutex); - i915_gem_active_set(&overlay->last_flip, rq); + GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip, + &overlay->i915->drm.struct_mutex)); + i915_active_request_set_retire_fn(&overlay->last_flip, retire, + &overlay->i915->drm.struct_mutex); + __i915_active_request_set(&overlay->last_flip, rq); i915_request_add(rq); } static int intel_overlay_do_wait_request(struct intel_overlay *overlay, struct i915_request *rq, - i915_gem_retire_fn retire) + i915_active_retire_fn retire) { intel_overlay_submit_request(overlay, rq, retire); - return i915_gem_active_retire(&overlay->last_flip, - &overlay->i915->drm.struct_mutex); + return i915_active_request_retire(&overlay->last_flip, + &overlay->i915->drm.struct_mutex); } static struct i915_request *alloc_request(struct 
intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; - struct intel_engine_cs *engine = dev_priv->engine[RCS]; + struct intel_engine_cs *engine = dev_priv->engine[RCS0]; return i915_request_alloc(engine, dev_priv->kernel_context); } @@ -350,8 +351,9 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay) i915_vma_put(vma); } -static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active, - struct i915_request *rq) +static void +intel_overlay_release_old_vid_tail(struct i915_active_request *active, + struct i915_request *rq) { struct intel_overlay *overlay = container_of(active, typeof(*overlay), last_flip); @@ -359,7 +361,7 @@ static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active, intel_overlay_release_old_vma(overlay); } -static void intel_overlay_off_tail(struct i915_gem_active *active, +static void intel_overlay_off_tail(struct i915_active_request *active, struct i915_request *rq) { struct intel_overlay *overlay = @@ -422,8 +424,8 @@ static int intel_overlay_off(struct intel_overlay *overlay) * We have to be careful not to repeat work forever an make forward progess. */ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) { - return i915_gem_active_retire(&overlay->last_flip, - &overlay->i915->drm.struct_mutex); + return i915_active_request_retire(&overlay->last_flip, + &overlay->i915->drm.struct_mutex); } /* Wait for pending overlay flip and release old frame. @@ -479,8 +481,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv) if (!overlay) return; - intel_overlay_release_old_vid(overlay); - overlay->old_xscale = 0; overlay->old_yscale = 0; overlay->crtc = NULL; @@ -541,7 +541,7 @@ static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 widt { u32 sw; - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) sw = ALIGN((offset & 31) + width, 32); else sw = ALIGN((offset & 63) + width, 64); @@ -778,7 +778,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, u32 oconfig; oconfig = OCONF_CC_OUT_8BIT; - if (IS_GEN4(dev_priv)) + if (IS_GEN(dev_priv, 4)) oconfig |= OCONF_CSC_MODE_BT709; oconfig |= pipe == 0 ? OCONF_PIPE_A : OCONF_PIPE_B; @@ -1012,7 +1012,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv, if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; - if (IS_GEN4(dev_priv) && rec->stride_Y < 512) + if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512) return -EINVAL; tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 
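The retire callbacks in the overlay hunks above recover the owning intel_overlay from the embedded last_flip tracker with container_of(). A generic, self-contained illustration of that pattern (none of these names are the driver's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct tracker { int dummy; };

struct overlay_like {
        int active;
        struct tracker last_flip;  /* embedded member */
};

/* The callback only sees the embedded tracker and recovers the
 * enclosing object from it. */
static void retire(struct tracker *t)
{
        struct overlay_like *ov =
                container_of(t, struct overlay_like, last_flip);

        ov->active = 0;
}

int main(void)
{
        struct overlay_like ov = { .active = 1 };

        retire(&ov.last_flip);
        printf("%d\n", ov.active);  /* prints 0 */
        return 0;
}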
@@ -1246,7 +1246,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; - if (!IS_GEN2(dev_priv)) { + if (!IS_GEN(dev_priv, 2)) { attrs->gamma0 = I915_READ(OGAMC0); attrs->gamma1 = I915_READ(OGAMC1); attrs->gamma2 = I915_READ(OGAMC2); @@ -1270,7 +1270,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, update_reg_attrs(overlay, overlay->regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) goto out_unlock; if (overlay->active) { @@ -1358,7 +1358,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) overlay->contrast = 75; overlay->saturation = 146; - init_request_active(&overlay->last_flip, NULL); + INIT_ACTIVE_REQUEST(&overlay->last_flip); mutex_lock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index e6cd7b55c018..47cd4a338db6 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -46,27 +46,26 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, drm_mode_set_crtcinfo(adjusted_mode, 0); } -/** - * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID - * @dev_priv: i915 device instance - * @fixed_mode : panel native mode - * @connector: LVDS/eDP connector - * - * Return downclock_avail - * Find the reduced downclock for LVDS/eDP in EDID. - */ -struct drm_display_mode * -intel_find_panel_downclock(struct drm_i915_private *dev_priv, - struct drm_display_mode *fixed_mode, - struct drm_connector *connector) +static bool is_downclock_mode(const struct drm_display_mode *downclock_mode, + const struct drm_display_mode *fixed_mode) { - struct drm_display_mode *scan, *tmp_mode; - int temp_downclock; + return drm_mode_match(downclock_mode, fixed_mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS) && + downclock_mode->clock < fixed_mode->clock; +} - temp_downclock = fixed_mode->clock; - tmp_mode = NULL; +struct drm_display_mode * +intel_panel_edid_downclock_mode(struct intel_connector *connector, + const struct drm_display_mode *fixed_mode) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + const struct drm_display_mode *scan, *best_mode = NULL; + struct drm_display_mode *downclock_mode; + int best_clock = fixed_mode->clock; - list_for_each_entry(scan, &connector->probed_modes, head) { + list_for_each_entry(scan, &connector->base.probed_modes, head) { /* * If one mode has the same resolution with the fixed_panel * mode while they have the different refresh rate, it means @@ -74,29 +73,98 @@ intel_find_panel_downclock(struct drm_i915_private *dev_priv, * case we can set the different FPx0/1 to dynamically select * between low and high frequency. */ - if (scan->hdisplay == fixed_mode->hdisplay && - scan->hsync_start == fixed_mode->hsync_start && - scan->hsync_end == fixed_mode->hsync_end && - scan->htotal == fixed_mode->htotal && - scan->vdisplay == fixed_mode->vdisplay && - scan->vsync_start == fixed_mode->vsync_start && - scan->vsync_end == fixed_mode->vsync_end && - scan->vtotal == fixed_mode->vtotal) { - if (scan->clock < temp_downclock) { - /* - * The downclock is already found. But we - * expect to find the lower downclock. - */ - temp_downclock = scan->clock; - tmp_mode = scan; - } + if (is_downclock_mode(scan, fixed_mode) && + scan->clock < best_clock) { + /* + * The downclock is already found. 
But we + * expect to find the lower downclock. + */ + best_clock = scan->clock; + best_mode = scan; } } - if (temp_downclock < fixed_mode->clock) - return drm_mode_duplicate(&dev_priv->drm, tmp_mode); - else + if (!best_mode) + return NULL; + + downclock_mode = drm_mode_duplicate(&dev_priv->drm, best_mode); + if (!downclock_mode) + return NULL; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ", + connector->base.base.id, connector->base.name); + drm_mode_debug_printmodeline(downclock_mode); + + return downclock_mode; +} + +struct drm_display_mode * +intel_panel_edid_fixed_mode(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + const struct drm_display_mode *scan; + struct drm_display_mode *fixed_mode; + + if (list_empty(&connector->base.probed_modes)) + return NULL; + + /* prefer fixed mode from EDID if available */ + list_for_each_entry(scan, &connector->base.probed_modes, head) { + if ((scan->type & DRM_MODE_TYPE_PREFERRED) == 0) + continue; + + fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan); + if (!fixed_mode) + return NULL; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ", + connector->base.base.id, connector->base.name); + drm_mode_debug_printmodeline(fixed_mode); + + return fixed_mode; + } + + scan = list_first_entry(&connector->base.probed_modes, + typeof(*scan), head); + + fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan); + if (!fixed_mode) + return NULL; + + fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ", + connector->base.base.id, connector->base.name); + drm_mode_debug_printmodeline(fixed_mode); + + return fixed_mode; +} + +struct drm_display_mode * +intel_panel_vbt_fixed_mode(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_display_info *info = &connector->base.display_info; + struct drm_display_mode *fixed_mode; + + if (!dev_priv->vbt.lfp_lvds_vbt_mode) return NULL; + + fixed_mode = drm_mode_duplicate(&dev_priv->drm, + dev_priv->vbt.lfp_lvds_vbt_mode); + if (!fixed_mode) + return NULL; + + fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ", + connector->base.base.id, connector->base.name); + drm_mode_debug_printmodeline(fixed_mode); + + info->width_mm = fixed_mode->width_mm; + info->height_mm = fixed_mode->height_mm; + + return fixed_mode; } /* adjusted_mode has been preset to be the panel's fixed mode */ @@ -563,7 +631,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc); } - if (IS_GEN4(dev_priv)) { + if (IS_GEN(dev_priv, 4)) { mask = BACKLIGHT_DUTY_CYCLE_MASK; } else { level <<= 1; @@ -929,7 +997,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2 * that has backlight. 
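The panel helpers above select a downclock mode by matching the fixed mode's timings and keeping the lowest pixel clock below it. A simplified sketch of that selection (matching reduced to resolution only; the real code uses drm_mode_match() with timing and flag criteria):

#include <stddef.h>

struct mode { int clock; int hdisplay; int vdisplay; };  /* clock in kHz */

static const struct mode *
pick_downclock(const struct mode *modes, int n, const struct mode *fixed)
{
        const struct mode *best = NULL;
        int best_clock = fixed->clock;
        int i;

        for (i = 0; i < n; i++) {
                if (modes[i].hdisplay != fixed->hdisplay ||
                    modes[i].vdisplay != fixed->vdisplay)
                        continue;

                if (modes[i].clock < best_clock) {
                        best_clock = modes[i].clock;
                        best = &modes[i];
                }
        }

        return best;
}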
*/ - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); } @@ -1087,20 +1155,11 @@ static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state, intel_panel_actually_set_backlight(conn_state, panel->backlight.level); } -void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; - - if (!panel->backlight.present) - return; - - DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); - - mutex_lock(&dev_priv->backlight_lock); WARN_ON(panel->backlight.max == 0); @@ -1117,6 +1176,24 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, panel->backlight.enabled = true; if (panel->backlight.device) panel->backlight.device->props.power = FB_BLANK_UNBLANK; +} + +void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; + + if (!panel->backlight.present) + return; + + DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); + + mutex_lock(&dev_priv->backlight_lock); + + __intel_panel_enable_backlight(crtc_state, conn_state); mutex_unlock(&dev_priv->backlight_lock); } @@ -1203,17 +1280,20 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) struct intel_connector *connector = bl_get_data(bd); struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - u32 hw_level; - int ret; + intel_wakeref_t wakeref; + int ret = 0; - intel_runtime_pm_get(dev_priv); - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + with_intel_runtime_pm(dev_priv, wakeref) { + u32 hw_level; - hw_level = intel_panel_get_backlight(connector); - ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - drm_modeset_unlock(&dev->mode_config.connection_mutex); - intel_runtime_pm_put(dev_priv); + hw_level = intel_panel_get_backlight(connector); + ret = scale_hw_to_user(connector, + hw_level, bd->props.max_brightness); + + drm_modeset_unlock(&dev->mode_config.connection_mutex); + } return ret; } @@ -1484,8 +1564,8 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - u32 pch_ctl1, pch_ctl2, val; - bool alt; + u32 cpu_ctl2, pch_ctl1, pch_ctl2, val; + bool alt, cpu_mode; if (HAS_PCH_LPT(dev_priv)) alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY; @@ -1499,6 +1579,8 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); panel->backlight.max = pch_ctl2 >> 16; + cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); + if (!panel->backlight.max) panel->backlight.max = get_backlight_max_vbt(connector); @@ -1507,12 +1589,28 @@ static int 
lpt_setup_backlight(struct intel_connector *connector, enum pipe unus panel->backlight.min = get_backlight_min_vbt(connector); - val = lpt_get_backlight(connector); + panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE; + + cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) && + !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) && + (cpu_ctl2 & BLM_PWM_ENABLE); + if (cpu_mode) + val = pch_get_backlight(connector); + else + val = lpt_get_backlight(connector); val = intel_panel_compute_brightness(connector, val); panel->backlight.level = clamp(val, panel->backlight.min, panel->backlight.max); - panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE; + if (cpu_mode) { + DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n"); + + /* Write converted CPU PWM value to PCH override register */ + lpt_set_backlight(connector->base.state, panel->backlight.level); + I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE); + + I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE); + } return 0; } @@ -1557,7 +1655,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu ctl = I915_READ(BLC_PWM_CTL); - if (IS_GEN2(dev_priv) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) + if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; if (IS_PINEVIEW(dev_priv)) @@ -1773,6 +1871,24 @@ static int pwm_setup_backlight(struct intel_connector *connector, return 0; } +void intel_panel_update_backlight(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + if (!panel->backlight.present) + return; + + mutex_lock(&dev_priv->backlight_lock); + if (!panel->backlight.enabled) + __intel_panel_enable_backlight(crtc_state, conn_state); + + mutex_unlock(&dev_priv->backlight_lock); +} + int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) { struct drm_i915_private *dev_priv = to_i915(connector->dev); @@ -1846,15 +1962,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) panel->backlight.set = bxt_set_backlight; panel->backlight.get = bxt_get_backlight; panel->backlight.hz_to_pwm = bxt_hz_to_pwm; - } else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) { + } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) { panel->backlight.setup = cnp_setup_backlight; panel->backlight.enable = cnp_enable_backlight; panel->backlight.disable = cnp_disable_backlight; panel->backlight.set = bxt_set_backlight; panel->backlight.get = bxt_get_backlight; panel->backlight.hz_to_pwm = cnp_hz_to_pwm; - } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) || - HAS_PCH_KBP(dev_priv)) { + } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) { panel->backlight.setup = lpt_setup_backlight; panel->backlight.enable = lpt_enable_backlight; panel->backlight.disable = lpt_disable_backlight; @@ -1886,7 +2001,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) panel->backlight.get = vlv_get_backlight; panel->backlight.hz_to_pwm = vlv_hz_to_pwm; } - } else if (IS_GEN4(dev_priv)) { + } else if (IS_GEN(dev_priv, 4)) { panel->backlight.setup = i965_setup_backlight; panel->backlight.enable = i965_enable_backlight; panel->backlight.disable = i965_disable_backlight; diff --git 
a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c index f3c9010e332a..0b1378f0bff7 100644 --- a/drivers/gpu/drm/i915/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c @@ -31,20 +31,24 @@ #include "intel_drv.h" static const char * const pipe_crc_sources[] = { - "none", - "plane1", - "plane2", - "pf", - "pipe", - "TV", - "DP-B", - "DP-C", - "DP-D", - "auto", + [INTEL_PIPE_CRC_SOURCE_NONE] = "none", + [INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1", + [INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2", + [INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3", + [INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4", + [INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5", + [INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6", + [INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7", + [INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe", + [INTEL_PIPE_CRC_SOURCE_TV] = "TV", + [INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B", + [INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C", + [INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D", + [INTEL_PIPE_CRC_SOURCE_AUTO] = "auto", }; static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, - uint32_t *val) + u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) *source = INTEL_PIPE_CRC_SOURCE_PIPE; @@ -120,7 +124,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, - uint32_t *val) + u32 *val) { bool need_stable_symbols = false; @@ -165,7 +169,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, * - DisplayPort scrambling: used for EMI reduction */ if (need_stable_symbols) { - uint32_t tmp = I915_READ(PORT_DFT2_G4X); + u32 tmp = I915_READ(PORT_DFT2_G4X); tmp |= DC_BALANCE_RESET_VLV; switch (pipe) { @@ -190,10 +194,8 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, - uint32_t *val) + u32 *val) { - bool need_stable_symbols = false; - if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source); if (ret) @@ -209,63 +211,30 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, return -EINVAL; *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; break; - case INTEL_PIPE_CRC_SOURCE_DP_B: - if (!IS_G4X(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; - need_stable_symbols = true; - break; - case INTEL_PIPE_CRC_SOURCE_DP_C: - if (!IS_G4X(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; - need_stable_symbols = true; - break; - case INTEL_PIPE_CRC_SOURCE_DP_D: - if (!IS_G4X(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; - need_stable_symbols = true; - break; case INTEL_PIPE_CRC_SOURCE_NONE: *val = 0; break; default: + /* + * The DP CRC source doesn't work on g4x. + * It can be made to work to some degree by selecting + * the correct CRC source before the port is enabled, + * and not touching the CRC source bits again until + * the port is disabled. But even then the bits + * eventually get stuck and a reboot is needed to get + * working CRCs on the pipe again. Let's simply + * refuse to use DP CRCs on g4x. + */ return -EINVAL; } - /* - * When the pipe CRC tap point is after the transcoders we need - * to tweak symbol-level features to produce a deterministic series of - * symbols for a given frame. 
We need to reset those features only once - * a frame (instead of every nth symbol): - * - DC-balance: used to ensure a better clock recovery from the data - * link (SDVO) - * - DisplayPort scrambling: used for EMI reduction - */ - if (need_stable_symbols) { - uint32_t tmp = I915_READ(PORT_DFT2_G4X); - - WARN_ON(!IS_G4X(dev_priv)); - - I915_WRITE(PORT_DFT_I9XX, - I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); - - if (pipe == PIPE_A) - tmp |= PIPE_A_SCRAMBLE_RESET; - else - tmp |= PIPE_B_SCRAMBLE_RESET; - - I915_WRITE(PORT_DFT2_G4X, tmp); - } - return 0; } static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, enum pipe pipe) { - uint32_t tmp = I915_READ(PORT_DFT2_G4X); + u32 tmp = I915_READ(PORT_DFT2_G4X); switch (pipe) { case PIPE_A: @@ -283,28 +252,10 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) tmp &= ~DC_BALANCE_RESET_VLV; I915_WRITE(PORT_DFT2_G4X, tmp); - -} - -static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - uint32_t tmp = I915_READ(PORT_DFT2_G4X); - - if (pipe == PIPE_A) - tmp &= ~PIPE_A_SCRAMBLE_RESET; - else - tmp &= ~PIPE_B_SCRAMBLE_RESET; - I915_WRITE(PORT_DFT2_G4X, tmp); - - if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { - I915_WRITE(PORT_DFT_I9XX, - I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); - } } static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, - uint32_t *val) + u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) *source = INTEL_PIPE_CRC_SOURCE_PIPE; @@ -329,19 +280,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, return 0; } -static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv, - bool enable) +static void +intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable) { - struct drm_device *dev = &dev_priv->drm; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *pipe_config; struct drm_atomic_state *state; struct drm_modeset_acquire_ctx ctx; - int ret = 0; + int ret; drm_modeset_acquire_init(&ctx, 0); - state = drm_atomic_state_alloc(dev); + state = drm_atomic_state_alloc(&dev_priv->drm); if (!state) { ret = -ENOMEM; goto unlock; @@ -356,17 +306,10 @@ retry: goto put_state; } - if (HAS_IPS(dev_priv)) { - /* - * When IPS gets enabled, the pipe CRC changes. Since IPS gets - * enabled and disabled dynamically based on package C states, - * user space can't make reliable use of the CRCs, so let's just - * completely disable it. 
- */ - pipe_config->ips_force_disable = enable; - } + pipe_config->base.mode_changed = pipe_config->has_psr; + pipe_config->crc_enabled = enable; - if (IS_HASWELL(dev_priv)) { + if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A) { pipe_config->pch_pfit.force_thru = enable; if (pipe_config->cpu_transcoder == TRANSCODER_EDP && pipe_config->pch_pfit.enabled != enable) @@ -392,11 +335,10 @@ unlock: static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, - uint32_t *val, - bool set_wa) + u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) - *source = INTEL_PIPE_CRC_SOURCE_PF; + *source = INTEL_PIPE_CRC_SOURCE_PIPE; switch (*source) { case INTEL_PIPE_CRC_SOURCE_PLANE1: @@ -405,11 +347,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, case INTEL_PIPE_CRC_SOURCE_PLANE2: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; break; - case INTEL_PIPE_CRC_SOURCE_PF: - if (set_wa && (IS_HASWELL(dev_priv) || - IS_BROADWELL(dev_priv)) && pipe == PIPE_A) - hsw_pipe_A_crc_wa(dev_priv, true); - + case INTEL_PIPE_CRC_SOURCE_PIPE: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; break; case INTEL_PIPE_CRC_SOURCE_NONE: @@ -422,21 +360,65 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, return 0; } +static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, + enum pipe pipe, + enum intel_pipe_crc_source *source, + u32 *val) +{ + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) + *source = INTEL_PIPE_CRC_SOURCE_PIPE; + + switch (*source) { + case INTEL_PIPE_CRC_SOURCE_PLANE1: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE2: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE3: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE4: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE5: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE6: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE7: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PIPE: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_NONE: + *val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, - enum intel_pipe_crc_source *source, u32 *val, - bool set_wa) + enum intel_pipe_crc_source *source, u32 *val) { - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) return i8xx_pipe_crc_ctl_reg(source, val); else if (INTEL_GEN(dev_priv) < 5) return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val); - else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) + else if (IS_GEN_RANGE(dev_priv, 5, 6)) return ilk_pipe_crc_ctl_reg(source, val); + else if (INTEL_GEN(dev_priv) < 9) + return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val); else - return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa); + return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val); } static int @@ -486,9 +468,6 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv, switch (source) { case INTEL_PIPE_CRC_SOURCE_PIPE: case INTEL_PIPE_CRC_SOURCE_TV: - case INTEL_PIPE_CRC_SOURCE_DP_B: - 
case INTEL_PIPE_CRC_SOURCE_DP_C: - case INTEL_PIPE_CRC_SOURCE_DP_D: case INTEL_PIPE_CRC_SOURCE_NONE: return 0; default: @@ -532,7 +511,25 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv, case INTEL_PIPE_CRC_SOURCE_PIPE: case INTEL_PIPE_CRC_SOURCE_PLANE1: case INTEL_PIPE_CRC_SOURCE_PLANE2: - case INTEL_PIPE_CRC_SOURCE_PF: + case INTEL_PIPE_CRC_SOURCE_NONE: + return 0; + default: + return -EINVAL; + } +} + +static int skl_crc_source_valid(struct drm_i915_private *dev_priv, + const enum intel_pipe_crc_source source) +{ + switch (source) { + case INTEL_PIPE_CRC_SOURCE_PIPE: + case INTEL_PIPE_CRC_SOURCE_PLANE1: + case INTEL_PIPE_CRC_SOURCE_PLANE2: + case INTEL_PIPE_CRC_SOURCE_PLANE3: + case INTEL_PIPE_CRC_SOURCE_PLANE4: + case INTEL_PIPE_CRC_SOURCE_PLANE5: + case INTEL_PIPE_CRC_SOURCE_PLANE6: + case INTEL_PIPE_CRC_SOURCE_PLANE7: case INTEL_PIPE_CRC_SOURCE_NONE: return 0; default: @@ -544,16 +541,18 @@ static int intel_is_valid_crc_source(struct drm_i915_private *dev_priv, const enum intel_pipe_crc_source source) { - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) return i8xx_crc_source_valid(dev_priv, source); else if (INTEL_GEN(dev_priv) < 5) return i9xx_crc_source_valid(dev_priv, source); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return vlv_crc_source_valid(dev_priv, source); - else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) + else if (IS_GEN_RANGE(dev_priv, 5, 6)) return ilk_crc_source_valid(dev_priv, source); - else + else if (INTEL_GEN(dev_priv) < 9) return ivb_crc_source_valid(dev_priv, source); + else + return skl_crc_source_valid(dev_priv, source); } const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc, @@ -589,8 +588,10 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index]; enum intel_display_power_domain power_domain; enum intel_pipe_crc_source source; + intel_wakeref_t wakeref; u32 val = 0; /* shut up gcc */ int ret = 0; + bool enable; if (display_crc_ctl_parse_source(source_name, &source) < 0) { DRM_DEBUG_DRIVER("unknown source %s\n", source_name); @@ -598,12 +599,17 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) } power_domain = POWER_DOMAIN_PIPE(crtc->index); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) { DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); return -EIO; } - ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val, true); + enable = source != INTEL_PIPE_CRC_SOURCE_NONE; + if (enable) + intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true); + + ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val); if (ret != 0) goto out; @@ -612,19 +618,17 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) POSTING_READ(PIPE_CRC_CTL(crtc->index)); if (!source) { - if (IS_G4X(dev_priv)) - g4x_undo_pipe_scramble_reset(dev_priv, crtc->index); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_undo_pipe_scramble_reset(dev_priv, crtc->index); - else if ((IS_HASWELL(dev_priv) || - IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A) - hsw_pipe_A_crc_wa(dev_priv, false); } pipe_crc->skipped = 0; out: - intel_display_power_put(dev_priv, power_domain); + if (!enable) + intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false); + + intel_display_power_put(dev_priv, 
power_domain, wakeref); return ret; } @@ -639,7 +643,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc) if (!crtc->crc.opened) return; - if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val, false) < 0) + if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0) return; /* Don't need pipe_crc->lock here, IRQs are not generated. */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a26b4eddda25..0e05ee1f3ea0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -26,13 +26,16 @@ */ #include <linux/cpufreq.h> +#include <linux/module.h> #include <linux/pm_runtime.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> + #include "i915_drv.h" #include "intel_drv.h" #include "../../../platform/x86/intel_ips.h" -#include <linux/module.h> -#include <drm/drm_atomic_helper.h> /** * DOC: RC6 @@ -335,12 +338,12 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); if (enable) val |= DSP_MAXFIFO_PM5_ENABLE; else val &= ~DSP_MAXFIFO_PM5_ENABLE; - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); mutex_unlock(&dev_priv->pcu_lock); } @@ -480,7 +483,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) int sprite0_start, sprite1_start; switch (pipe) { - uint32_t dsparb, dsparb2, dsparb3; + u32 dsparb, dsparb2, dsparb3; case PIPE_A: dsparb = I915_READ(DSPARB); dsparb2 = I915_READ(DSPARB2); @@ -513,7 +516,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { - uint32_t dsparb = I915_READ(DSPARB); + u32 dsparb = I915_READ(DSPARB); int size; size = dsparb & 0x7f; @@ -529,7 +532,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, static int i830_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { - uint32_t dsparb = I915_READ(DSPARB); + u32 dsparb = I915_READ(DSPARB); int size; size = dsparb & 0x1ff; @@ -546,7 +549,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv, static int i845_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { - uint32_t dsparb = I915_READ(DSPARB); + u32 dsparb = I915_READ(DSPARB); int size; size = dsparb & 0x7f; @@ -667,9 +670,9 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate, unsigned int cpp, unsigned int latency) { - uint64_t ret; + u64 ret; - ret = (uint64_t) pixel_rate * cpp * latency; + ret = (u64)pixel_rate * cpp * latency; ret = DIV_ROUND_UP_ULL(ret, 10000); return ret; @@ -847,7 +850,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) u32 reg; unsigned int wm; - latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv), + latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv), dev_priv->is_ddr3, dev_priv->fsb_freq, dev_priv->mem_freq); @@ -1089,9 +1092,9 @@ static int g4x_fbc_fifo_size(int level) } } -static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int level) +static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int level) { struct intel_plane *plane = to_intel_plane(plane_state->base.plane); struct 
drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -1188,9 +1191,9 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, return dirty; } -static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, - uint32_t pri_val); +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + u32 pri_val); static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) @@ -1399,10 +1402,9 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) return 0; } -static int g4x_compute_intermediate_wm(struct drm_device *dev, - struct intel_crtc *crtc, - struct intel_crtc_state *new_crtc_state) +static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; struct intel_atomic_state *intel_state = @@ -1599,9 +1601,9 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) } } -static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int level) +static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int level) { struct intel_plane *plane = to_intel_plane(plane_state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -1969,7 +1971,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, spin_lock(&dev_priv->uncore.lock); switch (crtc->pipe) { - uint32_t dsparb, dsparb2, dsparb3; + u32 dsparb, dsparb2, dsparb3; case PIPE_A: dsparb = I915_READ_FW(DSPARB); dsparb2 = I915_READ_FW(DSPARB2); @@ -2032,10 +2034,9 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, #undef VLV_FIFO -static int vlv_compute_intermediate_wm(struct drm_device *dev, - struct intel_crtc *crtc, - struct intel_crtc_state *new_crtc_state) +static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; struct intel_atomic_state *intel_state = @@ -2264,8 +2265,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) { struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); const struct intel_watermark_params *wm_info; - uint32_t fwater_lo; - uint32_t fwater_hi; + u32 fwater_lo; + u32 fwater_hi; int cwm, srwm = 1; int fifo_size; int planea_wm, planeb_wm; @@ -2273,7 +2274,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) if (IS_I945GM(dev_priv)) wm_info = &i945_wm_info; - else if (!IS_GEN2(dev_priv)) + else if (!IS_GEN(dev_priv, 2)) wm_info = &i915_wm_info; else wm_info = &i830_a_wm_info; @@ -2287,7 +2288,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) crtc->base.primary->state->fb; int cpp; - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) cpp = 4; else cpp = fb->format->cpp[0]; @@ -2302,7 +2303,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) planea_wm = wm_info->max_wm; } - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) wm_info = &i830_bc_wm_info; fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B); 
@@ -2314,7 +2315,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) crtc->base.primary->state->fb; int cpp; - if (IS_GEN2(dev_priv)) + if (IS_GEN(dev_priv, 2)) cpp = 4; else cpp = fb->format->cpp[0]; @@ -2408,7 +2409,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc) struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); struct intel_crtc *crtc; const struct drm_display_mode *adjusted_mode; - uint32_t fwater_lo; + u32 fwater_lo; int planea_wm; crtc = single_enabled_crtc(dev_priv); @@ -2457,8 +2458,7 @@ static unsigned int ilk_wm_method2(unsigned int pixel_rate, return ret; } -static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, - uint8_t cpp) +static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp) { /* * Neither of these should be possible since this function shouldn't be @@ -2475,22 +2475,21 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, } struct ilk_wm_maximums { - uint16_t pri; - uint16_t spr; - uint16_t cur; - uint16_t fbc; + u16 pri; + u16 spr; + u16 cur; + u16 fbc; }; /* * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ -static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, - uint32_t mem_value, - bool is_lp) +static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + u32 mem_value, bool is_lp) { - uint32_t method1, method2; + u32 method1, method2; int cpp; if (mem_value == 0) @@ -2518,11 +2517,11 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ -static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, - uint32_t mem_value) +static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + u32 mem_value) { - uint32_t method1, method2; + u32 method1, method2; int cpp; if (mem_value == 0) @@ -2545,9 +2544,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ -static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, - uint32_t mem_value) +static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + u32 mem_value) { int cpp; @@ -2565,9 +2564,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, } /* Only for WM_LP. 
*/ -static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, - uint32_t pri_val) +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + u32 pri_val) { int cpp; @@ -2626,13 +2625,12 @@ static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) } /* Calculate the maximum primary/sprite plane watermark */ -static unsigned int ilk_plane_wm_max(const struct drm_device *dev, +static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, int level, const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, bool is_sprite) { - struct drm_i915_private *dev_priv = to_i915(dev); unsigned int fifo_size = ilk_display_fifo_size(dev_priv); /* if sprites aren't enabled, sprites get nothing */ @@ -2668,7 +2666,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev, } /* Calculate the maximum cursor plane watermark */ -static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, +static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, int level, const struct intel_wm_config *config) { @@ -2677,19 +2675,19 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, return 64; /* otherwise just report max that registers can hold */ - return ilk_cursor_wm_reg_max(to_i915(dev), level); + return ilk_cursor_wm_reg_max(dev_priv, level); } -static void ilk_compute_wm_maximums(const struct drm_device *dev, +static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, int level, const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, struct ilk_wm_maximums *max) { - max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); - max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); - max->cur = ilk_cursor_wm_max(dev, level, config); - max->fbc = ilk_fbc_wm_reg_max(to_i915(dev)); + max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); + max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); + max->cur = ilk_cursor_wm_max(dev_priv, level, config); + max->fbc = ilk_fbc_wm_reg_max(dev_priv); } static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, @@ -2734,9 +2732,9 @@ static bool ilk_validate_wm_level(int level, DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", level, result->cur_val, max->cur); - result->pri_val = min_t(uint32_t, result->pri_val, max->pri); - result->spr_val = min_t(uint32_t, result->spr_val, max->spr); - result->cur_val = min_t(uint32_t, result->cur_val, max->cur); + result->pri_val = min_t(u32, result->pri_val, max->pri); + result->spr_val = min_t(u32, result->spr_val, max->spr); + result->cur_val = min_t(u32, result->cur_val, max->cur); result->enable = true; } @@ -2752,9 +2750,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, const struct intel_plane_state *curstate, struct intel_wm_level *result) { - uint16_t pri_latency = dev_priv->wm.pri_latency[level]; - uint16_t spr_latency = dev_priv->wm.spr_latency[level]; - uint16_t cur_latency = dev_priv->wm.cur_latency[level]; + u16 pri_latency = dev_priv->wm.pri_latency[level]; + u16 spr_latency = dev_priv->wm.spr_latency[level]; + u16 cur_latency = dev_priv->wm.cur_latency[level]; /* WM1+ latency values stored in 0.5us units */ if (level > 0) { @@ -2778,7 +2776,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, 
result->enable = true; } -static uint32_t +static u32 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) { const struct intel_atomic_state *intel_state = @@ -2807,10 +2805,10 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) } static void intel_read_wm_latency(struct drm_i915_private *dev_priv, - uint16_t wm[8]) + u16 wm[8]) { if (INTEL_GEN(dev_priv) >= 9) { - uint32_t val; + u32 val; int ret, i; int level, max_level = ilk_wm_max_level(dev_priv); @@ -2894,7 +2892,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, wm[0] += 1; } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - uint64_t sskpd = I915_READ64(MCH_SSKPD); + u64 sskpd = I915_READ64(MCH_SSKPD); wm[0] = (sskpd >> 56) & 0xFF; if (wm[0] == 0) @@ -2904,14 +2902,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, wm[3] = (sskpd >> 20) & 0x1FF; wm[4] = (sskpd >> 32) & 0x1FF; } else if (INTEL_GEN(dev_priv) >= 6) { - uint32_t sskpd = I915_READ(MCH_SSKPD); + u32 sskpd = I915_READ(MCH_SSKPD); wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; } else if (INTEL_GEN(dev_priv) >= 5) { - uint32_t mltr = I915_READ(MLTR_ILK); + u32 mltr = I915_READ(MLTR_ILK); /* ILK primary LP0 latency is 700 ns */ wm[0] = 7; @@ -2923,18 +2921,18 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, } static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, - uint16_t wm[5]) + u16 wm[5]) { /* ILK sprite LP0 latency is 1300 ns */ - if (IS_GEN5(dev_priv)) + if (IS_GEN(dev_priv, 5)) wm[0] = 13; } static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, - uint16_t wm[5]) + u16 wm[5]) { /* ILK cursor LP0 latency is 1300 ns */ - if (IS_GEN5(dev_priv)) + if (IS_GEN(dev_priv, 5)) wm[0] = 13; } @@ -2953,7 +2951,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv) static void intel_print_wm_latency(struct drm_i915_private *dev_priv, const char *name, - const uint16_t wm[8]) + const u16 wm[8]) { int level, max_level = ilk_wm_max_level(dev_priv); @@ -2982,7 +2980,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv, } static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, - uint16_t wm[5], uint16_t min) + u16 wm[5], u16 min) { int level, max_level = ilk_wm_max_level(dev_priv); @@ -2991,7 +2989,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, wm[0] = max(wm[0], min); for (level = 1; level <= max_level; level++) - wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); + wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5)); return true; } @@ -3061,7 +3059,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); - if (IS_GEN6(dev_priv)) { + if (IS_GEN(dev_priv, 6)) { snb_wm_latency_quirk(dev_priv); snb_wm_lp3_irq_quirk(dev_priv); } @@ -3073,7 +3071,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency); } -static bool ilk_validate_pipe_wm(struct drm_device *dev, +static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, struct intel_pipe_wm *pipe_wm) { /* LP0 watermark maximums depend on this pipe alone */ @@ -3085,7 +3083,7 @@ static bool 
ilk_validate_pipe_wm(struct drm_device *dev, struct ilk_wm_maximums max; /* LP0 watermarks always use 1/2 DDB partitioning */ - ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); + ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max); /* At least LP0 must be valid */ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { @@ -3150,7 +3148,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) pipe_wm->linetime = hsw_compute_linetime_wm(cstate); - if (!ilk_validate_pipe_wm(dev, pipe_wm)) + if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) return -EINVAL; ilk_compute_wm_reg_maximums(dev_priv, 1, &max); @@ -3180,17 +3178,17 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) * state and the new state. These can be programmed to the hardware * immediately. */ -static int ilk_compute_intermediate_wm(struct drm_device *dev, - struct intel_crtc *intel_crtc, - struct intel_crtc_state *newstate) +static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate) { + struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc); + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; struct intel_atomic_state *intel_state = to_intel_atomic_state(newstate->base.state); const struct intel_crtc_state *oldstate = intel_atomic_get_old_crtc_state(intel_state, intel_crtc); const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal; - int level, max_level = ilk_wm_max_level(to_i915(dev)); + int level, max_level = ilk_wm_max_level(dev_priv); /* * Start with the final, target watermarks, then combine with the @@ -3223,7 +3221,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, * there's no safe way to transition from the old state to * the new state, so we need to fail the atomic transaction. */ - if (!ilk_validate_pipe_wm(dev, a)) + if (!ilk_validate_pipe_wm(dev_priv, a)) return -EINVAL; /* @@ -3239,7 +3237,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, /* * Merge the watermarks from all active pipes for a specific level. */ -static void ilk_merge_wm_level(struct drm_device *dev, +static void ilk_merge_wm_level(struct drm_i915_private *dev_priv, int level, struct intel_wm_level *ret_wm) { @@ -3247,7 +3245,7 @@ static void ilk_merge_wm_level(struct drm_device *dev, ret_wm->enable = true; - for_each_intel_crtc(dev, intel_crtc) { + for_each_intel_crtc(&dev_priv->drm, intel_crtc) { const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk; const struct intel_wm_level *wm = &active->wm[level]; @@ -3272,12 +3270,11 @@ static void ilk_merge_wm_level(struct drm_device *dev, /* * Merge all low power watermarks for all active pipes. 
*/ -static void ilk_wm_merge(struct drm_device *dev, +static void ilk_wm_merge(struct drm_i915_private *dev_priv, const struct intel_wm_config *config, const struct ilk_wm_maximums *max, struct intel_pipe_wm *merged) { - struct drm_i915_private *dev_priv = to_i915(dev); int level, max_level = ilk_wm_max_level(dev_priv); int last_enabled_level = max_level; @@ -3293,7 +3290,7 @@ static void ilk_wm_merge(struct drm_device *dev, for (level = 1; level <= max_level; level++) { struct intel_wm_level *wm = &merged->wm[level]; - ilk_merge_wm_level(dev, level, wm); + ilk_merge_wm_level(dev_priv, level, wm); if (level > last_enabled_level) wm->enable = false; @@ -3318,7 +3315,7 @@ static void ilk_wm_merge(struct drm_device *dev, * What we should check here is whether FBC can be * enabled sometime later. */ - if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled && + if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled && intel_fbc_is_active(dev_priv)) { for (level = 2; level <= max_level; level++) { struct intel_wm_level *wm = &merged->wm[level]; @@ -3335,22 +3332,20 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) } /* The value we need to program into the WM_LPx latency field */ -static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) +static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv, + int level) { - struct drm_i915_private *dev_priv = to_i915(dev); - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) return 2 * level; else return dev_priv->wm.pri_latency[level]; } -static void ilk_compute_wm_results(struct drm_device *dev, +static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, const struct intel_pipe_wm *merged, enum intel_ddb_partitioning partitioning, struct ilk_wm_values *results) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc; int level, wm_lp; @@ -3370,7 +3365,7 @@ static void ilk_compute_wm_results(struct drm_device *dev, * disabled. Doing otherwise could cause underruns. */ results->wm_lp[wm_lp - 1] = - (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) | + (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) | (r->pri_val << WM1_LP_SR_SHIFT) | r->cur_val; @@ -3396,7 +3391,7 @@ static void ilk_compute_wm_results(struct drm_device *dev, } /* LP0 register values */ - for_each_intel_crtc(dev, intel_crtc) { + for_each_intel_crtc(&dev_priv->drm, intel_crtc) { enum pipe pipe = intel_crtc->pipe; const struct intel_wm_level *r = &intel_crtc->wm.active.ilk.wm[0]; @@ -3415,11 +3410,12 @@ static void ilk_compute_wm_results(struct drm_device *dev, /* Find the result with the highest level enabled. Check for enable_fbc_wm in * case both are at the same level. Prefer r1 in case they're the same. 
*/ -static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev, - struct intel_pipe_wm *r1, - struct intel_pipe_wm *r2) +static struct intel_pipe_wm * +ilk_find_best_result(struct drm_i915_private *dev_priv, + struct intel_pipe_wm *r1, + struct intel_pipe_wm *r2) { - int level, max_level = ilk_wm_max_level(to_i915(dev)); + int level, max_level = ilk_wm_max_level(dev_priv); int level1 = 0, level2 = 0; for (level = 1; level <= max_level; level++) { @@ -3540,7 +3536,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, { struct ilk_wm_values *previous = &dev_priv->wm.hw; unsigned int dirty; - uint32_t val; + u32 val; dirty = ilk_compute_wm_dirty(dev_priv, previous, results); if (!dirty) @@ -3628,7 +3624,12 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) < 11) return enabled_slices; - if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE) + /* + * FIXME: for now we'll only ever use 1 slice; pretend that we have + * only that 1 slice enabled until we have a proper way for on-demand + * toggling of the second slice. + */ + if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE) enabled_slices++; return enabled_slices; @@ -3638,14 +3639,9 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv) * FIXME: We still don't have the proper code detect if we need to apply the WA, * so assume we'll always need it in order to avoid underruns. */ -static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state) +static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - - if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) - return true; - - return false; + return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv); } static bool @@ -3677,25 +3673,25 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) if (dev_priv->sagv_status == I915_SAGV_ENABLED) return 0; - DRM_DEBUG_KMS("Enabling the SAGV\n"); + DRM_DEBUG_KMS("Enabling SAGV\n"); mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_ENABLE); - /* We don't need to wait for the SAGV when enabling */ + /* We don't need to wait for SAGV when enabling */ mutex_unlock(&dev_priv->pcu_lock); /* * Some skl systems, pre-release machines in particular, - * don't actually have an SAGV. + * don't actually have SAGV. */ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; return 0; } else if (ret < 0) { - DRM_ERROR("Failed to enable the SAGV\n"); + DRM_ERROR("Failed to enable SAGV\n"); return ret; } @@ -3714,7 +3710,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) if (dev_priv->sagv_status == I915_SAGV_DISABLED) return 0; - DRM_DEBUG_KMS("Disabling the SAGV\n"); + DRM_DEBUG_KMS("Disabling SAGV\n"); mutex_lock(&dev_priv->pcu_lock); /* bspec says to keep retrying for at least 1 ms */ @@ -3726,14 +3722,14 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) /* * Some skl systems, pre-release machines in particular, - * don't actually have an SAGV. + * don't actually have SAGV. 
*/ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; return 0; } else if (ret < 0) { - DRM_ERROR("Failed to disable the SAGV (%d)\n", ret); + DRM_ERROR("Failed to disable SAGV (%d)\n", ret); return ret; } @@ -3756,15 +3752,15 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) if (!intel_has_sagv(dev_priv)) return false; - if (IS_GEN9(dev_priv)) + if (IS_GEN(dev_priv, 9)) sagv_block_time_us = 30; - else if (IS_GEN10(dev_priv)) + else if (IS_GEN(dev_priv, 10)) sagv_block_time_us = 20; else sagv_block_time_us = 10; /* - * SKL+ workaround: bspec recommends we disable the SAGV when we have + * SKL+ workaround: bspec recommends we disable SAGV when we have * more then one pipe enabled * * If there are no active CRTCs, no additional checks need be performed @@ -3797,7 +3793,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) latency = dev_priv->wm.skl_latency[level]; - if (skl_needs_memory_bw_wa(intel_state) && + if (skl_needs_memory_bw_wa(dev_priv) && plane->base.state->fb->modifier == I915_FORMAT_MOD_X_TILED) latency += 15; @@ -3805,7 +3801,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) /* * If any of the planes on this pipe don't enable wm levels that * incur memory latencies higher than sagv_block_time_us we - * can't enable the SAGV. + * can't enable SAGV. */ if (latency < sagv_block_time_us) return false; @@ -3834,8 +3830,13 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, /* * 12GB/s is maximum BW supported by single DBuf slice. + * + * FIXME dbuf slice code is broken: + * - must wait for planes to stop using the slice before powering it off + * - plane straddling both slices is illegal in multi-pipe scenarios + * - should validate we stay within the hw bandwidth limits */ - if (num_active > 1 || total_data_bw >= GBps(12)) { + if (0 && (num_active > 1 || total_data_bw >= GBps(12))) { ddb->enabled_slices = 2; } else { ddb->enabled_slices = 1; @@ -3923,25 +3924,51 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width; } -static unsigned int skl_cursor_allocation(int num_active) +static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, + int width, const struct drm_format_info *format, + u64 modifier, unsigned int rotation, + u32 plane_pixel_rate, struct skl_wm_params *wp, + int color_plane); +static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, + int level, + const struct skl_wm_params *wp, + const struct skl_wm_level *result_prev, + struct skl_wm_level *result /* out */); + +static unsigned int +skl_cursor_allocation(const struct intel_crtc_state *crtc_state, + int num_active) { - if (num_active == 1) - return 32; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + int level, max_level = ilk_wm_max_level(dev_priv); + struct skl_wm_level wm = {}; + int ret, min_ddb_alloc = 0; + struct skl_wm_params wp; + + ret = skl_compute_wm_params(crtc_state, 256, + drm_format_info(DRM_FORMAT_ARGB8888), + DRM_FORMAT_MOD_LINEAR, + DRM_MODE_ROTATE_0, + crtc_state->pixel_rate, &wp, 0); + WARN_ON(ret); + + for (level = 0; level <= max_level; level++) { + skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm); + if (wm.min_ddb_alloc == U16_MAX) + break; + + min_ddb_alloc = wm.min_ddb_alloc; + } - return 8; + return max(num_active == 1 ? 
32 : 8, min_ddb_alloc); } static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv, struct skl_ddb_entry *entry, u32 reg) { - u16 mask; - if (INTEL_GEN(dev_priv) >= 11) - mask = ICL_DDB_ENTRY_MASK; - else - mask = SKL_DDB_ENTRY_MASK; - entry->start = reg & mask; - entry->end = (reg >> DDB_ENTRY_END_SHIFT) & mask; + entry->start = reg & DDB_ENTRY_MASK; + entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK; if (entry->end) entry->end += 1; @@ -3979,7 +4006,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); - if (fourcc == DRM_FORMAT_NV12) + if (is_planar_yuv_format(fourcc)) swap(val, val2); skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); @@ -3994,10 +4021,12 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; enum plane_id plane_id; power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return; for_each_plane_id_on_crtc(crtc, plane_id) @@ -4006,7 +4035,7 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, &ddb_y[plane_id], &ddb_uv[plane_id]); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); } void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, @@ -4036,7 +4065,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate) { struct intel_plane *plane = to_intel_plane(pstate->base.plane); - uint32_t src_w, src_h, dst_w, dst_h; + u32 src_w, src_h, dst_w, dst_h; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; @@ -4082,8 +4111,8 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state) return pipe_downscale; if (crtc_state->pch_pfit.enabled) { - uint32_t src_w, src_h, dst_w, dst_h; - uint32_t pfit_size = crtc_state->pch_pfit.size; + u32 src_w, src_h, dst_w, dst_h; + u32 pfit_size = crtc_state->pch_pfit.size; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; @@ -4116,7 +4145,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, const struct drm_plane_state *pstate; struct intel_plane_state *intel_pstate; int crtc_clock, dotclk; - uint32_t pipe_max_pixel_rate; + u32 pipe_max_pixel_rate; uint_fixed_16_16_t pipe_downscale; uint_fixed_16_16_t max_downscale = u32_to_fixed16(1); @@ -4172,8 +4201,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, { struct intel_plane *intel_plane = to_intel_plane(intel_pstate->base.plane); - uint32_t data_rate; - uint32_t width = 0, height = 0; + u32 data_rate; + u32 width = 0, height = 0; struct drm_framebuffer *fb; u32 format; uint_fixed_16_16_t down_scale_amount; @@ -4187,7 +4216,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, if (intel_plane->id == PLANE_CURSOR) return 0; - if (plane == 1 && format != DRM_FORMAT_NV12) + if (plane == 1 && !is_planar_yuv_format(format)) return 0; /* @@ -4199,7 +4228,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, height = drm_rect_height(&intel_pstate->base.src) >> 16; /* UV plane does 1/2 pixel sub-sampling */ - if (plane == 1 && format == DRM_FORMAT_NV12) { + if (plane == 1 && 
is_planar_yuv_format(format)) { width /= 2; height /= 2; } @@ -4306,102 +4335,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, return total_data_rate; } -static uint16_t -skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane) -{ - struct drm_framebuffer *fb = pstate->fb; - struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); - uint32_t src_w, src_h; - uint32_t min_scanlines = 8; - uint8_t plane_bpp; - - if (WARN_ON(!fb)) - return 0; - - /* For packed formats, and uv-plane, return 0 */ - if (plane == 1 && fb->format->format != DRM_FORMAT_NV12) - return 0; - - /* For Non Y-tile return 8-blocks */ - if (fb->modifier != I915_FORMAT_MOD_Y_TILED && - fb->modifier != I915_FORMAT_MOD_Yf_TILED && - fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS && - fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS) - return 8; - - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. - */ - src_w = drm_rect_width(&intel_pstate->base.src) >> 16; - src_h = drm_rect_height(&intel_pstate->base.src) >> 16; - - /* Halve UV plane width and height for NV12 */ - if (plane == 1) { - src_w /= 2; - src_h /= 2; - } - - plane_bpp = fb->format->cpp[plane]; - - if (drm_rotation_90_or_270(pstate->rotation)) { - switch (plane_bpp) { - case 1: - min_scanlines = 32; - break; - case 2: - min_scanlines = 16; - break; - case 4: - min_scanlines = 8; - break; - case 8: - min_scanlines = 4; - break; - default: - WARN(1, "Unsupported pixel depth %u for rotation", - plane_bpp); - min_scanlines = 32; - } - } - - return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3; -} - -static void -skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active, - uint16_t *minimum, uint16_t *uv_minimum) -{ - const struct drm_plane_state *pstate; - struct drm_plane *plane; - - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) { - enum plane_id plane_id = to_intel_plane(plane)->id; - struct intel_plane_state *plane_state = to_intel_plane_state(pstate); - - if (plane_id == PLANE_CURSOR) - continue; - - /* slave plane must be invisible and calculated from master */ - if (!pstate->visible || WARN_ON(plane_state->slave)) - continue; - - if (!plane_state->linked_plane) { - minimum[plane_id] = skl_ddb_min_alloc(pstate, 0); - uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); - } else { - enum plane_id y_plane_id = - plane_state->linked_plane->id; - - minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0); - minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); - } - } - - minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active); -} - static int skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, struct skl_ddb_allocation *ddb /* out */) @@ -4411,15 +4344,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; - uint16_t alloc_size, start; - uint16_t minimum[I915_MAX_PLANES] = {}; - uint16_t uv_minimum[I915_MAX_PLANES] = {}; + u16 alloc_size, start = 0; + u16 total[I915_MAX_PLANES] = {}; + u16 uv_total[I915_MAX_PLANES] = {}; u64 total_data_rate; enum plane_id plane_id; int num_active; u64 plane_data_rate[I915_MAX_PLANES] = {}; u64 uv_plane_data_rate[I915_MAX_PLANES] = {}; - uint16_t total_min_blocks = 0; + u32 blocks; + int level; /* Clear the partitioning for disabled planes. 
*/ memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y)); @@ -4449,81 +4383,172 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, if (alloc_size == 0) return 0; - skl_ddb_calc_min(cstate, num_active, minimum, uv_minimum); + /* Allocate fixed number of blocks for cursor. */ + total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active); + alloc_size -= total[PLANE_CURSOR]; + cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = + alloc->end - total[PLANE_CURSOR]; + cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; + + if (total_data_rate == 0) + return 0; /* - * 1. Allocate the mininum required blocks for each active plane - * and allocate the cursor, it doesn't require extra allocation - * proportional to the data rate. + * Find the highest watermark level for which we can satisfy the block + * requirement of active planes. */ + for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) { + blocks = 0; + for_each_plane_id_on_crtc(intel_crtc, plane_id) { + const struct skl_plane_wm *wm = + &cstate->wm.skl.optimal.planes[plane_id]; + + if (plane_id == PLANE_CURSOR) { + if (WARN_ON(wm->wm[level].min_ddb_alloc > + total[PLANE_CURSOR])) { + blocks = U32_MAX; + break; + } + continue; + } - for_each_plane_id_on_crtc(intel_crtc, plane_id) { - total_min_blocks += minimum[plane_id]; - total_min_blocks += uv_minimum[plane_id]; + blocks += wm->wm[level].min_ddb_alloc; + blocks += wm->uv_wm[level].min_ddb_alloc; + } + + if (blocks <= alloc_size) { + alloc_size -= blocks; + break; + } } - if (total_min_blocks > alloc_size) { + if (level < 0) { DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations"); - DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks, - alloc_size); + DRM_DEBUG_KMS("minimum required %d/%d\n", blocks, + alloc_size); return -EINVAL; } - alloc_size -= total_min_blocks; - cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR]; - cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; - /* - * 2. Distribute the remaining space in proportion to the amount of - * data each plane needs to fetch from memory. - * - * FIXME: we may not allocate every single block here. + * Grant each plane the blocks it requires at the highest achievable + * watermark level, plus an extra share of the leftover blocks + * proportional to its relative data rate. */ - if (total_data_rate == 0) - return 0; - - start = alloc->start; for_each_plane_id_on_crtc(intel_crtc, plane_id) { - u64 data_rate, uv_data_rate; - uint16_t plane_blocks, uv_plane_blocks; + const struct skl_plane_wm *wm = + &cstate->wm.skl.optimal.planes[plane_id]; + u64 rate; + u16 extra; if (plane_id == PLANE_CURSOR) continue; - data_rate = plane_data_rate[plane_id]; - /* - * allocation for (packed formats) or (uv-plane part of planar format): - * promote the expression to 64 bits to avoid overflowing, the - * result is < available as data_rate / total_data_rate < 1 + * We've accounted for all active planes; remaining planes are + * all disabled. 
*/ - plane_blocks = minimum[plane_id]; - plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate); + if (total_data_rate == 0) + break; - /* Leave disabled planes at (0,0) */ - if (data_rate) { - cstate->wm.skl.plane_ddb_y[plane_id].start = start; - cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks; - } + rate = plane_data_rate[plane_id]; + extra = min_t(u16, alloc_size, + DIV64_U64_ROUND_UP(alloc_size * rate, + total_data_rate)); + total[plane_id] = wm->wm[level].min_ddb_alloc + extra; + alloc_size -= extra; + total_data_rate -= rate; - start += plane_blocks; + if (total_data_rate == 0) + break; - /* Allocate DDB for UV plane for planar format/NV12 */ - uv_data_rate = uv_plane_data_rate[plane_id]; + rate = uv_plane_data_rate[plane_id]; + extra = min_t(u16, alloc_size, + DIV64_U64_ROUND_UP(alloc_size * rate, + total_data_rate)); + uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra; + alloc_size -= extra; + total_data_rate -= rate; + } + WARN_ON(alloc_size != 0 || total_data_rate != 0); - uv_plane_blocks = uv_minimum[plane_id]; - uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate); + /* Set the actual DDB start/end points for each plane */ + start = alloc->start; + for_each_plane_id_on_crtc(intel_crtc, plane_id) { + struct skl_ddb_entry *plane_alloc = + &cstate->wm.skl.plane_ddb_y[plane_id]; + struct skl_ddb_entry *uv_plane_alloc = + &cstate->wm.skl.plane_ddb_uv[plane_id]; + + if (plane_id == PLANE_CURSOR) + continue; /* Gen11+ uses a separate plane for UV watermarks */ - WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks); + WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]); - if (uv_data_rate) { - cstate->wm.skl.plane_ddb_uv[plane_id].start = start; - cstate->wm.skl.plane_ddb_uv[plane_id].end = - start + uv_plane_blocks; + /* Leave disabled planes at (0,0) */ + if (total[plane_id]) { + plane_alloc->start = start; + start += total[plane_id]; + plane_alloc->end = start; } - start += uv_plane_blocks; + if (uv_total[plane_id]) { + uv_plane_alloc->start = start; + start += uv_total[plane_id]; + uv_plane_alloc->end = start; + } + } + + /* + * When we calculated watermark values we didn't know how high + * of a level we'd actually be able to hit, so we just marked + * all levels as "enabled." Go back now and disable the ones + * that aren't actually possible. + */ + for (level++; level <= ilk_wm_max_level(dev_priv); level++) { + for_each_plane_id_on_crtc(intel_crtc, plane_id) { + struct skl_plane_wm *wm = + &cstate->wm.skl.optimal.planes[plane_id]; + + /* + * We only disable the watermarks for each plane if + * they exceed the ddb allocation of said plane. This + * is done so that we don't end up touching cursor + * watermarks needlessly when some other plane reduces + * our max possible watermark level. + * + * Bspec has this to say about the PLANE_WM enable bit: + * "All the watermarks at this level for all enabled + * planes must be enabled before the level will be used." + * So this is actually safe to do. 
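[Editor's illustration, not part of the patch] The proportional hand-out of whatever survives the minima can likewise be checked in isolation. This sketch uses made-up minima and data rates, with a local div_u64_round_up() standing in for DIV64_U64_ROUND_UP(); shrinking both the pool and the remaining data rate as each plane is handled is what makes the distribution come out exact, which is the invariant the new WARN_ON asserts:

#include <stdint.h>
#include <stdio.h>

#define NUM_PLANES 3

static uint16_t div_u64_round_up(uint64_t num, uint64_t den)
{
        return (uint16_t)((num + den - 1) / den);
}

int main(void)
{
        /* invented inputs: minima for the chosen level and data rates */
        uint16_t min_alloc[NUM_PLANES] = { 40, 30, 14 };
        uint64_t rate[NUM_PLANES] = { 4000000, 2500000, 500000 };
        uint16_t total[NUM_PLANES] = { 0 };
        uint16_t alloc_size = 50;       /* leftover blocks after the minima */
        uint64_t total_rate = 0;
        int i;

        for (i = 0; i < NUM_PLANES; i++)
                total_rate += rate[i];

        for (i = 0; i < NUM_PLANES && total_rate; i++) {
                uint16_t extra;

                /* round up, but never hand out more than is left */
                extra = div_u64_round_up((uint64_t)alloc_size * rate[i],
                                         total_rate);
                if (extra > alloc_size)
                        extra = alloc_size;

                total[i] = min_alloc[i] + extra;
                alloc_size -= extra;
                total_rate -= rate[i];
        }

        for (i = 0; i < NUM_PLANES; i++)
                printf("plane %d: %u blocks\n", i, total[i]);
        printf("unassigned: %u blocks, %llu rate\n",
               alloc_size, (unsigned long long)total_rate);
        return 0;
}

The last plane always sees rate equal to the remaining total rate, so its rounded-up share is clamped to exactly what is left and both counters end at zero.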
+ */ + if (wm->wm[level].min_ddb_alloc > total[plane_id] || + wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id]) + memset(&wm->wm[level], 0, sizeof(wm->wm[level])); + + /* + * Wa_1408961008:icl + * Underruns with WM1+ disabled + */ + if (IS_ICELAKE(dev_priv) && + level == 1 && wm->wm[0].plane_en) { + wm->wm[level].plane_res_b = wm->wm[0].plane_res_b; + wm->wm[level].plane_res_l = wm->wm[0].plane_res_l; + wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; + } + } + } + + /* + * Go back and disable the transition watermark if it turns out we + * don't have enough DDB blocks for it. + */ + for_each_plane_id_on_crtc(intel_crtc, plane_id) { + struct skl_plane_wm *wm = + &cstate->wm.skl.optimal.planes[plane_id]; + + if (wm->trans_wm.plane_res_b >= total[plane_id]) + memset(&wm->trans_wm, 0, sizeof(wm->trans_wm)); } return 0; @@ -4536,10 +4561,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. */ static uint_fixed_16_16_t -skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate, - uint8_t cpp, uint32_t latency, uint32_t dbuf_block_size) +skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate, + u8 cpp, u32 latency, u32 dbuf_block_size) { - uint32_t wm_intermediate_val; + u32 wm_intermediate_val; uint_fixed_16_16_t ret; if (latency == 0) @@ -4554,12 +4579,11 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate, return ret; } -static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate, - uint32_t pipe_htotal, - uint32_t latency, - uint_fixed_16_16_t plane_blocks_per_line) +static uint_fixed_16_16_t +skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, + uint_fixed_16_16_t plane_blocks_per_line) { - uint32_t wm_intermediate_val; + u32 wm_intermediate_val; uint_fixed_16_16_t ret; if (latency == 0) @@ -4575,8 +4599,8 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate, static uint_fixed_16_16_t intel_get_linetime_us(const struct intel_crtc_state *cstate) { - uint32_t pixel_rate; - uint32_t crtc_htotal; + u32 pixel_rate; + u32 crtc_htotal; uint_fixed_16_16_t linetime_us; if (!cstate->base.active) @@ -4593,11 +4617,11 @@ intel_get_linetime_us(const struct intel_crtc_state *cstate) return linetime_us; } -static uint32_t +static u32 skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate) { - uint64_t adjusted_pixel_rate; + u64 adjusted_pixel_rate; uint_fixed_16_16_t downscale_amount; /* Shouldn't reach here on disabled planes... 
*/ @@ -4616,60 +4640,45 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, } static int -skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, - const struct intel_plane_state *intel_pstate, - struct skl_wm_params *wp, int color_plane) +skl_compute_wm_params(const struct intel_crtc_state *crtc_state, + int width, const struct drm_format_info *format, + u64 modifier, unsigned int rotation, + u32 plane_pixel_rate, struct skl_wm_params *wp, + int color_plane) { - struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_plane_state *pstate = &intel_pstate->base; - const struct drm_framebuffer *fb = pstate->fb; - uint32_t interm_pbpl; - struct intel_atomic_state *state = - to_intel_atomic_state(cstate->base.state); - bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 interm_pbpl; - /* only NV12 format has two planes */ - if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) { - DRM_DEBUG_KMS("Non NV12 format have single plane\n"); + /* only planar format has two planes */ + if (color_plane == 1 && !is_planar_yuv_format(format->format)) { + DRM_DEBUG_KMS("Non planar format have single plane\n"); return -EINVAL; } - wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || - fb->modifier == I915_FORMAT_MOD_Yf_TILED || - fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; - wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->is_planar = fb->format->format == DRM_FORMAT_NV12; - - if (plane->id == PLANE_CURSOR) { - wp->width = intel_pstate->base.crtc_w; - } else { - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. 
- */ - wp->width = drm_rect_width(&intel_pstate->base.src) >> 16; - } + wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED || + modifier == I915_FORMAT_MOD_Yf_TILED || + modifier == I915_FORMAT_MOD_Y_TILED_CCS || + modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; + wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || + modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + wp->is_planar = is_planar_yuv_format(format->format); + wp->width = width; if (color_plane == 1 && wp->is_planar) wp->width /= 2; - wp->cpp = fb->format->cpp[color_plane]; - wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, - intel_pstate); + wp->cpp = format->cpp[color_plane]; + wp->plane_pixel_rate = plane_pixel_rate; if (INTEL_GEN(dev_priv) >= 11 && - fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 8) + modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1) wp->dbuf_block_size = 256; else wp->dbuf_block_size = 512; - if (drm_rotation_90_or_270(pstate->rotation)) { - + if (drm_rotation_90_or_270(rotation)) { switch (wp->cpp) { case 1: wp->y_min_scanlines = 16; @@ -4688,7 +4697,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, wp->y_min_scanlines = 4; } - if (apply_memory_bw_wa) + if (skl_needs_memory_bw_wa(dev_priv)) wp->y_min_scanlines *= 2; wp->plane_bytes_per_line = wp->width * wp->cpp; @@ -4702,7 +4711,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, wp->plane_blocks_per_line = div_fixed16(interm_pbpl, wp->y_min_scanlines); - } else if (wp->x_tiled && IS_GEN9(dev_priv)) { + } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) { interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, wp->dbuf_block_size); wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); @@ -4714,34 +4723,66 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines, wp->plane_blocks_per_line); + wp->linetime_us = fixed16_to_u32_round_up( - intel_get_linetime_us(cstate)); + intel_get_linetime_us(crtc_state)); return 0; } -static int skl_compute_plane_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *intel_pstate, - uint16_t ddb_allocation, - int level, - const struct skl_wm_params *wp, - const struct skl_wm_level *result_prev, - struct skl_wm_level *result /* out */) +static int +skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + struct skl_wm_params *wp, int color_plane) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + const struct drm_framebuffer *fb = plane_state->base.fb; + int width; + + if (plane->id == PLANE_CURSOR) { + width = plane_state->base.crtc_w; + } else { + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. 
+ */ + width = drm_rect_width(&plane_state->base.src) >> 16; + } + + return skl_compute_wm_params(crtc_state, width, + fb->format, fb->modifier, + plane_state->base.rotation, + skl_adjusted_plane_pixel_rate(crtc_state, plane_state), + wp, color_plane); +} + +static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level) { - struct drm_i915_private *dev_priv = - to_i915(intel_pstate->base.plane->dev); - const struct drm_plane_state *pstate = &intel_pstate->base; - uint32_t latency = dev_priv->wm.skl_latency[level]; + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + return true; + + /* The number of lines are ignored for the level 0 watermark. */ + return level > 0; +} + +static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, + int level, + const struct skl_wm_params *wp, + const struct skl_wm_level *result_prev, + struct skl_wm_level *result /* out */) +{ + struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + u32 latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; - uint32_t res_blocks, res_lines; - struct intel_atomic_state *state = - to_intel_atomic_state(cstate->base.state); - bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); - uint32_t min_disp_buf_needed; + u32 res_blocks, res_lines, min_ddb_alloc = 0; - if (latency == 0) - return level == 0 ? -EINVAL : 0; + if (latency == 0) { + /* reject it */ + result->min_ddb_alloc = U16_MAX; + return; + } /* Display WA #1141: kbl,cfl */ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || @@ -4749,7 +4790,7 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate, dev_priv->ipc_enabled) latency += 4; - if (apply_memory_bw_wa && wp->x_tiled) + if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled) latency += 15; method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate, @@ -4766,15 +4807,8 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate, wp->dbuf_block_size < 1) && (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { selected_result = method2; - } else if (ddb_allocation >= - fixed16_to_u32_round_up(wp->plane_blocks_per_line)) { - if (IS_GEN9(dev_priv) && - !IS_GEMINILAKE(dev_priv)) - selected_result = min_fixed16(method1, method2); - else - selected_result = method2; } else if (latency >= wp->linetime_us) { - if (IS_GEN9(dev_priv) && + if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) selected_result = min_fixed16(method1, method2); else @@ -4788,131 +4822,105 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate, res_lines = div_round_up_fixed16(selected_result, wp->plane_blocks_per_line); - /* Display WA #1125: skl,bxt,kbl,glk */ - if (level == 0 && wp->rc_surface) - res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); + if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) { + /* Display WA #1125: skl,bxt,kbl */ + if (level == 0 && wp->rc_surface) + res_blocks += + fixed16_to_u32_round_up(wp->y_tile_minimum); + + /* Display WA #1126: skl,bxt,kbl */ + if (level >= 1 && level <= 7) { + if (wp->y_tiled) { + res_blocks += + fixed16_to_u32_round_up(wp->y_tile_minimum); + res_lines += wp->y_min_scanlines; + } else { + res_blocks++; + } - /* Display WA #1126: skl,bxt,kbl,glk */ - if (level >= 1 && level <= 7) { - if (wp->y_tiled) { - res_blocks += fixed16_to_u32_round_up( - wp->y_tile_minimum); - res_lines += wp->y_min_scanlines; - } else { - res_blocks++; + /* + * Make sure result blocks for higher latency levels are + * atleast as high as level below the 
current level. + * Assumption in DDB algorithm optimization for special + * cases. Also covers Display WA #1125 for RC. + */ + if (result_prev->plane_res_b > res_blocks) + res_blocks = result_prev->plane_res_b; } - - /* - * Make sure result blocks for higher latency levels are atleast - * as high as level below the current level. - * Assumption in DDB algorithm optimization for special cases. - * Also covers Display WA #1125 for RC. - */ - if (result_prev->plane_res_b > res_blocks) - res_blocks = result_prev->plane_res_b; } if (INTEL_GEN(dev_priv) >= 11) { if (wp->y_tiled) { - uint32_t extra_lines; - uint_fixed_16_16_t fp_min_disp_buf_needed; + int extra_lines; if (res_lines % wp->y_min_scanlines == 0) extra_lines = wp->y_min_scanlines; else extra_lines = wp->y_min_scanlines * 2 - - res_lines % wp->y_min_scanlines; + res_lines % wp->y_min_scanlines; - fp_min_disp_buf_needed = mul_u32_fixed16(res_lines + - extra_lines, - wp->plane_blocks_per_line); - min_disp_buf_needed = fixed16_to_u32_round_up( - fp_min_disp_buf_needed); + min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines, + wp->plane_blocks_per_line); } else { - min_disp_buf_needed = DIV_ROUND_UP(res_blocks * 11, 10); + min_ddb_alloc = res_blocks + + DIV_ROUND_UP(res_blocks, 10); } - } else { - min_disp_buf_needed = res_blocks; } - if ((level > 0 && res_lines > 31) || - res_blocks >= ddb_allocation || - min_disp_buf_needed >= ddb_allocation) { - /* - * If there are no valid level 0 watermarks, then we can't - * support this display configuration. - */ - if (level) { - return 0; - } else { - struct drm_plane *plane = pstate->plane; + if (!skl_wm_has_lines(dev_priv, level)) + res_lines = 0; - DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); - DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", - plane->base.id, plane->name, - res_blocks, ddb_allocation, res_lines); - return -EINVAL; - } + if (res_lines > 31) { + /* reject it */ + result->min_ddb_alloc = U16_MAX; + return; } - /* The number of lines are ignored for the level 0 watermark. */ + /* + * If res_lines is valid, assume we can use this watermark level + * for now. We'll come back and disable it after we calculate the + * DDB allocation if it turns out we don't actually have enough + * blocks to satisfy it. 
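[Editor's illustration, not part of the patch] The ICL+ min_ddb_alloc arithmetic above is compact but easy to sanity-check on its own. In this user-space sketch all inputs are invented and mul_round_up_u32_fixed16() is reimplemented locally to mimic the driver's 16.16 fixed-point helper:

#include <stdint.h>
#include <stdio.h>

/* made-up inputs for one Y-tiled plane */
#define Y_MIN_SCANLINES 8u
#define RES_LINES       21u
#define RES_BLOCKS      38u
/* 1.75 blocks per line, expressed in 16.16 fixed point */
#define BLOCKS_PER_LINE_FP16 ((uint32_t)(1.75 * 65536))

static uint32_t mul_round_up_u32_fixed16(uint32_t val, uint32_t fp16)
{
        return (uint32_t)(((uint64_t)val * fp16 + 0xffff) >> 16);
}

int main(void)
{
        uint32_t extra_lines, min_ddb_alloc;

        /* round the line count up to whole Y-tile rows, plus one more row */
        if (RES_LINES % Y_MIN_SCANLINES == 0)
                extra_lines = Y_MIN_SCANLINES;
        else
                extra_lines = Y_MIN_SCANLINES * 2 -
                              RES_LINES % Y_MIN_SCANLINES;

        min_ddb_alloc = mul_round_up_u32_fixed16(RES_LINES + extra_lines,
                                                 BLOCKS_PER_LINE_FP16);
        printf("Y-tiled:     min_ddb_alloc = %u blocks\n", min_ddb_alloc);

        /* linear/X-tiled path on gen11: the blocks plus a ~10% margin */
        min_ddb_alloc = RES_BLOCKS + (RES_BLOCKS + 9) / 10;
        printf("not Y-tiled: min_ddb_alloc = %u blocks\n", min_ddb_alloc);
        return 0;
}

With 21 result lines and 8 scanlines per Y-tile row the padded count becomes 32 lines, i.e. 56 blocks; the non-Y-tiled path turns 38 blocks into 42.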
+ */ result->plane_res_b = res_blocks; result->plane_res_l = res_lines; + /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */ + result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1; result->plane_en = true; - - return 0; } -static int +static void skl_compute_wm_levels(const struct intel_crtc_state *cstate, - const struct intel_plane_state *intel_pstate, - uint16_t ddb_blocks, const struct skl_wm_params *wm_params, struct skl_wm_level *levels) { - struct drm_i915_private *dev_priv = - to_i915(intel_pstate->base.plane->dev); + struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); int level, max_level = ilk_wm_max_level(dev_priv); struct skl_wm_level *result_prev = &levels[0]; - int ret; for (level = 0; level <= max_level; level++) { struct skl_wm_level *result = &levels[level]; - ret = skl_compute_plane_wm(cstate, - intel_pstate, - ddb_blocks, - level, - wm_params, - result_prev, - result); - if (ret) - return ret; + skl_compute_plane_wm(cstate, level, wm_params, + result_prev, result); result_prev = result; } - - return 0; } -static uint32_t +static u32 skl_compute_linetime_wm(const struct intel_crtc_state *cstate) { struct drm_atomic_state *state = cstate->base.state; struct drm_i915_private *dev_priv = to_i915(state->dev); uint_fixed_16_16_t linetime_us; - uint32_t linetime_wm; + u32 linetime_wm; linetime_us = intel_get_linetime_us(cstate); - - if (is_fixed16_zero(linetime_us)) - return 0; - linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us)); - /* Display WA #1135: bxt:ALL GLK:ALL */ - if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) && - dev_priv->ipc_enabled) + /* Display WA #1135: BXT:ALL GLK:ALL */ + if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled) linetime_wm /= 2; return linetime_wm; @@ -4920,14 +4928,13 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate) static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, const struct skl_wm_params *wp, - struct skl_plane_wm *wm, - uint16_t ddb_allocation) + struct skl_plane_wm *wm) { struct drm_device *dev = cstate->base.crtc->dev; const struct drm_i915_private *dev_priv = to_i915(dev); - uint16_t trans_min, trans_y_tile_min; - const uint16_t trans_amount = 10; /* This is configurable amount */ - uint16_t wm0_sel_res_b, trans_offset_b, res_blocks; + u16 trans_min, trans_y_tile_min; + const u16 trans_amount = 10; /* This is configurable amount */ + u16 wm0_sel_res_b, trans_offset_b, res_blocks; /* Transition WM are not recommended by HW team for GEN9 */ if (INTEL_GEN(dev_priv) <= 9) @@ -4956,8 +4963,8 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, wm0_sel_res_b = wm->wm[0].plane_res_b - 1; if (wp->y_tiled) { - trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2, - wp->y_tile_minimum); + trans_y_tile_min = + (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum); res_blocks = max(wm0_sel_res_b, trans_y_tile_min) + trans_offset_b; } else { @@ -4969,12 +4976,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, } - res_blocks += 1; - - if (res_blocks < ddb_allocation) { - wm->trans_wm.plane_res_b = res_blocks; - wm->trans_wm.plane_en = true; - } + /* + * Just assume we can enable the transition watermark. After + * computing the DDB we'll come back and disable it if that + * assumption turns out to be false. 
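[Editor's illustration, not part of the patch] Since the transition watermark is now filled in unconditionally and only pruned once the DDB has been carved up, the two halves of that logic can be lined up in one place. The block counts below, including trans_offset_b and the doubled Y-tile minimum, are plausible invented values, not numbers taken from hardware:

#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        /* invented inputs */
        uint16_t wm0_res_b = 38;        /* level 0 blocks from the WM pass */
        uint16_t trans_offset_b = 24;   /* trans_min plus the configurable 10 */
        uint16_t trans_y_tile_min = 28; /* 2 * y_tile_minimum, rounded up */
        uint16_t ddb_blocks = 69;       /* this plane's share after allocation */
        uint16_t trans_res_b;
        int trans_en;

        /* Y-tiled: never drop below two Y-tile rows' worth of blocks */
        trans_res_b = MAX(wm0_res_b - 1, trans_y_tile_min) + trans_offset_b + 1;
        trans_en = 1;   /* optimistically enabled, as in the patch */

        /* the prune step that now runs after the DDB has been divided up */
        if (trans_res_b >= ddb_blocks) {
                trans_res_b = 0;
                trans_en = 0;
        }

        printf("trans_wm: %u blocks, %s\n",
               trans_res_b, trans_en ? "enabled" : "disabled");
        return 0;
}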
+ */ + wm->trans_wm.plane_res_b = res_blocks + 1; + wm->trans_wm.plane_en = true; } static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, @@ -4982,7 +4990,6 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, enum plane_id plane_id, int color_plane) { struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]); struct skl_wm_params wm_params; int ret; @@ -4991,12 +4998,8 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, if (ret) return ret; - ret = skl_compute_wm_levels(crtc_state, plane_state, - ddb_blocks, &wm_params, wm->wm); - if (ret) - return ret; - - skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks); + skl_compute_wm_levels(crtc_state, &wm_params, wm->wm); + skl_compute_transition_wm(crtc_state, &wm_params, wm); return 0; } @@ -5006,7 +5009,6 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, enum plane_id plane_id) { struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]); struct skl_wm_params wm_params; int ret; @@ -5018,16 +5020,12 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, if (ret) return ret; - ret = skl_compute_wm_levels(crtc_state, plane_state, - ddb_blocks, &wm_params, wm->uv_wm); - if (ret) - return ret; + skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm); return 0; } -static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm, - struct intel_crtc_state *crtc_state, +static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->base.plane); @@ -5053,8 +5051,7 @@ static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm, return 0; } -static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm, - struct intel_crtc_state *crtc_state, +static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id; @@ -5091,10 +5088,10 @@ static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm, return 0; } -static int skl_build_pipe_wm(struct intel_crtc_state *cstate, - struct skl_pipe_wm *pipe_wm) +static int skl_build_pipe_wm(struct intel_crtc_state *cstate) { struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; struct drm_crtc_state *crtc_state = &cstate->base; struct drm_plane *plane; const struct drm_plane_state *pstate; @@ -5111,11 +5108,9 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, to_intel_plane_state(pstate); if (INTEL_GEN(dev_priv) >= 11) - ret = icl_build_plane_wm(pipe_wm, - cstate, intel_pstate); + ret = icl_build_plane_wm(cstate, intel_pstate); else - ret = skl_build_plane_wm(pipe_wm, - cstate, intel_pstate); + ret = skl_build_plane_wm(cstate, intel_pstate); if (ret) return ret; } @@ -5139,13 +5134,14 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, i915_reg_t reg, const struct skl_wm_level *level) { - uint32_t val = 0; + u32 val = 0; - if (level->plane_en) { + if (level->plane_en) val |= PLANE_WM_EN; - val |= level->plane_res_b; - val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; - } + if (level->ignore_lines) + val |= PLANE_WM_IGNORE_LINES; + val |= level->plane_res_b; + val |= level->plane_res_l << 
PLANE_WM_LINES_SHIFT; I915_WRITE_FW(reg, val); } @@ -5211,6 +5207,7 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1, const struct skl_wm_level *l2) { return l1->plane_en == l2->plane_en && + l1->ignore_lines == l2->ignore_lines && l1->plane_res_l == l2->plane_res_l && l1->plane_res_b == l2->plane_res_b; } @@ -5230,6 +5227,23 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv, return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm); } +static bool skl_pipe_wm_equals(struct intel_crtc *crtc, + const struct skl_pipe_wm *wm1, + const struct skl_pipe_wm *wm2) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) { + if (!skl_plane_wm_equals(dev_priv, + &wm1->planes[plane_id], + &wm2->planes[plane_id])) + return false; + } + + return wm1->linetime == wm2->linetime; +} + static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, const struct skl_ddb_entry *b) { @@ -5251,35 +5265,15 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, return false; } -static int skl_update_pipe_wm(struct drm_crtc_state *cstate, - const struct skl_pipe_wm *old_pipe_wm, - struct skl_pipe_wm *pipe_wm, /* out */ - bool *changed /* out */) +static u32 +pipes_modified(struct intel_atomic_state *state) { - struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); - int ret; - - ret = skl_build_pipe_wm(intel_cstate, pipe_wm); - if (ret) - return ret; - - if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm))) - *changed = false; - else - *changed = true; - - return 0; -} - -static uint32_t -pipes_modified(struct drm_atomic_state *state) -{ - struct drm_crtc *crtc; - struct drm_crtc_state *cstate; - uint32_t i, ret = 0; + struct intel_crtc *crtc; + struct intel_crtc_state *cstate; + u32 i, ret = 0; - for_each_new_crtc_in_state(state, crtc, cstate, i) - ret |= drm_crtc_mask(crtc); + for_each_new_intel_crtc_in_state(state, crtc, cstate, i) + ret |= drm_crtc_mask(&crtc->base); return ret; } @@ -5314,11 +5308,10 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, } static int -skl_compute_ddb(struct drm_atomic_state *state) +skl_compute_ddb(struct intel_atomic_state *state) { - const struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; + const struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct skl_ddb_allocation *ddb = &state->wm_results.ddb; struct intel_crtc_state *old_crtc_state; struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; @@ -5326,7 +5319,7 @@ skl_compute_ddb(struct drm_atomic_state *state) memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); - for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state, + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { ret = skl_allocate_pipe_ddb(new_crtc_state, ddb); if (ret) @@ -5341,6 +5334,11 @@ skl_compute_ddb(struct drm_atomic_state *state) return 0; } +static char enast(bool enable) +{ + return enable ? 
'*' : ' '; +} + static void skl_print_wm_changes(struct intel_atomic_state *state) { @@ -5351,8 +5349,16 @@ skl_print_wm_changes(struct intel_atomic_state *state) struct intel_crtc *crtc; int i; + if ((drm_debug & DRM_UT_KMS) == 0) + return; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; + + old_pipe_wm = &old_crtc_state->wm.skl.optimal; + new_pipe_wm = &new_crtc_state->wm.skl.optimal; + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { enum plane_id plane_id = plane->id; const struct skl_ddb_entry *old, *new; @@ -5363,24 +5369,98 @@ skl_print_wm_changes(struct intel_atomic_state *state) if (skl_ddb_entry_equal(old, new)) continue; - DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", + DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", + plane->base.base.id, plane->base.name, + old->start, old->end, new->start, new->end, + skl_ddb_entry_size(old), skl_ddb_entry_size(new)); + } + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + enum plane_id plane_id = plane->id; + const struct skl_plane_wm *old_wm, *new_wm; + + old_wm = &old_pipe_wm->planes[plane_id]; + new_wm = &new_pipe_wm->planes[plane_id]; + + if (skl_plane_wm_equals(dev_priv, old_wm, new_wm)) + continue; + + DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm" + " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en), + enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en), + enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en), + enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en), + enast(old_wm->trans_wm.plane_en), + enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en), + enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en), + enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en), + enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), + enast(new_wm->trans_wm.plane_en)); + + DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" + " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n", plane->base.base.id, plane->base.name, - old->start, old->end, - new->start, new->end); + enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l, + enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, + enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l, + enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l, + enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l, + enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l, + enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l, + enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l, + enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l, + + enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l, + enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l, + enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l, + enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l, + enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l, + enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l, + enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l, + enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l, + enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l); + + 
DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b, + old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b, + old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b, + old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b, + old_wm->trans_wm.plane_res_b, + new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b, + new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b, + new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b, + new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b, + new_wm->trans_wm.plane_res_b); + + DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, + old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, + old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, + old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, + old_wm->trans_wm.min_ddb_alloc, + new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, + new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, + new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, + new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, + new_wm->trans_wm.min_ddb_alloc); } } } static int -skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed) +skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed) { - struct drm_device *dev = state->dev; + struct drm_device *dev = state->base.dev; const struct drm_i915_private *dev_priv = to_i915(dev); - const struct drm_crtc *crtc; - const struct drm_crtc_state *cstate; - struct intel_crtc *intel_crtc; - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - uint32_t realloc_pipes = pipes_modified(state); + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; + u32 realloc_pipes = pipes_modified(state); int ret, i; /* @@ -5398,7 +5478,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed) * since any racing commits that want to update them would need to * hold _all_ CRTC state mutexes. */ - for_each_new_crtc_in_state(state, crtc, cstate, i) + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) (*changed) = true; if (!*changed) @@ -5412,20 +5492,20 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed) */ if (dev_priv->wm.distrust_bios_wm) { ret = drm_modeset_lock(&dev->mode_config.connection_mutex, - state->acquire_ctx); + state->base.acquire_ctx); if (ret) return ret; - intel_state->active_pipe_changes = ~0; + state->active_pipe_changes = ~0; /* - * We usually only initialize intel_state->active_crtcs if we + * We usually only initialize state->active_crtcs if we * we're doing a modeset; make sure this field is always * initialized during the sanitization process that happens * on the first commit too. */ - if (!intel_state->modeset) - intel_state->active_crtcs = dev_priv->active_crtcs; + if (!state->modeset) + state->active_crtcs = dev_priv->active_crtcs; } /* @@ -5441,21 +5521,19 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed) * any other display updates race with this transaction, so we need * to grab the lock on *all* CRTC's. 
*/ - if (intel_state->active_pipe_changes || intel_state->modeset) { + if (state->active_pipe_changes || state->modeset) { realloc_pipes = ~0; - intel_state->wm_results.dirty_pipes = ~0; + state->wm_results.dirty_pipes = ~0; } /* * We're not recomputing for the pipes not included in the commit, so * make sure we start with the current state. */ - for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { - struct intel_crtc_state *cstate; - - cstate = intel_atomic_get_crtc_state(state, intel_crtc); - if (IS_ERR(cstate)) - return PTR_ERR(cstate); + for_each_intel_crtc_mask(dev, crtc, realloc_pipes) { + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); } return 0; @@ -5522,13 +5600,12 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state, } static int -skl_compute_wm(struct drm_atomic_state *state) +skl_compute_wm(struct intel_atomic_state *state) { - struct drm_crtc *crtc; - struct drm_crtc_state *cstate; - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct skl_ddb_values *results = &intel_state->wm_results; - struct skl_pipe_wm *pipe_wm; + struct intel_crtc *crtc; + struct intel_crtc_state *new_crtc_state; + struct intel_crtc_state *old_crtc_state; + struct skl_ddb_values *results = &state->wm_results; bool changed = false; int ret, i; @@ -5539,47 +5616,33 @@ skl_compute_wm(struct drm_atomic_state *state) if (ret || !changed) return ret; - ret = skl_compute_ddb(state); - if (ret) - return ret; - /* * Calculate WM's for all pipes that are part of this transaction. - * Note that the DDB allocation above may have added more CRTC's that + * Note that skl_ddb_add_affected_pipes may have added more CRTC's that * weren't otherwise being modified (and set bits in dirty_pipes) if * pipe allocations had to change. - * - * FIXME: Now that we're doing this in the atomic check phase, we - * should allow skl_update_pipe_wm() to return failure in cases where - * no suitable watermark values can be found. 
*/ - for_each_new_crtc_in_state(state, crtc, cstate, i) { - struct intel_crtc_state *intel_cstate = - to_intel_crtc_state(cstate); - const struct skl_pipe_wm *old_pipe_wm = - &to_intel_crtc_state(crtc->state)->wm.skl.optimal; - - pipe_wm = &intel_cstate->wm.skl.optimal; - ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed); + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + ret = skl_build_pipe_wm(new_crtc_state); if (ret) return ret; - ret = skl_wm_add_affected_planes(intel_state, - to_intel_crtc(crtc)); + ret = skl_wm_add_affected_planes(state, crtc); if (ret) return ret; - if (changed) - results->dirty_pipes |= drm_crtc_mask(crtc); - - if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) - /* This pipe's WM's did not change */ - continue; - - intel_cstate->update_wm_pre = true; + if (!skl_pipe_wm_equals(crtc, + &old_crtc_state->wm.skl.optimal, + &new_crtc_state->wm.skl.optimal)) + results->dirty_pipes |= drm_crtc_mask(&crtc->base); } - skl_print_wm_changes(intel_state); + ret = skl_compute_ddb(state); + if (ret) + return ret; + + skl_print_wm_changes(state); return 0; } @@ -5617,13 +5680,13 @@ static void skl_initial_wm(struct intel_atomic_state *state, mutex_unlock(&dev_priv->wm.wm_mutex); } -static void ilk_compute_wm_config(struct drm_device *dev, +static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, struct intel_wm_config *config) { struct intel_crtc *crtc; /* Compute the currently _active_ config */ - for_each_intel_crtc(dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; if (!wm->pipe_enabled) @@ -5637,25 +5700,24 @@ static void ilk_compute_wm_config(struct drm_device *dev, static void ilk_program_watermarks(struct drm_i915_private *dev_priv) { - struct drm_device *dev = &dev_priv->drm; struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; struct ilk_wm_maximums max; struct intel_wm_config config = {}; struct ilk_wm_values results = {}; enum intel_ddb_partitioning partitioning; - ilk_compute_wm_config(dev, &config); + ilk_compute_wm_config(dev_priv, &config); - ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); - ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); + ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max); + ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2); /* 5/6 split only in single pipe config on IVB+ */ if (INTEL_GEN(dev_priv) >= 7 && config.num_pipes_active == 1 && config.sprites_enabled) { - ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); - ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); + ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max); + ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6); - best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); + best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6); } else { best_lp_wm = &lp_wm_1_2; } @@ -5663,7 +5725,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv) partitioning = (best_lp_wm == &lp_wm_1_2) ? 
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; - ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); + ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results); ilk_write_wm_values(dev_priv, &results); } @@ -5694,28 +5756,28 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state, mutex_unlock(&dev_priv->wm.wm_mutex); } -static inline void skl_wm_level_from_reg_val(uint32_t val, +static inline void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) { level->plane_en = val & PLANE_WM_EN; + level->ignore_lines = val & PLANE_WM_IGNORE_LINES; level->plane_res_b = val & PLANE_WM_BLOCKS_MASK; level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) & PLANE_WM_LINES_MASK; } -void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, +void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; int level, max_level; enum plane_id plane_id; - uint32_t val; + u32 val; max_level = ilk_wm_max_level(dev_priv); - for_each_plane_id_on_crtc(intel_crtc, plane_id) { + for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_plane_wm *wm = &out->planes[plane_id]; for (level = 0; level <= max_level; level++) { @@ -5735,30 +5797,27 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, skl_wm_level_from_reg_val(val, &wm->trans_wm); } - if (!intel_crtc->active) + if (!crtc->active) return; out->linetime = I915_READ(PIPE_WM_LINETIME(pipe)); } -void skl_wm_get_hw_state(struct drm_device *dev) +void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct skl_ddb_values *hw = &dev_priv->wm.skl_hw; struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; - struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; + struct intel_crtc *crtc; struct intel_crtc_state *cstate; skl_ddb_get_hw_state(dev_priv, ddb); - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - intel_crtc = to_intel_crtc(crtc); - cstate = to_intel_crtc_state(crtc->state); + for_each_intel_crtc(&dev_priv->drm, crtc) { + cstate = to_intel_crtc_state(crtc->base.state); skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal); - if (intel_crtc->active) - hw->dirty_pipes |= drm_crtc_mask(crtc); + if (crtc->active) + hw->dirty_pipes |= drm_crtc_mask(&crtc->base); } if (dev_priv->active_crtcs) { @@ -5767,15 +5826,14 @@ void skl_wm_get_hw_state(struct drm_device *dev) } } -static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) +static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); + struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state); struct intel_pipe_wm *active = &cstate->wm.ilk.optimal; - enum pipe pipe = intel_crtc->pipe; + enum pipe pipe = crtc->pipe; static const i915_reg_t wm0_pipe_reg[] = { [PIPE_A] = WM0_PIPEA_ILK, [PIPE_B] = WM0_PIPEB_ILK, @@ -5788,7 +5846,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) memset(active, 0, sizeof(*active)); - active->pipe_enabled = intel_crtc->active; + active->pipe_enabled = crtc->active; if 
(active->pipe_enabled) { u32 tmp = hw->wm_pipe[pipe]; @@ -5816,7 +5874,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) active->wm[level].enable = true; } - intel_crtc->wm.active.ilk = *active; + crtc->wm.active.ilk = *active; } #define _FW_WM(value, plane) \ @@ -5827,7 +5885,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) static void g4x_read_wm_values(struct drm_i915_private *dev_priv, struct g4x_wm_values *wm) { - uint32_t tmp; + u32 tmp; tmp = I915_READ(DSPFW1); wm->sr.plane = _FW_WM(tmp, SR); @@ -5854,7 +5912,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv, struct vlv_wm_values *wm) { enum pipe pipe; - uint32_t tmp; + u32 tmp; for_each_pipe(dev_priv, pipe) { tmp = I915_READ(VLV_DDL(pipe)); @@ -5926,9 +5984,8 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv, #undef _FW_WM #undef _FW_WM_VLV -void g4x_wm_get_hw_state(struct drm_device *dev) +void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct g4x_wm_values *wm = &dev_priv->wm.g4x; struct intel_crtc *crtc; @@ -5936,7 +5993,7 @@ void g4x_wm_get_hw_state(struct drm_device *dev) wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; - for_each_intel_crtc(dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct g4x_wm_state *active = &crtc->wm.active.g4x; @@ -6067,9 +6124,8 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->wm.wm_mutex); } -void vlv_wm_get_hw_state(struct drm_device *dev) +void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct vlv_wm_values *wm = &dev_priv->wm.vlv; struct intel_crtc *crtc; u32 val; @@ -6082,7 +6138,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) if (IS_CHERRYVIEW(dev_priv)) { mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); if (val & DSP_MAXFIFO_PM5_ENABLE) wm->level = VLV_WM_LEVEL_PM5; @@ -6113,7 +6169,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) mutex_unlock(&dev_priv->pcu_lock); } - for_each_intel_crtc(dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct vlv_wm_state *active = &crtc->wm.active.vlv; @@ -6230,15 +6286,14 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) */ } -void ilk_wm_get_hw_state(struct drm_device *dev) +void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; - struct drm_crtc *crtc; + struct intel_crtc *crtc; ilk_init_lp_watermarks(dev_priv); - for_each_crtc(dev, crtc) + for_each_intel_crtc(&dev_priv->drm, crtc) ilk_pipe_wm_get_hw_state(crtc); hw->wm_lp[0] = I915_READ(WM1_LP_ILK); @@ -6339,10 +6394,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv) */ DEFINE_SPINLOCK(mchdev_lock); -/* Global for IPS driver to get at the current i915 device. Protected by - * mchdev_lock. */ -static struct drm_i915_private *i915_mch_dev; - bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val) { u16 rgvswctl; @@ -6730,9 +6781,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) * punit into committing the voltage change) as that takes a lot less * power than the render powerwell. 
*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA); err = valleyview_set_rps(dev_priv, val); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA); if (err) DRM_ERROR("Failed to set RPS for idle\n"); @@ -6792,8 +6843,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->pcu_lock); } -void gen6_rps_boost(struct i915_request *rq, - struct intel_rps_client *rps_client) +void gen6_rps_boost(struct i915_request *rq) { struct intel_rps *rps = &rq->i915->gt_pm.rps; unsigned long flags; @@ -6805,7 +6855,7 @@ void gen6_rps_boost(struct i915_request *rq, if (!rps->enabled) return; - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) + if (i915_request_signaled(rq)) return; /* Serializes with i915_request_retire() */ @@ -6822,7 +6872,7 @@ void gen6_rps_boost(struct i915_request *rq, if (READ_ONCE(rps->cur_freq) < rps->boost_freq) schedule_work(&rps->work); - atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts); + atomic_inc(&rps->boosts); } int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) @@ -6883,11 +6933,11 @@ static void valleyview_disable_rc6(struct drm_i915_private *dev_priv) { /* We're doing forcewake before Disabling RC6, * This what the BIOS expects when going into suspend */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); I915_WRITE(GEN6_RC_CONTROL, 0); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void valleyview_disable_rps(struct drm_i915_private *dev_priv) @@ -7046,10 +7096,10 @@ static void reset_rps(struct drm_i915_private *dev_priv, /* See the Gen9_GT_PM_Programming_Guide doc for the below */ static void gen9_enable_rps(struct drm_i915_private *dev_priv) { - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* Program defaults and thresholds for RPS */ - if (IS_GEN9(dev_priv)) + if (IS_GEN(dev_priv, 9)) I915_WRITE(GEN6_RC_VIDEO_FREQ, GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq)); @@ -7064,7 +7114,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv) * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ reset_rps(dev_priv, gen6_set_rps); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen9_enable_rc6(struct drm_i915_private *dev_priv) @@ -7078,7 +7128,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) /* 1b: Get forcewake during program sequence. Although the driver * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* 2a: Disable RC states. */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -7155,7 +7205,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GEN9_PG_ENABLE, GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen8_enable_rc6(struct drm_i915_private *dev_priv) @@ -7168,7 +7218,7 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv) /* 1b: Get forcewake during program sequence. 
Although the driver * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* 2a: Disable RC states. */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -7189,14 +7239,14 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv) GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_RC6_ENABLE); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen8_enable_rps(struct drm_i915_private *dev_priv) { struct intel_rps *rps = &dev_priv->gt_pm.rps; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* 1 Program defaults and thresholds for RPS*/ I915_WRITE(GEN6_RPNSWREQ, @@ -7229,7 +7279,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv) reset_rps(dev_priv, gen6_set_rps); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen6_enable_rc6(struct drm_i915_private *dev_priv) @@ -7249,7 +7299,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GTFIFODBG, gtfifodbg); } - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -7285,9 +7335,9 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) rc6vids = 0; ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - if (IS_GEN6(dev_priv) && ret) { + if (IS_GEN(dev_priv, 6) && ret) { DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); - } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { + } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); rc6vids &= 0xffff00; @@ -7297,7 +7347,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); } - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen6_enable_rps(struct drm_i915_private *dev_priv) @@ -7308,7 +7358,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) * Perhaps there might be some value in exposing these to * userspace... 
*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* Power down if completely idle for over 50ms */ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); @@ -7316,7 +7366,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) reset_rps(dev_priv, gen6_set_rps); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) @@ -7412,7 +7462,7 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); - switch (INTEL_INFO(dev_priv)->sseu.eu_total) { + switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) { case 8: /* (2 * 4) config */ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); @@ -7739,7 +7789,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv) /* 1a & 1b: Get forcewake during program sequence. Although the driver * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* Disable RC states. */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -7771,14 +7821,14 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv) rc6_mode = GEN7_RC_CTL_TO_MODE; I915_WRITE(GEN6_RC_CONTROL, rc6_mode); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void cherryview_enable_rps(struct drm_i915_private *dev_priv) { u32 val; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* 1: Program defaults and thresholds for RPS*/ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); @@ -7813,7 +7863,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv) reset_rps(dev_priv, valleyview_set_rps); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void valleyview_enable_rc6(struct drm_i915_private *dev_priv) @@ -7831,7 +7881,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GTFIFODBG, gtfifodbg); } - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* Disable RC states. 
*/ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -7856,14 +7906,14 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void valleyview_enable_rps(struct drm_i915_private *dev_priv) { u32 val; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); @@ -7897,7 +7947,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv) reset_rps(dev_priv, valleyview_set_rps); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static unsigned long intel_pxfreq(u32 vidfreq) @@ -7985,16 +8035,17 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) { - unsigned long val; + intel_wakeref_t wakeref; + unsigned long val = 0; - if (!IS_GEN5(dev_priv)) + if (!IS_GEN(dev_priv, 5)) return 0; - spin_lock_irq(&mchdev_lock); - - val = __i915_chipset_val(dev_priv); - - spin_unlock_irq(&mchdev_lock); + with_intel_runtime_pm(dev_priv, wakeref) { + spin_lock_irq(&mchdev_lock); + val = __i915_chipset_val(dev_priv); + spin_unlock_irq(&mchdev_lock); + } return val; } @@ -8071,14 +8122,16 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) void i915_update_gfx_val(struct drm_i915_private *dev_priv) { - if (!IS_GEN5(dev_priv)) - return; - - spin_lock_irq(&mchdev_lock); + intel_wakeref_t wakeref; - __i915_update_gfx_val(dev_priv); + if (!IS_GEN(dev_priv, 5)) + return; - spin_unlock_irq(&mchdev_lock); + with_intel_runtime_pm(dev_priv, wakeref) { + spin_lock_irq(&mchdev_lock); + __i915_update_gfx_val(dev_priv); + spin_unlock_irq(&mchdev_lock); + } } static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) @@ -8120,18 +8173,34 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) { - unsigned long val; + intel_wakeref_t wakeref; + unsigned long val = 0; - if (!IS_GEN5(dev_priv)) + if (!IS_GEN(dev_priv, 5)) return 0; - spin_lock_irq(&mchdev_lock); + with_intel_runtime_pm(dev_priv, wakeref) { + spin_lock_irq(&mchdev_lock); + val = __i915_gfx_val(dev_priv); + spin_unlock_irq(&mchdev_lock); + } - val = __i915_gfx_val(dev_priv); + return val; +} - spin_unlock_irq(&mchdev_lock); +static struct drm_i915_private *i915_mch_dev; - return val; +static struct drm_i915_private *mchdev_get(void) +{ + struct drm_i915_private *i915; + + rcu_read_lock(); + i915 = i915_mch_dev; + if (!kref_get_unless_zero(&i915->drm.ref)) + i915 = NULL; + rcu_read_unlock(); + + return i915; } /** @@ -8142,23 +8211,24 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) */ unsigned long i915_read_mch_val(void) { - struct drm_i915_private *dev_priv; - unsigned long chipset_val, graphics_val, ret = 0; - - spin_lock_irq(&mchdev_lock); - if (!i915_mch_dev) - goto out_unlock; - dev_priv = i915_mch_dev; + struct drm_i915_private *i915; + unsigned long chipset_val = 0; + unsigned long graphics_val = 0; + intel_wakeref_t wakeref; - chipset_val = __i915_chipset_val(dev_priv); - graphics_val = __i915_gfx_val(dev_priv); - - ret = chipset_val + graphics_val; + i915 = mchdev_get(); + if (!i915) + return 0; -out_unlock: - 
spin_unlock_irq(&mchdev_lock); + with_intel_runtime_pm(i915, wakeref) { + spin_lock_irq(&mchdev_lock); + chipset_val = __i915_chipset_val(i915); + graphics_val = __i915_gfx_val(i915); + spin_unlock_irq(&mchdev_lock); + } - return ret; + drm_dev_put(&i915->drm); + return chipset_val + graphics_val; } EXPORT_SYMBOL_GPL(i915_read_mch_val); @@ -8169,23 +8239,19 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val); */ bool i915_gpu_raise(void) { - struct drm_i915_private *dev_priv; - bool ret = true; - - spin_lock_irq(&mchdev_lock); - if (!i915_mch_dev) { - ret = false; - goto out_unlock; - } - dev_priv = i915_mch_dev; + struct drm_i915_private *i915; - if (dev_priv->ips.max_delay > dev_priv->ips.fmax) - dev_priv->ips.max_delay--; + i915 = mchdev_get(); + if (!i915) + return false; -out_unlock: + spin_lock_irq(&mchdev_lock); + if (i915->ips.max_delay > i915->ips.fmax) + i915->ips.max_delay--; spin_unlock_irq(&mchdev_lock); - return ret; + drm_dev_put(&i915->drm); + return true; } EXPORT_SYMBOL_GPL(i915_gpu_raise); @@ -8197,23 +8263,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise); */ bool i915_gpu_lower(void) { - struct drm_i915_private *dev_priv; - bool ret = true; - - spin_lock_irq(&mchdev_lock); - if (!i915_mch_dev) { - ret = false; - goto out_unlock; - } - dev_priv = i915_mch_dev; + struct drm_i915_private *i915; - if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) - dev_priv->ips.max_delay++; + i915 = mchdev_get(); + if (!i915) + return false; -out_unlock: + spin_lock_irq(&mchdev_lock); + if (i915->ips.max_delay < i915->ips.min_delay) + i915->ips.max_delay++; spin_unlock_irq(&mchdev_lock); - return ret; + drm_dev_put(&i915->drm); + return true; } EXPORT_SYMBOL_GPL(i915_gpu_lower); @@ -8224,13 +8286,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower); */ bool i915_gpu_busy(void) { - bool ret = false; + struct drm_i915_private *i915; + bool ret; - spin_lock_irq(&mchdev_lock); - if (i915_mch_dev) - ret = i915_mch_dev->gt.awake; - spin_unlock_irq(&mchdev_lock); + i915 = mchdev_get(); + if (!i915) + return false; + ret = i915->gt.awake; + + drm_dev_put(&i915->drm); return ret; } EXPORT_SYMBOL_GPL(i915_gpu_busy); @@ -8243,24 +8308,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy); */ bool i915_gpu_turbo_disable(void) { - struct drm_i915_private *dev_priv; - bool ret = true; - - spin_lock_irq(&mchdev_lock); - if (!i915_mch_dev) { - ret = false; - goto out_unlock; - } - dev_priv = i915_mch_dev; - - dev_priv->ips.max_delay = dev_priv->ips.fstart; + struct drm_i915_private *i915; + bool ret; - if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart)) - ret = false; + i915 = mchdev_get(); + if (!i915) + return false; -out_unlock: + spin_lock_irq(&mchdev_lock); + i915->ips.max_delay = i915->ips.fstart; + ret = ironlake_set_drps(i915, i915->ips.fstart); spin_unlock_irq(&mchdev_lock); + drm_dev_put(&i915->drm); return ret; } EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); @@ -8289,18 +8349,14 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv) { /* We only register the i915 ips part with intel-ips once everything is * set up, to avoid intel-ips sneaking in and reading bogus values. 
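/*
 * Sketch of the pattern the exported i915_gpu_*() hooks above now share:
 * grab a reference to the published device with mchdev_get(), do the work
 * under mchdev_lock, then drop the reference with drm_dev_put(). The hook
 * name below is hypothetical; the body mirrors i915_gpu_raise() above.
 */
bool sketch_i915_gpu_hook(void)
{
	struct drm_i915_private *i915;

	i915 = mchdev_get();
	if (!i915)
		return false;	/* driver not registered, or already on its way out */

	spin_lock_irq(&mchdev_lock);
	if (i915->ips.max_delay > i915->ips.fmax)
		i915->ips.max_delay--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);	/* balance the kref taken by mchdev_get() */
	return true;
}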
*/ - spin_lock_irq(&mchdev_lock); - i915_mch_dev = dev_priv; - spin_unlock_irq(&mchdev_lock); + rcu_assign_pointer(i915_mch_dev, dev_priv); ips_ping_for_i915_load(); } void intel_gpu_ips_teardown(void) { - spin_lock_irq(&mchdev_lock); - i915_mch_dev = NULL; - spin_unlock_irq(&mchdev_lock); + rcu_assign_pointer(i915_mch_dev, NULL); } static void intel_init_emon(struct drm_i915_private *dev_priv) @@ -8410,7 +8466,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) intel_freq_opcode(dev_priv, 450)); /* After setting max-softlimit, find the overclock max freq */ - if (IS_GEN6(dev_priv) || + if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { u32 params = 0; @@ -8438,22 +8494,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) pm_runtime_put(&dev_priv->drm.pdev->dev); } -/** - * intel_suspend_gt_powersave - suspend PM work and helper threads - * @dev_priv: i915 device - * - * We don't want to disable RC6 or other features here, we just want - * to make sure any work we've queued has finished and won't bother - * us while we're suspended. - */ -void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) -{ - if (INTEL_GEN(dev_priv) < 6) - return; - - /* gen6_rps_idle() will be called later to disable interrupts */ -} - void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) { dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */ @@ -8639,7 +8679,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) { - uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; + u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; /* * Required for FBC @@ -8711,7 +8751,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) { int pipe; - uint32_t val; + u32 val; /* * On Ibex Peak and Cougar Point, we need to disable clock @@ -8746,7 +8786,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) { - uint32_t tmp; + u32 tmp; tmp = I915_READ(MCH_SSKPD); if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) @@ -8756,7 +8796,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) { - uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; + u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); @@ -8850,7 +8890,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) { - uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); + u32 reg = I915_READ(GEN7_FF_THREAD_MODE); /* * WaVSThreadDispatchOverride:ivb,vlv @@ -8886,7 +8926,7 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv) static void lpt_suspend_hw(struct drm_i915_private *dev_priv) { if (HAS_PCH_LPT_LP(dev_priv)) { - uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); + u32 val = I915_READ(SOUTH_DSPCLK_GATE_D); val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; I915_WRITE(SOUTH_DSPCLK_GATE_D, val); @@ -9124,7 +9164,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) { - uint32_t snpcr; + u32 snpcr; I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); @@ -9333,7 +9373,7 @@ static void 
chv_init_clock_gating(struct drm_i915_private *dev_priv) static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) { - uint32_t dspclk_gate; + u32 dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | @@ -9480,9 +9520,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) dev_priv->display.init_clock_gating = ivb_init_clock_gating; else if (IS_VALLEYVIEW(dev_priv)) dev_priv->display.init_clock_gating = vlv_init_clock_gating; - else if (IS_GEN6(dev_priv)) + else if (IS_GEN(dev_priv, 6)) dev_priv->display.init_clock_gating = gen6_init_clock_gating; - else if (IS_GEN5(dev_priv)) + else if (IS_GEN(dev_priv, 5)) dev_priv->display.init_clock_gating = ilk_init_clock_gating; else if (IS_G4X(dev_priv)) dev_priv->display.init_clock_gating = g4x_init_clock_gating; @@ -9490,11 +9530,11 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) dev_priv->display.init_clock_gating = i965gm_init_clock_gating; else if (IS_I965G(dev_priv)) dev_priv->display.init_clock_gating = i965g_init_clock_gating; - else if (IS_GEN3(dev_priv)) + else if (IS_GEN(dev_priv, 3)) dev_priv->display.init_clock_gating = gen3_init_clock_gating; else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) dev_priv->display.init_clock_gating = i85x_init_clock_gating; - else if (IS_GEN2(dev_priv)) + else if (IS_GEN(dev_priv, 2)) dev_priv->display.init_clock_gating = i830_init_clock_gating; else { MISSING_CASE(INTEL_DEVID(dev_priv)); @@ -9508,7 +9548,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) /* For cxsr */ if (IS_PINEVIEW(dev_priv)) i915_pineview_get_mem_freq(dev_priv); - else if (IS_GEN5(dev_priv)) + else if (IS_GEN(dev_priv, 5)) i915_ironlake_get_mem_freq(dev_priv); /* For FIFO watermark updates */ @@ -9520,9 +9560,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv) } else if (HAS_PCH_SPLIT(dev_priv)) { ilk_setup_wm_latency(dev_priv); - if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] && + if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] && dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || - (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] && + (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] && dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; dev_priv->display.compute_intermediate_wm = @@ -9549,7 +9589,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->display.initial_watermarks = g4x_initial_watermarks; dev_priv->display.optimize_watermarks = g4x_optimize_watermarks; } else if (IS_PINEVIEW(dev_priv)) { - if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv), + if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), dev_priv->is_ddr3, dev_priv->fsb_freq, dev_priv->mem_freq)) { @@ -9563,12 +9603,12 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->display.update_wm = NULL; } else dev_priv->display.update_wm = pineview_update_wm; - } else if (IS_GEN4(dev_priv)) { + } else if (IS_GEN(dev_priv, 4)) { dev_priv->display.update_wm = i965_update_wm; - } else if (IS_GEN3(dev_priv)) { + } else if (IS_GEN(dev_priv, 3)) { dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i9xx_get_fifo_size; - } else if (IS_GEN2(dev_priv)) { + } else if (IS_GEN(dev_priv, 2)) { if (INTEL_INFO(dev_priv)->num_pipes == 1) { dev_priv->display.update_wm = i845_update_wm; dev_priv->display.get_fifo_size = i845_get_fifo_size; @@ -9583,7 +9623,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) static 
inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv) { - uint32_t flags = + u32 flags = I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; switch (flags) { @@ -9606,7 +9646,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv) static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv) { - uint32_t flags = + u32 flags = I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; switch (flags) { @@ -9647,7 +9687,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val I915_WRITE_FW(GEN6_PCODE_DATA1, 0); I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - if (__intel_wait_for_register_fw(dev_priv, + if (__intel_wait_for_register_fw(&dev_priv->uncore, GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, 500, 0, NULL)) { DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n", @@ -9695,7 +9735,7 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, I915_WRITE_FW(GEN6_PCODE_DATA1, 0); I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - if (__intel_wait_for_register_fw(dev_priv, + if (__intel_wait_for_register_fw(&dev_priv->uncore, GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, fast_timeout_us, slow_timeout_ms, NULL)) { @@ -9919,6 +9959,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv, u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, const i915_reg_t reg) { + struct intel_uncore *uncore = &dev_priv->uncore; u64 time_hw, prev_hw, overflow_hw; unsigned int fw_domains; unsigned long flags; @@ -9940,10 +9981,10 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency))) return 0; - fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); + fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); - spin_lock_irqsave(&dev_priv->uncore.lock, flags); - intel_uncore_forcewake_get__locked(dev_priv, fw_domains); + spin_lock_irqsave(&uncore->lock, flags); + intel_uncore_forcewake_get__locked(uncore, fw_domains); /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { @@ -9962,7 +10003,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, } overflow_hw = BIT_ULL(32); - time_hw = I915_READ_FW(reg); + time_hw = intel_uncore_read_fw(uncore, reg); } /* @@ -9984,8 +10025,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, time_hw += dev_priv->gt_pm.rc6.cur_residency[i]; dev_priv->gt_pm.rc6.cur_residency[i] = time_hw; - intel_uncore_forcewake_put__locked(dev_priv, fw_domains); - spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); + intel_uncore_forcewake_put__locked(uncore, fw_domains); + spin_unlock_irqrestore(&uncore->lock, flags); return mul_u64_u32_div(time_hw, mul, div); } diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 419e56342523..ec874d802d48 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -51,7 +51,7 @@ * must be correctly synchronized/cancelled when shutting down the pipe." 
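/*
 * Sketch (not in the patch) of the access pattern intel_rc6_residency_ns()
 * above now uses: look up the forcewake domains needed for the register,
 * take them under uncore->lock with the __locked variants, and use the _fw
 * (no implicit forcewake) reader while the lock is held. Helper name and the
 * particular register are placeholders.
 */
static u32 sketch_read_counter_fw(struct intel_uncore *uncore, i915_reg_t reg)
{
	unsigned int fw_domains;
	unsigned long flags;
	u32 val;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	val = intel_uncore_read_fw(uncore, reg);	/* raw read, no fw handling */

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return val;
}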
*/ -#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include "intel_drv.h" #include "i915_drv.h" @@ -71,15 +71,12 @@ static bool psr_global_enabled(u32 debug) static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state) { - /* Disable PSR2 by default for all platforms */ - if (i915_modparams.enable_psr == -1) - return false; - /* Cannot enable DSC and PSR2 simultaneously */ WARN_ON(crtc_state->dsc_params.compression_enable && crtc_state->has_psr2); switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { + case I915_PSR_DEBUG_DISABLE: case I915_PSR_DEBUG_FORCE_PSR1: return false; default: @@ -231,7 +228,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) { - uint8_t dprx = 0; + u8 dprx = 0; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &dprx) != 1) @@ -241,7 +238,7 @@ static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) { - uint8_t alpm_caps = 0; + u8 alpm_caps = 0; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps) != 1) @@ -261,6 +258,32 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) return val; } +static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp) +{ + u16 val; + ssize_t r; + + /* + * Returning the default X granularity if granularity not required or + * if DPCD read fails + */ + if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) + return 4; + + r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2); + if (r != 2) + DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n"); + + /* + * Spec says that if the value read is 0 the default granularity should + * be used instead. 
+ */ + if (r != 2 || val == 0) + val = 4; + + return val; +} + void intel_psr_init_dpcd(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = @@ -274,10 +297,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", intel_dp->psr_dpcd[0]); + if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { + DRM_DEBUG_KMS("PSR support not currently available for this panel\n"); + return; + } + if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); return; } + dev_priv->psr.sink_support = true; dev_priv->psr.sink_sync_latency = intel_dp_get_sink_sync_latency(intel_dp); @@ -309,6 +338,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) if (dev_priv->psr.sink_psr2_support) { dev_priv->psr.colorimetry_support = intel_dp_get_colorimetry_status(intel_dp); + dev_priv->psr.su_x_granularity = + intel_dp_get_su_x_granulartiy(intel_dp); } } } @@ -351,7 +382,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 aux_clock_divider, aux_ctl; int i; - static const uint8_t aux_msg[] = { + static const u8 aux_msg[] = { [0] = DP_AUX_NATIVE_WRITE << 4, [1] = DP_SET_POWER >> 8, [2] = DP_SET_POWER & 0xff, @@ -388,44 +419,30 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) if (dev_priv->psr.psr2_enabled) { drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, DP_ALPM_ENABLE); - dpcd_val |= DP_PSR_ENABLE_PSR2; + dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; + } else { + if (dev_priv->psr.link_standby) + dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; + + if (INTEL_GEN(dev_priv) >= 8) + dpcd_val |= DP_PSR_CRC_VERIFICATION; } - if (dev_priv->psr.link_standby) - dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; - if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8) - dpcd_val |= DP_PSR_CRC_VERIFICATION; drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); } -static void hsw_activate_psr1(struct intel_dp *intel_dp) +static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 max_sleep_time = 0x1f; - u32 val = EDP_PSR_ENABLE; - - /* Let's use 6 as the minimum to cover all known cases including the - * off-by-one issue that HW has in some cases. 
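/*
 * Sketch of the "DPCD capability with a safe default" shape used by
 * intel_dp_get_su_x_granulartiy() above: only consult the sink when it marks
 * the granularity as required, and fall back to the spec default if the read
 * fails or returns 0. Helper name is hypothetical.
 */
static u16 sketch_read_su_granularity(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;	/* sink does not require a specific granularity */

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2 || val == 0)
		return 4;	/* read failed, or "use default" per the note above */

	return val;
}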
- */ - int idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - - /* sink_sync_latency of 8 means source has to wait for more than 8 - * frames, we'll go with 9 frames for now - */ - idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); - val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; + u32 val = 0; - val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; - if (IS_HASWELL(dev_priv)) - val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; - - if (dev_priv->psr.link_standby) - val |= EDP_PSR_LINK_STANDBY; + if (INTEL_GEN(dev_priv) >= 11) + val |= EDP_PSR_TP4_TIME_0US; if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0) - val |= EDP_PSR_TP1_TIME_0us; + val |= EDP_PSR_TP1_TIME_0us; else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100) val |= EDP_PSR_TP1_TIME_100us; else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500) @@ -434,7 +451,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) val |= EDP_PSR_TP1_TIME_2500us; if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0) - val |= EDP_PSR_TP2_TP3_TIME_0us; + val |= EDP_PSR_TP2_TP3_TIME_0us; else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) val |= EDP_PSR_TP2_TP3_TIME_100us; else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) @@ -448,6 +465,35 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) else val |= EDP_PSR_TP1_TP2_SEL; + return val; +} + +static void hsw_activate_psr1(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u32 max_sleep_time = 0x1f; + u32 val = EDP_PSR_ENABLE; + + /* Let's use 6 as the minimum to cover all known cases including the + * off-by-one issue that HW has in some cases. + */ + int idle_frames = max(6, dev_priv->vbt.psr.idle_frames); + + /* sink_sync_latency of 8 means source has to wait for more than 8 + * frames, we'll go with 9 frames for now + */ + idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); + val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; + + val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; + if (IS_HASWELL(dev_priv)) + val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; + + if (dev_priv->psr.link_standby) + val |= EDP_PSR_LINK_STANDBY; + + val |= intel_psr1_get_tp_time(intel_dp); + if (INTEL_GEN(dev_priv) >= 8) val |= EDP_PSR_CRC_ENABLE; @@ -468,25 +514,30 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT; - /* FIXME: selective update is probably totally broken because it doesn't - * mesh at all with our frontbuffer tracking. And the hw alone isn't - * good enough. 
*/ val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) val |= EDP_Y_COORDINATE_ENABLE; val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1); - if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 && - dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50) + if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && + dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) val |= EDP_PSR2_TP2_TIME_50us; - else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) + else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100) val |= EDP_PSR2_TP2_TIME_100us; - else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) + else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500) val |= EDP_PSR2_TP2_TIME_500us; else val |= EDP_PSR2_TP2_TIME_2500us; + /* + * FIXME: There is probably a issue in DMC firmwares(icl_dmc_ver1_07.bin + * and kbl_dmc_ver1_04.bin at least) that causes PSR2 SU to fail after + * exiting DC6 if EDP_PSR_TP1_TP3_SEL is kept in PSR_CTL, so for now + * lets workaround the issue by cleaning PSR_CTL before enable PSR2. + */ + I915_WRITE(EDP_PSR_CTL, 0); + I915_WRITE(EDP_PSR2_CTL, val); } @@ -498,11 +549,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay; int psr_max_h = 0, psr_max_v = 0; - /* - * FIXME psr2_support is messed up. It's both computed - * dynamically during PSR enable, and extracted from sink - * caps during eDP detection. - */ if (!dev_priv->psr.sink_psr2_support) return false; @@ -519,7 +565,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { psr_max_h = 4096; psr_max_v = 2304; - } else if (IS_GEN9(dev_priv)) { + } else if (IS_GEN(dev_priv, 9)) { psr_max_h = 3640; psr_max_v = 2304; } @@ -531,6 +577,23 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + /* + * HW sends SU blocks of size four scan lines, which means the starting + * X coordinate and Y granularity requirements will always be met. We + * only need to validate the SU block width is a multiple of + * x granularity. 
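/*
 * Sketch of the PSR2 eligibility checks added above, condensed into one
 * hypothetical helper: the pipe must fit the per-generation PSR2 limits, the
 * active width must be a multiple of the sink's SU X granularity (the HW
 * always sends SU blocks four scan lines tall, so only X needs checking),
 * and pipe CRC must not be in use.
 */
static bool sketch_psr2_ok(struct drm_i915_private *dev_priv,
			   const struct intel_crtc_state *crtc_state)
{
	int hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int max_h = 0, max_v = 0;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_h = 4096;
		max_v = 2304;
	} else if (IS_GEN(dev_priv, 9)) {
		max_h = 3640;
		max_v = 2304;
	}

	if (hdisplay > max_h || vdisplay > max_v)
		return false;

	if (hdisplay % dev_priv->psr.su_x_granularity)
		return false;	/* SU block width must align with sink granularity */

	if (crtc_state->crc_enabled)
		return false;	/* would inhibit pipe CRC calculation */

	return true;
}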
+ */ + if (crtc_hdisplay % dev_priv->psr.su_x_granularity) { + DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n", + crtc_hdisplay, dev_priv->psr.su_x_granularity); + return false; + } + + if (crtc_state->crc_enabled) { + DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n"); + return false; + } + return true; } @@ -641,17 +704,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) hsw_psr_setup_aux(intel_dp); - if (dev_priv->psr.psr2_enabled) { + if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) && + !IS_GEMINILAKE(dev_priv))) { i915_reg_t reg = gen9_chicken_trans_reg(dev_priv, cpu_transcoder); u32 chicken = I915_READ(reg); - if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) - chicken |= (PSR2_VSC_ENABLE_PROG_HEADER - | PSR2_ADD_VERTICAL_LINE_COUNT); - - else - chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL; + chicken |= PSR2_VSC_ENABLE_PROG_HEADER | + PSR2_ADD_VERTICAL_LINE_COUNT; I915_WRITE(reg, chicken); } @@ -677,8 +737,11 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, { struct intel_dp *intel_dp = dev_priv->psr.dp; - if (dev_priv->psr.enabled) - return; + WARN_ON(dev_priv->psr.enabled); + + dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); + dev_priv->psr.busy_frontbuffer_bits = 0; + dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; DRM_DEBUG_KMS("Enabling PSR%s\n", dev_priv->psr.psr2_enabled ? "2" : "1"); @@ -711,20 +774,13 @@ void intel_psr_enable(struct intel_dp *intel_dp, WARN_ON(dev_priv->drrs.dp); mutex_lock(&dev_priv->psr.lock); - if (dev_priv->psr.prepared) { - DRM_DEBUG_KMS("PSR already in use\n"); + + if (!psr_global_enabled(dev_priv->psr.debug)) { + DRM_DEBUG_KMS("PSR disabled by flag\n"); goto unlock; } - dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); - dev_priv->psr.busy_frontbuffer_bits = 0; - dev_priv->psr.prepared = true; - dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; - - if (psr_global_enabled(dev_priv->psr.debug)) - intel_psr_enable_locked(dev_priv, crtc_state); - else - DRM_DEBUG_KMS("PSR disabled by flag\n"); + intel_psr_enable_locked(dev_priv, crtc_state); unlock: mutex_unlock(&dev_priv->psr.lock); @@ -778,8 +834,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) } /* Wait till PSR is idle */ - if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0, - 2000)) + if (intel_wait_for_register(&dev_priv->uncore, + psr_status, psr_status_mask, 0, 2000)) DRM_ERROR("Timed out waiting PSR idle state\n"); /* Disable PSR on Sink */ @@ -807,18 +863,69 @@ void intel_psr_disable(struct intel_dp *intel_dp, return; mutex_lock(&dev_priv->psr.lock); - if (!dev_priv->psr.prepared) { - mutex_unlock(&dev_priv->psr.lock); - return; - } intel_psr_disable_locked(intel_dp); - dev_priv->psr.prepared = false; mutex_unlock(&dev_priv->psr.lock); cancel_work_sync(&dev_priv->psr.work); } +static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) +{ + /* + * Display WA #0884: all + * This documented WA for bxt can be safely applied + * broadly so we can force HW tracking to exit PSR + * instead of disabling and re-enabling. + * Workaround tells us to write 0 to CUR_SURFLIVE_A, + * but it makes more sense write to the current active + * pipe. 
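/*
 * Sketch: the PSR status polls in this hunk are converted to go through the
 * explicit uncore pointer. A condensed form of the wait performed by
 * intel_psr_disable_locked() above; the register/mask parameters stand in
 * for the per-platform PSR1/PSR2 status selection.
 */
static void sketch_wait_psr_idle(struct drm_i915_private *dev_priv,
				 i915_reg_t psr_status, u32 psr_status_mask)
{
	/* Poll until the state bits clear, or give up after 2000ms */
	if (intel_wait_for_register(&dev_priv->uncore,
				    psr_status, psr_status_mask, 0,
				    2000))
		DRM_ERROR("Timed out waiting PSR idle state\n");
}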
+ */ + I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0); +} + +/** + * intel_psr_update - Update PSR state + * @intel_dp: Intel DP + * @crtc_state: new CRTC state + * + * This functions will update PSR states, disabling, enabling or switching PSR + * version when executing fastsets. For full modeset, intel_psr_disable() and + * intel_psr_enable() should be called instead. + */ +void intel_psr_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct i915_psr *psr = &dev_priv->psr; + bool enable, psr2_enable; + + if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp) + return; + + mutex_lock(&dev_priv->psr.lock); + + enable = crtc_state->has_psr && psr_global_enabled(psr->debug); + psr2_enable = intel_psr2_enabled(dev_priv, crtc_state); + + if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) { + /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ + if (crtc_state->crc_enabled && psr->enabled) + psr_force_hw_tracking_exit(dev_priv); + + goto unlock; + } + + if (psr->enabled) + intel_psr_disable_locked(intel_dp); + + if (enable) + intel_psr_enable_locked(dev_priv, crtc_state); + +unlock: + mutex_unlock(&dev_priv->psr.lock); +} + /** * intel_psr_wait_for_idle - wait for PSR1 to idle * @new_crtc_state: new CRTC state @@ -849,7 +956,7 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, * defensive enough to cover everything. */ - return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS, + return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS, EDP_PSR_STATUS_STATE_MASK, EDP_PSR_STATUS_STATE_IDLE, 2, 50, out_value); @@ -874,7 +981,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->psr.lock); - err = intel_wait_for_register(dev_priv, reg, mask, 0, 50); + err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50); if (err) DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); @@ -883,36 +990,63 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) return err == 0 && dev_priv->psr.enabled; } -static bool switching_psr(struct drm_i915_private *dev_priv, - struct intel_crtc_state *crtc_state, - u32 mode) +static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) { - /* Can't switch psr state anyway if PSR2 is not supported. 
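/*
 * Usage sketch (hypothetical caller, not in the patch): intel_psr_update()
 * above is meant for fastsets, where the pipe stays up and PSR may be
 * toggled or switched between PSR1 and PSR2 in place; full modesets keep
 * using the disable/enable pair. Only the intel_psr_*() calls come from the
 * patch, the surrounding function and its arguments are placeholders.
 */
static void sketch_update_pipe_psr(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *old_state,
				   const struct intel_crtc_state *new_state,
				   bool fastset)
{
	if (fastset) {
		/* May enable, disable or switch PSR version without a modeset */
		intel_psr_update(intel_dp, new_state);
	} else {
		intel_psr_disable(intel_dp, old_state);
		/* ... full modeset programming ... */
		intel_psr_enable(intel_dp, new_state);
	}
}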
*/ - if (!crtc_state || !crtc_state->has_psr2) - return false; + struct drm_device *dev = &dev_priv->drm; + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_state *state; + struct drm_crtc *crtc; + int err; - if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1) - return true; + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; - if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1) - return true; + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); + state->acquire_ctx = &ctx; + +retry: + drm_for_each_crtc(crtc, dev) { + struct drm_crtc_state *crtc_state; + struct intel_crtc_state *intel_crtc_state; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + err = PTR_ERR(crtc_state); + goto error; + } + + intel_crtc_state = to_intel_crtc_state(crtc_state); + + if (crtc_state->active && intel_crtc_state->has_psr) { + /* Mark mode as changed to trigger a pipe->update() */ + crtc_state->mode_changed = true; + break; + } + } + + err = drm_atomic_commit(state); + +error: + if (err == -EDEADLK) { + drm_atomic_state_clear(state); + err = drm_modeset_backoff(&ctx); + if (!err) + goto retry; + } - return false; + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + drm_atomic_state_put(state); + + return err; } -int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, - struct drm_modeset_acquire_ctx *ctx, - u64 val) +int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) { - struct drm_device *dev = &dev_priv->drm; - struct drm_connector_state *conn_state; - struct intel_crtc_state *crtc_state = NULL; - struct drm_crtc_commit *commit; - struct drm_crtc *crtc; - struct intel_dp *dp; + const u32 mode = val & I915_PSR_DEBUG_MODE_MASK; + u32 old_mode; int ret; - bool enable; - u32 mode = val & I915_PSR_DEBUG_MODE_MASK; if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || mode > I915_PSR_DEBUG_FORCE_PSR1) { @@ -920,49 +1054,19 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, return -EINVAL; } - ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); - if (ret) - return ret; - - /* dev_priv->psr.dp should be set once and then never touched again. 
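/*
 * Sketch of the acquire/backoff idiom used by intel_psr_fastset_force()
 * above, reduced to its skeleton (the PSR-specific CRTC filtering is
 * dropped). On -EDEADLK the atomic state must be cleared and the ctx backed
 * off before retrying, and the ctx is always dropped and finished at the end.
 */
static int sketch_force_fastset(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		/* Marking the mode as changed forces a pipe update on commit */
		crtc_state->mode_changed = true;
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}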
*/ - dp = READ_ONCE(dev_priv->psr.dp); - conn_state = dp->attached_connector->base.state; - crtc = conn_state->crtc; - if (crtc) { - ret = drm_modeset_lock(&crtc->mutex, ctx); - if (ret) - return ret; - - crtc_state = to_intel_crtc_state(crtc->state); - commit = crtc_state->base.commit; - } else { - commit = conn_state->commit; - } - if (commit) { - ret = wait_for_completion_interruptible(&commit->hw_done); - if (ret) - return ret; - } - ret = mutex_lock_interruptible(&dev_priv->psr.lock); if (ret) return ret; - enable = psr_global_enabled(val); - - if (!enable || switching_psr(dev_priv, crtc_state, mode)) - intel_psr_disable_locked(dev_priv->psr.dp); - + old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK; dev_priv->psr.debug = val; - if (crtc) - dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); - intel_psr_irq_control(dev_priv, dev_priv->psr.debug); - if (dev_priv->psr.prepared && enable) - intel_psr_enable_locked(dev_priv, crtc_state); - mutex_unlock(&dev_priv->psr.lock); + + if (old_mode != mode) + ret = intel_psr_fastset_force(dev_priv); + return ret; } @@ -1080,18 +1184,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; /* By definition flush = invalidate + flush */ - if (frontbuffer_bits) { - /* - * Display WA #0884: all - * This documented WA for bxt can be safely applied - * broadly so we can force HW tracking to exit PSR - * instead of disabling and re-enabling. - * Workaround tells us to write 0 to CUR_SURFLIVE_A, - * but it makes more sense write to the current active - * pipe. - */ - I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0); - } + if (frontbuffer_bits) + psr_force_hw_tracking_exit(dev_priv); if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) schedule_work(&dev_priv->psr.work); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c5eb26a7ee79..8a19eee9c5d4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -29,11 +29,11 @@ #include <linux/log2.h> -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_gem_render_state.h" +#include "i915_reset.h" #include "i915_trace.h" #include "intel_drv.h" #include "intel_workarounds.h" @@ -43,19 +43,6 @@ */ #define LEGACY_REQUEST_SIZE 200 -static unsigned int __intel_ring_space(unsigned int head, - unsigned int tail, - unsigned int size) -{ - /* - * "If the Ring Buffer Head Pointer and the Tail Pointer are on the - * same cacheline, the Head Pointer must not be greater than the Tail - * Pointer." 
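/*
 * Editorial note on the ring-space helper being removed below: the
 * calculation it implemented is
 *
 *	space = (head - tail - CACHELINE_BYTES) & (size - 1)
 *
 * i.e. the ring is treated as a power-of-two circular buffer and one
 * cacheline is always kept free so that head and tail never share a
 * cacheline (the HW restriction quoted in its comment). Worked example,
 * assuming a 4096-byte ring and 64-byte cachelines: with head == tail == 0
 * the result is (0 - 0 - 64) & 4095 = 4032 usable bytes, not 4096. A mirror
 * of the removed helper, for reference:
 */
static inline unsigned int sketch_ring_space(unsigned int head,
					     unsigned int tail,
					     unsigned int size)
{
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}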
- */ - GEM_BUG_ON(!is_power_of_2(size)); - return (head - tail - CACHELINE_BYTES) & (size - 1); -} - unsigned int intel_ring_update_space(struct intel_ring *ring) { unsigned int space; @@ -69,19 +56,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring) static int gen2_render_ring_flush(struct i915_request *rq, u32 mode) { + unsigned int num_store_dw; u32 cmd, *cs; cmd = MI_FLUSH; - + num_store_dw = 0; if (mode & EMIT_INVALIDATE) cmd |= MI_READ_FLUSH; + if (mode & EMIT_FLUSH) + num_store_dw = 4; - cs = intel_ring_begin(rq, 2); + cs = intel_ring_begin(rq, 2 + 3 * num_store_dw); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = cmd; - *cs++ = MI_NOOP; + while (num_store_dw--) { + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cs++ = i915_scratch_offset(rq->i915); + *cs++ = 0; + } + *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH; + intel_ring_advance(rq, cs); return 0; @@ -124,7 +120,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) cmd = MI_FLUSH; if (mode & EMIT_INVALIDATE) { cmd |= MI_EXE_FLUSH; - if (IS_G4X(rq->i915) || IS_GEN5(rq->i915)) + if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5)) cmd |= MI_INVALIDATE_ISP; } @@ -208,7 +204,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) * really our business. That leaves only stall at scoreboard. */ static int -intel_emit_post_sync_nonzero_flush(struct i915_request *rq) +gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) { u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; u32 *cs; @@ -248,7 +244,7 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode) int ret; /* Force SNB workarounds for PIPE_CONTROL flushes */ - ret = intel_emit_post_sync_nonzero_flush(rq); + ret = gen6_emit_post_sync_nonzero_flush(rq); if (ret) return ret; @@ -291,6 +287,43 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode) return 0; } +static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +{ + /* First we do the gen6_emit_post_sync_nonzero_flush w/a */ + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; + *cs++ = 0; + *cs++ = 0; + + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = PIPE_CONTROL_QW_WRITE; + *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + + /* Finally we can flush and with it emit the breadcrumb */ + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_CS_STALL); + *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = rq->fence.seqno; + + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX; + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); + + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; +} + static int gen7_render_ring_cs_stall_wa(struct i915_request *rq) { @@ -370,11 +403,111 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode) return 0; } -static void ring_setup_phys_status_page(struct intel_engine_cs *engine) +static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +{ + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_GLOBAL_GTT_IVB | + 
PIPE_CONTROL_CS_STALL); + *cs++ = rq->timeline->hwsp_offset; + *cs++ = rq->fence.seqno; + + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = (PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_STORE_DATA_INDEX | + PIPE_CONTROL_GLOBAL_GTT_IVB); + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); + + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; +} + +static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +{ + GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = rq->fence.seqno; + + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); + + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; +} + +#define GEN7_XCS_WA 32 +static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) +{ + int i; + + GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = rq->fence.seqno; + + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); + + for (i = 0; i < GEN7_XCS_WA; i++) { + *cs++ = MI_STORE_DWORD_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR; + *cs++ = rq->fence.seqno; + } + + *cs++ = MI_FLUSH_DW; + *cs++ = 0; + *cs++ = 0; + + *cs++ = MI_USER_INTERRUPT; + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return cs; +} +#undef GEN7_XCS_WA + +static void set_hwstam(struct intel_engine_cs *engine, u32 mask) +{ + /* + * Keep the render interrupt unmasked as this papers over + * lost interrupts following a reset. 
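/*
 * Sketch of the contract every *_emit_breadcrumb() in this hunk follows,
 * condensed from gen6_xcs_emit_breadcrumb() above: post the fence seqno to
 * the timeline's status-page slot, post the hangcheck seqno, raise the user
 * interrupt, and record the new ring tail for the submit path.
 */
static u32 *sketch_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	/* The request's timeline must be backed by this engine's HWSP */
	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	/* Record where the ring tail must be advanced to on submission */
	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}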
+ */ + if (engine->class == RENDER_CLASS) { + if (INTEL_GEN(engine->i915) >= 6) + mask &= ~BIT(0); + else + mask &= ~I915_USER_INTERRUPT; + } + + intel_engine_set_hwsp_writemask(engine, mask); +} + +static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys) { struct drm_i915_private *dev_priv = engine->i915; - struct page *page = virt_to_page(engine->status_page.page_addr); - phys_addr_t phys = PFN_PHYS(page_to_pfn(page)); u32 addr; addr = lower_32_bits(phys); @@ -384,15 +517,30 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine) I915_WRITE(HWS_PGA, addr); } -static void intel_ring_setup_status_page(struct intel_engine_cs *engine) +static struct page *status_page(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj = engine->status_page.vma->obj; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + return sg_page(obj->mm.pages->sgl); +} + +static void ring_setup_phys_status_page(struct intel_engine_cs *engine) +{ + set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine)))); + set_hwstam(engine, ~0u); +} + +static void set_hwsp(struct intel_engine_cs *engine, u32 offset) { struct drm_i915_private *dev_priv = engine->i915; - i915_reg_t mmio; + i915_reg_t hwsp; - /* The ring status page addresses are no longer next to the rest of + /* + * The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. */ - if (IS_GEN7(dev_priv)) { + if (IS_GEN(dev_priv, 7)) { switch (engine->id) { /* * No more rings exist on Gen7. Default case is only to shut up @@ -400,57 +548,57 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) */ default: GEM_BUG_ON(engine->id); - case RCS: - mmio = RENDER_HWS_PGA_GEN7; + /* fallthrough */ + case RCS0: + hwsp = RENDER_HWS_PGA_GEN7; break; - case BCS: - mmio = BLT_HWS_PGA_GEN7; + case BCS0: + hwsp = BLT_HWS_PGA_GEN7; break; - case VCS: - mmio = BSD_HWS_PGA_GEN7; + case VCS0: + hwsp = BSD_HWS_PGA_GEN7; break; - case VECS: - mmio = VEBOX_HWS_PGA_GEN7; + case VECS0: + hwsp = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(dev_priv)) { - mmio = RING_HWS_PGA_GEN6(engine->mmio_base); + } else if (IS_GEN(dev_priv, 6)) { + hwsp = RING_HWS_PGA_GEN6(engine->mmio_base); } else { - mmio = RING_HWS_PGA(engine->mmio_base); + hwsp = RING_HWS_PGA(engine->mmio_base); } - if (INTEL_GEN(dev_priv) >= 6) { - u32 mask = ~0u; + I915_WRITE(hwsp, offset); + POSTING_READ(hwsp); +} - /* - * Keep the render interrupt unmasked as this papers over - * lost interrupts following a reset. 
- */ - if (engine->id == RCS) - mask &= ~BIT(0); +static void flush_cs_tlb(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; - I915_WRITE(RING_HWSTAM(engine->mmio_base), mask); - } + if (!IS_GEN_RANGE(dev_priv, 6, 7)) + return; - I915_WRITE(mmio, engine->status_page.ggtt_offset); - POSTING_READ(mmio); + /* ring should be idle before issuing a sync flush*/ + WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); - /* Flush the TLB for this page */ - if (IS_GEN(dev_priv, 6, 7)) { - i915_reg_t reg = RING_INSTPM(engine->mmio_base); + ENGINE_WRITE(engine, RING_INSTPM, + _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | + INSTPM_SYNC_FLUSH)); + if (intel_wait_for_register(engine->uncore, + RING_INSTPM(engine->mmio_base), + INSTPM_SYNC_FLUSH, 0, + 1000)) + DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", + engine->name); +} - /* ring should be idle before issuing a sync flush*/ - WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); +static void ring_setup_status_page(struct intel_engine_cs *engine) +{ + set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma)); + set_hwstam(engine, ~0u); - I915_WRITE(reg, - _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | - INSTPM_SYNC_FLUSH)); - if (intel_wait_for_register(dev_priv, - reg, INSTPM_SYNC_FLUSH, 0, - 1000)) - DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", - engine->name); - } + flush_cs_tlb(engine); } static bool stop_ring(struct intel_engine_cs *engine) @@ -458,32 +606,36 @@ static bool stop_ring(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; if (INTEL_GEN(dev_priv) > 2) { - I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); - if (intel_wait_for_register(dev_priv, + ENGINE_WRITE(engine, + RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING)); + if (intel_wait_for_register(engine->uncore, RING_MI_MODE(engine->mmio_base), MODE_IDLE, MODE_IDLE, 1000)) { DRM_ERROR("%s : timed out trying to stop ring\n", engine->name); - /* Sometimes we observe that the idle flag is not + + /* + * Sometimes we observe that the idle flag is not * set even though the ring is empty. So double * check before giving up. 
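/*
 * Sketch of the sync-flush handshake factored into flush_cs_tlb() above:
 * request a TLB invalidate with SYNC_FLUSH set via a masked-bit write, then
 * poll through the engine's uncore until the HW clears SYNC_FLUSH to signal
 * completion. Helper name is a placeholder.
 */
static void sketch_sync_flush_tlb(struct intel_engine_cs *engine)
{
	/* The ring is expected to be idle before issuing a sync flush */
	WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE(engine, RING_INSTPM,
		     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					INSTPM_SYNC_FLUSH));

	/* The HW acknowledges the flush by clearing INSTPM_SYNC_FLUSH */
	if (intel_wait_for_register(engine->uncore,
				    RING_INSTPM(engine->mmio_base),
				    INSTPM_SYNC_FLUSH, 0,
				    1000))
		DRM_ERROR("%s: timed out waiting for SyncFlush\n",
			  engine->name);
}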
*/ - if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine)) + if (ENGINE_READ(engine, RING_HEAD) != + ENGINE_READ(engine, RING_TAIL)) return false; } } - I915_WRITE_HEAD(engine, I915_READ_TAIL(engine)); + ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL)); - I915_WRITE_HEAD(engine, 0); - I915_WRITE_TAIL(engine, 0); + ENGINE_WRITE(engine, RING_HEAD, 0); + ENGINE_WRITE(engine, RING_TAIL, 0); /* The ring must be empty before it is disabled */ - I915_WRITE_CTL(engine, 0); + ENGINE_WRITE(engine, RING_CTL, 0); - return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0; + return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0; } static int init_ring_common(struct intel_engine_cs *engine) @@ -492,26 +644,26 @@ static int init_ring_common(struct intel_engine_cs *engine) struct intel_ring *ring = engine->buffer; int ret = 0; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); if (!stop_ring(engine)) { /* G45 ring initialization often fails to reset head to zero */ DRM_DEBUG_DRIVER("%s head not reset to zero " "ctl %08x head %08x tail %08x start %08x\n", engine->name, - I915_READ_CTL(engine), - I915_READ_HEAD(engine), - I915_READ_TAIL(engine), - I915_READ_START(engine)); + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_HEAD), + ENGINE_READ(engine, RING_TAIL), + ENGINE_READ(engine, RING_START)); if (!stop_ring(engine)) { DRM_ERROR("failed to set %s head to zero " "ctl %08x head %08x tail %08x start %08x\n", engine->name, - I915_READ_CTL(engine), - I915_READ_HEAD(engine), - I915_READ_TAIL(engine), - I915_READ_START(engine)); + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_HEAD), + ENGINE_READ(engine, RING_TAIL), + ENGINE_READ(engine, RING_START)); ret = -EIO; goto out; } @@ -520,30 +672,23 @@ static int init_ring_common(struct intel_engine_cs *engine) if (HWS_NEEDS_PHYSICAL(dev_priv)) ring_setup_phys_status_page(engine); else - intel_ring_setup_status_page(engine); + ring_setup_status_page(engine); intel_engine_reset_breadcrumbs(engine); - if (HAS_LEGACY_SEMAPHORES(engine->i915)) { - I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); - I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); - if (HAS_VEBOX(dev_priv)) - I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); - } - /* Enforce ordering by reading HEAD register back */ - I915_READ_HEAD(engine); + ENGINE_READ(engine, RING_HEAD); /* Initialize the ring. This must happen _after_ we've cleared the ring * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring * register values. */ - I915_WRITE_START(engine, i915_ggtt_offset(ring->vma)); + ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma)); /* WaClearRingBufHeadRegAtInit:ctg,elk */ - if (I915_READ_HEAD(engine)) + if (ENGINE_READ(engine, RING_HEAD)) DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n", - engine->name, I915_READ_HEAD(engine)); + engine->name, ENGINE_READ(engine, RING_HEAD)); /* Check that the ring offsets point within the ring! 
*/ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); @@ -551,96 +696,117 @@ static int init_ring_common(struct intel_engine_cs *engine) intel_ring_update_space(ring); /* First wake the ring up to an empty/idle ring */ - I915_WRITE_HEAD(engine, ring->head); - I915_WRITE_TAIL(engine, ring->head); - (void)I915_READ_TAIL(engine); + ENGINE_WRITE(engine, RING_HEAD, ring->head); + ENGINE_WRITE(engine, RING_TAIL, ring->head); + ENGINE_POSTING_READ(engine, RING_TAIL); - I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID); + ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID); /* If the head is still not zero, the ring is dead */ - if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base), + if (intel_wait_for_register(engine->uncore, + RING_CTL(engine->mmio_base), RING_VALID, RING_VALID, 50)) { DRM_ERROR("%s initialization failed " "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n", engine->name, - I915_READ_CTL(engine), - I915_READ_CTL(engine) & RING_VALID, - I915_READ_HEAD(engine), ring->head, - I915_READ_TAIL(engine), ring->tail, - I915_READ_START(engine), + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_CTL) & RING_VALID, + ENGINE_READ(engine, RING_HEAD), ring->head, + ENGINE_READ(engine, RING_TAIL), ring->tail, + ENGINE_READ(engine, RING_START), i915_ggtt_offset(ring->vma)); ret = -EIO; goto out; } if (INTEL_GEN(dev_priv) > 2) - I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_WRITE(engine, + RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); /* Now awake, let it get started */ if (ring->tail != ring->head) { - I915_WRITE_TAIL(engine, ring->tail); - (void)I915_READ_TAIL(engine); + ENGINE_WRITE(engine, RING_TAIL, ring->tail); + ENGINE_POSTING_READ(engine, RING_TAIL); } /* Papering over lost _interrupts_ immediately following the restart */ - intel_engine_wakeup(engine); + intel_engine_queue_breadcrumbs(engine); out: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); return ret; } -static struct i915_request *reset_prepare(struct intel_engine_cs *engine) +static void reset_prepare(struct intel_engine_cs *engine) { intel_engine_stop_cs(engine); - - if (engine->irq_seqno_barrier) - engine->irq_seqno_barrier(engine); - - return i915_gem_find_active_request(engine); } -static void skip_request(struct i915_request *rq) +static void reset_ring(struct intel_engine_cs *engine, bool stalled) { - void *vaddr = rq->ring->vaddr; + struct i915_timeline *tl = &engine->timeline; + struct i915_request *pos, *rq; + unsigned long flags; u32 head; - head = rq->infix; - if (rq->postfix < head) { - memset32(vaddr + head, MI_NOOP, - (rq->ring->size - head) / sizeof(u32)); - head = 0; + rq = NULL; + spin_lock_irqsave(&tl->lock, flags); + list_for_each_entry(pos, &tl->requests, link) { + if (!i915_request_completed(pos)) { + rq = pos; + break; + } } - memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32)); -} - -static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq) -{ - GEM_TRACE("%s request global=%d, current=%d\n", - engine->name, rq ? rq->global_seqno : 0, - intel_engine_get_seqno(engine)); /* - * Try to restore the logical GPU state to match the continuation - * of the request queue. If we skip the context/PD restore, then - * the next request may try to execute assuming that its context - * is valid and loaded on the GPU and so may try to access invalid - * memory, prompting repeated GPU hangs. 
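/*
 * Sketch of the (re)start sequence init_ring_common() above performs with
 * the new ENGINE_READ/ENGINE_WRITE accessors, reduced to the ordering that
 * matters; error handling and the G45/ctg workaround paths are omitted.
 */
static void sketch_restart_ring(struct intel_engine_cs *engine,
				struct intel_ring *ring)
{
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);

	/* Readback enforces ordering before the ring registers are loaded */
	ENGINE_READ(engine, RING_HEAD);

	/* Base address first, then wake the ring up empty (head == tail) */
	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
	ENGINE_WRITE(engine, RING_HEAD, ring->head);
	ENGINE_WRITE(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	/* Enable the ring and wait for the HW to report it valid */
	ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
	if (intel_wait_for_register(engine->uncore,
				    RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID, 50))
		DRM_ERROR("%s failed to start\n", engine->name);

	/* Only now submit any queued work by advancing the real tail */
	if (ring->tail != ring->head) {
		ENGINE_WRITE(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}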
+ * The guilty request will get skipped on a hung engine. * - * If the request was guilty, we still restore the logical state - * in case the next request requires it (e.g. the aliasing ppgtt), - * but skip over the hung batch. + * Users of client default contexts do not rely on logical + * state preserved between batches so it is safe to execute + * queued requests following the hang. Non default contexts + * rely on preserved state, so skipping a batch loses the + * evolution of the state and it needs to be considered corrupted. + * Executing more queued batches on top of corrupted state is + * risky. But we take the risk by trying to advance through + * the queued requests in order to make the client behaviour + * more predictable around resets, by not throwing away random + * amount of batches it has prepared for execution. Sophisticated + * clients can use gem_reset_stats_ioctl and dma fence status + * (exported via sync_file info ioctl on explicit fences) to observe + * when it loses the context state and should rebuild accordingly. * - * If the request was innocent, we try to replay the request with - * the restored context. + * The context ban, and ultimately the client ban, mechanism are safety + * valves if client submission ends up resulting in nothing more than + * subsequent hangs. */ + if (rq) { - /* If the rq hung, jump to its breadcrumb and skip the batch */ - rq->ring->head = intel_ring_wrap(rq->ring, rq->head); - if (rq->fence.error == -EIO) - skip_request(rq); + /* + * Try to restore the logical GPU state to match the + * continuation of the request queue. If we skip the + * context/PD restore, then the next request may try to execute + * assuming that its context is valid and loaded on the GPU and + * so may try to access invalid memory, prompting repeated GPU + * hangs. + * + * If the request was guilty, we still restore the logical + * state in case the next request requires it (e.g. the + * aliasing ppgtt), but skip over the hung batch. + * + * If the request was innocent, we try to replay the request + * with the restored context. 
+ */ + i915_reset_request(rq, stalled); + + GEM_BUG_ON(rq->ring != engine->buffer); + head = rq->head; + } else { + head = engine->buffer->tail; } + engine->buffer->head = intel_ring_wrap(engine->buffer, head); + + spin_unlock_irqrestore(&tl->lock, flags); } static void reset_finish(struct intel_engine_cs *engine) @@ -670,7 +836,7 @@ static int init_render_ring(struct intel_engine_cs *engine) return ret; /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ - if (IS_GEN(dev_priv, 4, 6)) + if (IS_GEN_RANGE(dev_priv, 4, 6)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); /* We need to disable the AsyncFlip performance optimisations in order @@ -679,22 +845,22 @@ static int init_render_ring(struct intel_engine_cs *engine) * * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv */ - if (IS_GEN(dev_priv, 6, 7)) + if (IS_GEN_RANGE(dev_priv, 6, 7)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); /* Required for the hardware to program scanline values for waiting */ /* WaEnableFlushTlbInvalidationMode:snb */ - if (IS_GEN6(dev_priv)) + if (IS_GEN(dev_priv, 6)) I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ - if (IS_GEN7(dev_priv)) + if (IS_GEN(dev_priv, 7)) I915_WRITE(GFX_MODE_GEN7, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); - if (IS_GEN6(dev_priv)) { + if (IS_GEN(dev_priv, 6)) { /* From the Sandybridge PRM, volume 1 part 3, page 24: * "If this bit is set, STCunit will have LRA as replacement * policy. [...] This bit must be reset. LRA replacement @@ -704,42 +870,15 @@ static int init_render_ring(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); } - if (IS_GEN(dev_priv, 6, 7)) + if (IS_GEN_RANGE(dev_priv, 6, 7)) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); if (INTEL_GEN(dev_priv) >= 6) - I915_WRITE_IMR(engine, ~engine->irq_keep_mask); + ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); return 0; } -static u32 *gen6_signal(struct i915_request *rq, u32 *cs) -{ - struct drm_i915_private *dev_priv = rq->i915; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int num_rings = 0; - - for_each_engine(engine, dev_priv, id) { - i915_reg_t mbox_reg; - - if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK)) - continue; - - mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id]; - if (i915_mmio_reg_valid(mbox_reg)) { - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = i915_mmio_reg_offset(mbox_reg); - *cs++ = rq->global_seqno; - num_rings++; - } - } - if (num_rings & 1) - *cs++ = MI_NOOP; - - return cs; -} - static void cancel_requests(struct intel_engine_cs *engine) { struct i915_request *request; @@ -749,19 +888,12 @@ static void cancel_requests(struct intel_engine_cs *engine) /* Mark all submitted requests as skipped. 
*/ list_for_each_entry(request, &engine->timeline.requests, link) { - GEM_BUG_ON(!request->global_seqno); + if (!i915_request_signaled(request)) + dma_fence_set_error(&request->fence, -EIO); - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &request->fence.flags)) - continue; - - dma_fence_set_error(&request->fence, -EIO); + i915_request_mark_complete(request); } - intel_write_status_page(engine, - I915_GEM_HWS_INDEX, - intel_engine_last_submit(engine)); - /* Remaining _unready_ requests will be nop'ed when submitted */ spin_unlock_irqrestore(&engine->timeline.lock, flags); @@ -769,102 +901,65 @@ static void cancel_requests(struct intel_engine_cs *engine) static void i9xx_submit_request(struct i915_request *request) { - struct drm_i915_private *dev_priv = request->i915; - i915_request_submit(request); - I915_WRITE_TAIL(request->engine, - intel_ring_set_tail(request->ring, request->tail)); + ENGINE_WRITE(request->engine, RING_TAIL, + intel_ring_set_tail(request->ring, request->tail)); } -static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) +static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) { + GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + + *cs++ = MI_FLUSH; + + *cs++ = MI_STORE_DWORD_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR; + *cs++ = rq->fence.seqno; + *cs++ = MI_STORE_DWORD_INDEX; - *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT; - *cs++ = rq->global_seqno; + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); + *cs++ = MI_USER_INTERRUPT; rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); -} - -static const int i9xx_emit_breadcrumb_sz = 4; -static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs) -{ - return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs)); + return cs; } -static int -gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal) +#define GEN5_WA_STORES 8 /* must be at least 1! */ +static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) { - u32 dw1 = MI_SEMAPHORE_MBOX | - MI_SEMAPHORE_COMPARE | - MI_SEMAPHORE_REGISTER; - u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id]; - u32 *cs; - - WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); + int i; - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) - return PTR_ERR(cs); + GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); - *cs++ = dw1 | wait_mbox; - /* Throughout all of the GEM code, seqno passed implies our current - * seqno is >= the last seqno executed. However for hardware the - * comparison is strictly greater than. - */ - *cs++ = signal->global_seqno - 1; - *cs++ = 0; - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); + *cs++ = MI_FLUSH; - return 0; -} + *cs++ = MI_STORE_DWORD_INDEX; + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); + + BUILD_BUG_ON(GEN5_WA_STORES < 1); + for (i = 0; i < GEN5_WA_STORES; i++) { + *cs++ = MI_STORE_DWORD_INDEX; + *cs++ = I915_GEM_HWS_SEQNO_ADDR; + *cs++ = rq->fence.seqno; + } -static void -gen5_seqno_barrier(struct intel_engine_cs *engine) -{ - /* MI_STORE are internally buffered by the GPU and not flushed - * either by MI_FLUSH or SyncFlush or any other combination of - * MI commands. - * - * "Only the submission of the store operation is guaranteed. 
- * The write result will be complete (coherent) some time later - * (this is practically a finite period but there is no guaranteed - * latency)." - * - * Empirically, we observe that we need a delay of at least 75us to - * be sure that the seqno write is visible by the CPU. - */ - usleep_range(125, 250); -} + *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_NOOP; -static void -gen6_seqno_barrier(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); - /* Workaround to force correct ordering between irq and seqno writes on - * ivb (and maybe also on snb) by reading from a CS register (like - * ACTHD) before reading the status page. - * - * Note that this effectively stalls the read by the time it takes to - * do a memory transaction, which more or less ensures that the write - * from the GPU has sufficient time to invalidate the CPU cacheline. - * Alternatively we could delay the interrupt from the CS ring to give - * the write time to land, but that would incur a delay after every - * batch i.e. much more frequent than a delay when waiting for the - * interrupt (with the same net latency). - * - * Also note that to prevent whole machine hangs on gen7, we have to - * take the spinlock to guard against concurrent cacheline access. - */ - spin_lock_irq(&dev_priv->uncore.lock); - POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); - spin_unlock_irq(&dev_priv->uncore.lock); + return cs; } +#undef GEN5_WA_STORES static void gen5_irq_enable(struct intel_engine_cs *engine) @@ -881,20 +976,16 @@ gen5_irq_disable(struct intel_engine_cs *engine) static void i9xx_irq_enable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - - dev_priv->irq_mask &= ~engine->irq_enable_mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ_FW(RING_IMR(engine->mmio_base)); + engine->i915->irq_mask &= ~engine->irq_enable_mask; + intel_uncore_write(engine->uncore, IMR, engine->i915->irq_mask); + intel_uncore_posting_read_fw(engine->uncore, IMR); } static void i9xx_irq_disable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - - dev_priv->irq_mask |= engine->irq_enable_mask; - I915_WRITE(IMR, dev_priv->irq_mask); + engine->i915->irq_mask |= engine->irq_enable_mask; + intel_uncore_write(engine->uncore, IMR, engine->i915->irq_mask); } static void @@ -934,39 +1025,38 @@ bsd_ring_flush(struct i915_request *rq, u32 mode) static void gen6_irq_enable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + ENGINE_WRITE(engine, RING_IMR, + ~(engine->irq_enable_mask | engine->irq_keep_mask)); - I915_WRITE_IMR(engine, - ~(engine->irq_enable_mask | - engine->irq_keep_mask)); - gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); + /* Flush/delay to ensure the RING_IMR is active before the GT IMR */ + ENGINE_POSTING_READ(engine, RING_IMR); + + gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask); } static void gen6_irq_disable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - - I915_WRITE_IMR(engine, ~engine->irq_keep_mask); - gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); + ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); + gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask); } static void hsw_vebox_irq_enable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask); + 
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */ + ENGINE_POSTING_READ(engine, RING_IMR); - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); - gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask); + gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask); } static void hsw_vebox_irq_disable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - - I915_WRITE_IMR(engine, ~0); - gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask); + ENGINE_WRITE(engine, RING_IMR, ~0); + gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask); } static int @@ -1082,6 +1172,10 @@ int intel_ring_pin(struct intel_ring *ring) GEM_BUG_ON(ring->vaddr); + ret = i915_timeline_pin(ring->timeline); + if (ret) + return ret; + flags = PIN_GLOBAL; /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ @@ -1092,34 +1186,29 @@ int intel_ring_pin(struct intel_ring *ring) else flags |= PIN_HIGH; - if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { - if (flags & PIN_MAPPABLE || map == I915_MAP_WC) - ret = i915_gem_object_set_to_gtt_domain(vma->obj, true); - else - ret = i915_gem_object_set_to_cpu_domain(vma->obj, true); - if (unlikely(ret)) - return ret; - } - ret = i915_vma_pin(vma, 0, 0, flags); if (unlikely(ret)) - return ret; + goto unpin_timeline; if (i915_vma_is_map_and_fenceable(vma)) addr = (void __force *)i915_vma_pin_iomap(vma); else addr = i915_gem_object_pin_map(vma->obj, map); - if (IS_ERR(addr)) - goto err; + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); + goto unpin_ring; + } vma->obj->pin_global++; ring->vaddr = addr; return 0; -err: +unpin_ring: i915_vma_unpin(vma); - return PTR_ERR(addr); +unpin_timeline: + i915_timeline_unpin(ring->timeline); + return ret; } void intel_ring_reset(struct intel_ring *ring, u32 tail) @@ -1148,6 +1237,8 @@ void intel_ring_unpin(struct intel_ring *ring) ring->vma->obj->pin_global--; i915_vma_unpin(ring->vma); + + i915_timeline_unpin(ring->timeline); } static struct i915_vma * @@ -1198,6 +1289,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, if (!ring) return ERR_PTR(-ENOMEM); + kref_init(&ring->ref); INIT_LIST_HEAD(&ring->request_list); ring->timeline = i915_timeline_get(timeline); @@ -1222,9 +1314,9 @@ intel_engine_create_ring(struct intel_engine_cs *engine, return ring; } -void -intel_ring_free(struct intel_ring *ring) +void intel_ring_free(struct kref *ref) { + struct intel_ring *ring = container_of(ref, typeof(*ring), ref); struct drm_i915_gem_object *obj = ring->vma->obj; i915_vma_close(ring->vma); @@ -1234,17 +1326,24 @@ intel_ring_free(struct intel_ring *ring) kfree(ring); } -static void intel_ring_context_destroy(struct intel_context *ce) +static void __ring_context_fini(struct intel_context *ce) { - GEM_BUG_ON(ce->pin_count); - - if (!ce->state) - return; - GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); i915_gem_object_put(ce->state->obj); } +static void ring_context_destroy(struct kref *ref) +{ + struct intel_context *ce = container_of(ref, typeof(*ce), ref); + + GEM_BUG_ON(intel_context_is_pinned(ce)); + + if (ce->state) + __ring_context_fini(ce); + + intel_context_free(ce); +} + static int __context_pin_ppgtt(struct i915_gem_context *ctx) { struct i915_hw_ppgtt *ppgtt; @@ -1275,17 +1374,6 @@ static int __context_pin(struct intel_context *ce) if (!vma) return 0; - /* - * Clear this page out of any CPU caches for coherent swap-in/out. - * We only want to do this on the first bind so that we do not stall - * on an active context (which by nature is already on the GPU). 
- */ - if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { - err = i915_gem_object_set_to_gtt_domain(vma->obj, true); - if (err) - return err; - } - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) return err; @@ -1295,6 +1383,7 @@ static int __context_pin(struct intel_context *ce) * it cannot reclaim the object until we release it. */ vma->obj->pin_global++; + vma->obj->mm.dirty = true; return 0; } @@ -1311,12 +1400,10 @@ static void __context_unpin(struct intel_context *ce) i915_vma_unpin(vma); } -static void intel_ring_context_unpin(struct intel_context *ce) +static void ring_context_unpin(struct intel_context *ce) { __context_unpin_ppgtt(ce->gem_context); __context_unpin(ce); - - i915_gem_context_put(ce->gem_context); } static struct i915_vma * @@ -1331,6 +1418,24 @@ alloc_context_vma(struct intel_engine_cs *engine) if (IS_ERR(obj)) return ERR_CAST(obj); + /* + * Try to make the context utilize L3 as well as LLC. + * + * On VLV we don't have L3 controls in the PTEs so we + * shouldn't touch the cache level, especially as that + * would make the object snooped which might have a + * negative performance impact. + * + * Snooping is required on non-llc platforms in execlist + * mode, but since all GGTT accesses use PAT entry 0 we + * get snooping anyway regardless of cache_level. + * + * This is only applicable for Ivy Bridge devices since + * later platforms don't have L3 control bits in the PTE. + */ + if (IS_IVYBRIDGE(i915)) + i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC); + if (engine->default_state) { void *defaults, *vaddr; @@ -1348,29 +1453,10 @@ alloc_context_vma(struct intel_engine_cs *engine) } memcpy(vaddr, defaults, engine->context_size); - i915_gem_object_unpin_map(engine->default_state); - i915_gem_object_unpin_map(obj); - } - /* - * Try to make the context utilize L3 as well as LLC. - * - * On VLV we don't have L3 controls in the PTEs so we - * shouldn't touch the cache level, especially as that - * would make the object snooped which might have a - * negative performance impact. - * - * Snooping is required on non-llc platforms in execlist - * mode, but since all GGTT accesses use PAT entry 0 we - * get snooping anyway regardless of cache_level. - * - * This is only applicable for Ivy Bridge devices since - * later platforms don't have L3 control bits in the PTE. 
- */ - if (IS_IVYBRIDGE(i915)) { - /* Ignore any error, regard it as a simple optimisation */ - i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); } vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); @@ -1388,83 +1474,62 @@ err_obj: return ERR_PTR(err); } -static struct intel_context * -__ring_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx, - struct intel_context *ce) +static int ring_context_pin(struct intel_context *ce) { + struct intel_engine_cs *engine = ce->engine; int err; + /* One ringbuffer to rule them all */ + GEM_BUG_ON(!engine->buffer); + ce->ring = engine->buffer; + if (!ce->state && engine->context_size) { struct i915_vma *vma; vma = alloc_context_vma(engine); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err; - } + if (IS_ERR(vma)) + return PTR_ERR(vma); ce->state = vma; } err = __context_pin(ce); if (err) - goto err; + return err; err = __context_pin_ppgtt(ce->gem_context); if (err) goto err_unpin; - i915_gem_context_get(ctx); - - /* One ringbuffer to rule them all */ - GEM_BUG_ON(!engine->buffer); - ce->ring = engine->buffer; - - return ce; + return 0; err_unpin: __context_unpin(ce); -err: - ce->pin_count = 0; - return ERR_PTR(err); + return err; } static const struct intel_context_ops ring_context_ops = { - .unpin = intel_ring_context_unpin, - .destroy = intel_ring_context_destroy, + .pin = ring_context_pin, + .unpin = ring_context_unpin, + .destroy = ring_context_destroy, }; -static struct intel_context * -intel_ring_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) -{ - struct intel_context *ce = to_intel_context(ctx, engine); - - lockdep_assert_held(&ctx->i915->drm.struct_mutex); - - if (likely(ce->pin_count++)) - return ce; - GEM_BUG_ON(!ce->pin_count); /* no overflow please! 
*/ - - ce->ops = &ring_context_ops; - - return __ring_context_pin(engine, ctx, ce); -} - static int intel_init_ring_buffer(struct intel_engine_cs *engine) { struct i915_timeline *timeline; struct intel_ring *ring; int err; - intel_engine_setup_common(engine); + err = intel_engine_setup_common(engine); + if (err) + return err; - timeline = i915_timeline_create(engine->i915, engine->name); + timeline = i915_timeline_create(engine->i915, engine->status_page.vma); if (IS_ERR(timeline)) { err = PTR_ERR(timeline); goto err; } + GEM_BUG_ON(timeline->has_initial_breadcrumb); ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE); i915_timeline_put(timeline); @@ -1484,12 +1549,14 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) if (err) goto err_unpin; + GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma); + return 0; err_unpin: intel_ring_unpin(ring); err_ring: - intel_ring_free(ring); + intel_ring_put(ring); err: intel_engine_cleanup_common(engine); return err; @@ -1500,10 +1567,10 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; WARN_ON(INTEL_GEN(dev_priv) > 2 && - (I915_READ_MODE(engine) & MODE_IDLE) == 0); + (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); intel_ring_unpin(engine->buffer); - intel_ring_free(engine->buffer); + intel_ring_put(engine->buffer); if (engine->cleanup) engine->cleanup(engine); @@ -1535,11 +1602,11 @@ static int load_pd_dir(struct i915_request *rq, return PTR_ERR(cs); *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine)); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base)); *cs++ = PP_DIR_DCLV_2G; *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); *cs++ = ppgtt->pd.base.ggtt_offset << 10; intel_ring_advance(rq, cs); @@ -1558,7 +1625,7 @@ static int flush_pd_dir(struct i915_request *rq) /* Stall until the page table load is complete */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); *cs++ = i915_scratch_offset(rq->i915); *cs++ = MI_NOOP; @@ -1571,11 +1638,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) struct drm_i915_private *i915 = rq->i915; struct intel_engine_cs *engine = rq->engine; enum intel_engine_id id; - const int num_rings = - /* Use an extended w/a on gen7 if signalling from other rings */ - (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ? - INTEL_INFO(i915)->num_rings - 1 : - 0; + const int num_engines = + IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0; bool force_restore = false; int len; u32 *cs; @@ -1588,8 +1652,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN; len = 4; - if (IS_GEN7(i915)) - len += 2 + (num_rings ? 4*num_rings + 6 : 0); + if (IS_GEN(i915, 7)) + len += 2 + (num_engines ? 
4 * num_engines + 6 : 0); if (flags & MI_FORCE_RESTORE) { GEM_BUG_ON(flags & MI_RESTORE_INHIBIT); flags &= ~MI_FORCE_RESTORE; @@ -1602,12 +1666,12 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) return PTR_ERR(cs); /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ - if (IS_GEN7(i915)) { + if (IS_GEN(i915, 7)) { *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; - if (num_rings) { + if (num_engines) { struct intel_engine_cs *signaller; - *cs++ = MI_LOAD_REGISTER_IMM(num_rings); + *cs++ = MI_LOAD_REGISTER_IMM(num_engines); for_each_engine(signaller, i915, id) { if (signaller == engine) continue; @@ -1634,8 +1698,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) * placeholder we use to flush other contexts. */ *cs++ = MI_SET_CONTEXT; - *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context, - engine)->state) | + *cs++ = i915_ggtt_offset(engine->kernel_context->state) | MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT; } @@ -1649,12 +1712,12 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) */ *cs++ = MI_NOOP; - if (IS_GEN7(i915)) { - if (num_rings) { + if (IS_GEN(i915, 7)) { + if (num_engines) { struct intel_engine_cs *signaller; i915_reg_t last_reg = {}; /* keep gcc quiet */ - *cs++ = MI_LOAD_REGISTER_IMM(num_rings); + *cs++ = MI_LOAD_REGISTER_IMM(num_engines); for_each_engine(signaller, i915, id) { if (signaller == engine) continue; @@ -1732,7 +1795,7 @@ static int switch_context(struct i915_request *rq) * explanation. */ loops = 1; - if (engine->id == BCS && IS_VALLEYVIEW(engine->i915)) + if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915)) loops = 32; do { @@ -1741,15 +1804,15 @@ static int switch_context(struct i915_request *rq) goto err; } while (--loops); - if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) { - unwind_mm = intel_engine_flag(engine); - ppgtt->pd_dirty_rings &= ~unwind_mm; + if (ppgtt->pd_dirty_engines & engine->mask) { + unwind_mm = engine->mask; + ppgtt->pd_dirty_engines &= ~unwind_mm; hw_flags = MI_FORCE_RESTORE; } } if (rq->hw_context->state) { - GEM_BUG_ON(engine->id != RCS); + GEM_BUG_ON(engine->id != RCS0); /* * The kernel context(s) is treated as pure scratch and is not @@ -1809,7 +1872,7 @@ static int switch_context(struct i915_request *rq) err_mm: if (unwind_mm) - ppgtt->pd_dirty_rings |= unwind_mm; + ppgtt->pd_dirty_engines |= unwind_mm; err: return ret; } @@ -1818,19 +1881,22 @@ static int ring_request_alloc(struct i915_request *request) { int ret; - GEM_BUG_ON(!request->hw_context->pin_count); + GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); + GEM_BUG_ON(request->timeline->has_initial_breadcrumb); - /* Flush enough space to reduce the likelihood of waiting after + /* + * Flush enough space to reduce the likelihood of waiting after * we start building the request - in which case we will just * have to repeat work. */ request->reserved_space += LEGACY_REQUEST_SIZE; - ret = intel_ring_wait_for_space(request->ring, request->reserved_space); + ret = switch_context(request); if (ret) return ret; - ret = switch_context(request); + /* Unconditionally invalidate GPU caches and TLBs. 
*/ + ret = request->engine->emit_flush(request, EMIT_INVALIDATE); if (ret) return ret; @@ -1872,22 +1938,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes) return 0; } -int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes) -{ - GEM_BUG_ON(bytes > ring->effective_size); - if (unlikely(bytes > ring->effective_size - ring->emit)) - bytes += ring->size - ring->emit; - - if (unlikely(bytes > ring->space)) { - int ret = wait_for_space(ring, bytes); - if (unlikely(ret)) - return ret; - } - - GEM_BUG_ON(ring->space < bytes); - return 0; -} - u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) { struct intel_ring *ring = rq->ring; @@ -1992,23 +2042,23 @@ int intel_ring_cacheline_align(struct i915_request *rq) static void gen6_bsd_submit_request(struct i915_request *request) { - struct drm_i915_private *dev_priv = request->i915; + struct intel_uncore *uncore = request->engine->uncore; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); /* Every tail move must follow the sequence below */ /* Disable notification that the ring is IDLE. The GT * will then assume that it is busy and bring it out of rc6. */ - I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); /* Clear the context id. Here be magic! */ - I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); + intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0); /* Wait for the ring not to be idle, i.e. for it to wake up. */ - if (__intel_wait_for_register_fw(dev_priv, + if (__intel_wait_for_register_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, GEN6_BSD_SLEEP_INDICATOR, 0, @@ -2021,10 +2071,10 @@ static void gen6_bsd_submit_request(struct i915_request *request) /* Let the ring send IDLE messages to the GT again, * and so let it sleep to conserve power when idle. */ - I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); } static int mi_flush_dw(struct i915_request *rq, u32 flags) @@ -2120,77 +2170,15 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode) return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB); } -static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) -{ - int i; - - if (!HAS_LEGACY_SEMAPHORES(dev_priv)) - return; - - GEM_BUG_ON(INTEL_GEN(dev_priv) < 6); - engine->semaphore.sync_to = gen6_ring_sync_to; - engine->semaphore.signal = gen6_signal; - - /* - * The current semaphore is only applied on pre-gen8 - * platform. And there is no VCS2 ring on the pre-gen8 - * platform. So the semaphore between RCS and VCS2 is - * initialized as INVALID. 
- */ - for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) { - static const struct { - u32 wait_mbox; - i915_reg_t mbox_reg; - } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = { - [RCS_HW] = { - [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, - [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, - [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, - }, - [VCS_HW] = { - [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, - [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, - [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, - }, - [BCS_HW] = { - [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, - [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, - [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, - }, - [VECS_HW] = { - [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, - [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, - [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, - }, - }; - u32 wait_mbox; - i915_reg_t mbox_reg; - - if (i == engine->hw_id) { - wait_mbox = MI_SEMAPHORE_SYNC_INVALID; - mbox_reg = GEN6_NOSYNC; - } else { - wait_mbox = sem_data[engine->hw_id][i].wait_mbox; - mbox_reg = sem_data[engine->hw_id][i].mbox_reg; - } - - engine->semaphore.mbox.wait[i] = wait_mbox; - engine->semaphore.mbox.signal[i] = mbox_reg; - } -} - static void intel_ring_init_irq(struct drm_i915_private *dev_priv, struct intel_engine_cs *engine) { if (INTEL_GEN(dev_priv) >= 6) { engine->irq_enable = gen6_irq_enable; engine->irq_disable = gen6_irq_disable; - engine->irq_seqno_barrier = gen6_seqno_barrier; } else if (INTEL_GEN(dev_priv) >= 5) { engine->irq_enable = gen5_irq_enable; engine->irq_disable = gen5_irq_disable; - engine->irq_seqno_barrier = gen5_seqno_barrier; } else if (INTEL_GEN(dev_priv) >= 3) { engine->irq_enable = i9xx_irq_enable; engine->irq_disable = i9xx_irq_disable; @@ -2222,28 +2210,23 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8); intel_ring_init_irq(dev_priv, engine); - intel_ring_init_semaphores(dev_priv, engine); engine->init_hw = init_ring_common; engine->reset.prepare = reset_prepare; engine->reset.reset = reset_ring; engine->reset.finish = reset_finish; - engine->context_pin = intel_ring_context_pin; + engine->cops = &ring_context_ops; engine->request_alloc = ring_request_alloc; - engine->emit_breadcrumb = i9xx_emit_breadcrumb; - engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz; - if (HAS_LEGACY_SEMAPHORES(dev_priv)) { - int num_rings; - - engine->emit_breadcrumb = gen6_sema_emit_breadcrumb; - - num_rings = INTEL_INFO(dev_priv)->num_rings - 1; - engine->emit_breadcrumb_sz += num_rings * 3; - if (num_rings & 1) - engine->emit_breadcrumb_sz++; - } + /* + * Using a global execution timeline; the previous final breadcrumb is + * equivalent to our next initial bread so we can elide + * engine->emit_init_breadcrumb(). 
+ */ + engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb; + if (IS_GEN(dev_priv, 5)) + engine->emit_fini_breadcrumb = gen5_emit_breadcrumb; engine->set_default_submission = i9xx_set_default_submission; @@ -2269,12 +2252,15 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - if (INTEL_GEN(dev_priv) >= 6) { + if (INTEL_GEN(dev_priv) >= 7) { engine->init_context = intel_rcs_ctx_init; engine->emit_flush = gen7_render_ring_flush; - if (IS_GEN6(dev_priv)) - engine->emit_flush = gen6_render_ring_flush; - } else if (IS_GEN5(dev_priv)) { + engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb; + } else if (IS_GEN(dev_priv, 6)) { + engine->init_context = intel_rcs_ctx_init; + engine->emit_flush = gen6_render_ring_flush; + engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb; + } else if (IS_GEN(dev_priv, 5)) { engine->emit_flush = gen4_render_ring_flush; } else { if (INTEL_GEN(dev_priv) < 4) @@ -2304,13 +2290,18 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine) if (INTEL_GEN(dev_priv) >= 6) { /* gen6 bsd needs a special wa for tail updates */ - if (IS_GEN6(dev_priv)) + if (IS_GEN(dev_priv, 6)) engine->set_default_submission = gen6_bsd_set_default_submission; engine->emit_flush = gen6_bsd_ring_flush; engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; + + if (IS_GEN(dev_priv, 6)) + engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb; + else + engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; } else { engine->emit_flush = bsd_ring_flush; - if (IS_GEN5(dev_priv)) + if (IS_GEN(dev_priv, 5)) engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; else engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; @@ -2323,11 +2314,18 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; + GEM_BUG_ON(INTEL_GEN(dev_priv) < 6); + intel_ring_default_vfuncs(dev_priv, engine); engine->emit_flush = gen6_ring_flush; engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; + if (IS_GEN(dev_priv, 6)) + engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb; + else + engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; + return intel_init_ring_buffer(engine); } @@ -2335,6 +2333,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; + GEM_BUG_ON(INTEL_GEN(dev_priv) < 7); + intel_ring_default_vfuncs(dev_priv, engine); engine->emit_flush = gen6_ring_flush; @@ -2342,5 +2342,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine) engine->irq_enable = hsw_vebox_irq_enable; engine->irq_disable = hsw_vebox_irq_disable; + engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; + return intel_init_ring_buffer(engine); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 72edaa7ff411..e58d6f04177b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -5,22 +5,21 @@ #include <drm/drm_util.h> #include <linux/hashtable.h> +#include <linux/irq_work.h> +#include <linux/random.h> #include <linux/seqlock.h> #include "i915_gem_batch_pool.h" - -#include "i915_reg.h" #include "i915_pmu.h" +#include "i915_reg.h" #include "i915_request.h" #include "i915_selftest.h" #include "i915_timeline.h" +#include "intel_engine_types.h" #include "intel_gpu_commands.h" #include "intel_workarounds.h" struct drm_printer; -struct i915_sched_attr; - -#define I915_CMD_HASH_ORDER 9 /* Early gen2 devices have a cacheline of just 32 bytes, 
using 64 is overkill, * but keeps the logic simple. Indeed, the whole purpose of this macro is just @@ -28,31 +27,46 @@ struct i915_sched_attr; * workarounds! */ #define CACHELINE_BYTES 64 -#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t)) +#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32)) -struct intel_hw_status_page { - struct i915_vma *vma; - u32 *page_addr; - u32 ggtt_offset; -}; +/* + * The register defines to be used with the following macros need to accept a + * base param, e.g: + * + * REG_FOO(base) _MMIO((base) + <relative offset>) + * ENGINE_READ(engine, REG_FOO); + * + * register arrays are to be defined and accessed as follows: + * + * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>) + * ENGINE_READ_IDX(engine, REG_BAR, i) + */ -#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base)) -#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val) +#define __ENGINE_REG_OP(op__, engine__, ...) \ + intel_uncore_##op__((engine__)->uncore, __VA_ARGS__) -#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base)) -#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val) +#define __ENGINE_READ_OP(op__, engine__, reg__) \ + __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base)) -#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base)) -#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val) +#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__) +#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__) +#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__) +#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__) -#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base)) -#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val) +#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \ + __ENGINE_REG_OP(read64_2x32, (engine__), \ + lower_reg__((engine__)->mmio_base), \ + upper_reg__((engine__)->mmio_base)) -#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base)) -#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val) +#define ENGINE_READ_IDX(engine__, reg__, idx__) \ + __ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__))) -#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base)) -#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val) +#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \ + __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__)) + +#define ENGINE_WRITE16(...) __ENGINE_WRITE_OP(write16, __VA_ARGS__) +#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__) +#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__) /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to * do the writes, and that must have qw aligned offsets, simply pretend it's 8b. @@ -90,581 +104,24 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) return "unknown"; } -#define I915_MAX_SLICES 3 -#define I915_MAX_SUBSLICES 8 - -#define instdone_slice_mask(dev_priv__) \ - (IS_GEN7(dev_priv__) ? \ - 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask) - -#define instdone_subslice_mask(dev_priv__) \ - (IS_GEN7(dev_priv__) ? 
\ - 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0]) - -#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ - for ((slice__) = 0, (subslice__) = 0; \ - (slice__) < I915_MAX_SLICES; \ - (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \ - (slice__) += ((subslice__) == 0)) \ - for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \ - (BIT(subslice__) & instdone_subslice_mask(dev_priv__))) - -struct intel_instdone { - u32 instdone; - /* The following exist only in the RCS engine */ - u32 slice_common; - u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES]; - u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES]; -}; - -struct intel_engine_hangcheck { - u64 acthd; - u32 seqno; - enum intel_engine_hangcheck_action action; - unsigned long action_timestamp; - int deadlock; - struct intel_instdone instdone; - struct i915_request *active_request; - bool stalled:1; - bool wedged:1; -}; - -struct intel_ring { - struct i915_vma *vma; - void *vaddr; - - struct i915_timeline *timeline; - struct list_head request_list; - struct list_head active_link; - - u32 head; - u32 tail; - u32 emit; - - u32 space; - u32 size; - u32 effective_size; -}; - -struct i915_gem_context; -struct drm_i915_reg_table; - -/* - * we use a single page to load ctx workarounds so all of these - * values are referred in terms of dwords - * - * struct i915_wa_ctx_bb: - * offset: specifies batch starting position, also helpful in case - * if we want to have multiple batches at different offsets based on - * some criteria. It is not a requirement at the moment but provides - * an option for future use. - * size: size of the batch in DWORDS - */ -struct i915_ctx_workarounds { - struct i915_wa_ctx_bb { - u32 offset; - u32 size; - } indirect_ctx, per_ctx; - struct i915_vma *vma; -}; - -struct i915_request; - -#define I915_MAX_VCS 4 -#define I915_MAX_VECS 2 - -/* - * Engine IDs definitions. - * Keep instances of the same type engine together. - */ -enum intel_engine_id { - RCS = 0, - BCS, - VCS, - VCS2, - VCS3, - VCS4, -#define _VCS(n) (VCS + (n)) - VECS, - VECS2 -#define _VECS(n) (VECS + (n)) -}; - -struct i915_priolist { - struct list_head requests[I915_PRIORITY_COUNT]; - struct rb_node node; - unsigned long used; - int priority; -}; - -#define priolist_for_each_request(it, plist, idx) \ - for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \ - list_for_each_entry(it, &(plist)->requests[idx], sched.link) - -#define priolist_for_each_request_consume(it, n, plist, idx) \ - for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \ - list_for_each_entry_safe(it, n, \ - &(plist)->requests[idx - 1], \ - sched.link) - -struct st_preempt_hang { - struct completion completion; - bool inject_hang; -}; - -/** - * struct intel_engine_execlists - execlist submission queue and port state - * - * The struct intel_engine_execlists represents the combined logical state of - * driver and the hardware state for execlist mode of submission. 
- */ -struct intel_engine_execlists { - /** - * @tasklet: softirq tasklet for bottom handler - */ - struct tasklet_struct tasklet; - - /** - * @default_priolist: priority list for I915_PRIORITY_NORMAL - */ - struct i915_priolist default_priolist; - - /** - * @no_priolist: priority lists disabled - */ - bool no_priolist; - - /** - * @submit_reg: gen-specific execlist submission register - * set to the ExecList Submission Port (elsp) register pre-Gen11 and to - * the ExecList Submission Queue Contents register array for Gen11+ - */ - u32 __iomem *submit_reg; - - /** - * @ctrl_reg: the enhanced execlists control register, used to load the - * submit queue on the HW and to request preemptions to idle - */ - u32 __iomem *ctrl_reg; - - /** - * @port: execlist port states - * - * For each hardware ELSP (ExecList Submission Port) we keep - * track of the last request and the number of times we submitted - * that port to hw. We then count the number of times the hw reports - * a context completion or preemption. As only one context can - * be active on hw, we limit resubmission of context to port[0]. This - * is called Lite Restore, of the context. - */ - struct execlist_port { - /** - * @request_count: combined request and submission count - */ - struct i915_request *request_count; -#define EXECLIST_COUNT_BITS 2 -#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) -#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) -#define port_set(p, packed) ((p)->request_count = (packed)) -#define port_isset(p) ((p)->request_count) -#define port_index(p, execlists) ((p) - (execlists)->port) - - /** - * @context_id: context ID for port - */ - GEM_DEBUG_DECL(u32 context_id); - -#define EXECLIST_MAX_PORTS 2 - } port[EXECLIST_MAX_PORTS]; - - /** - * @active: is the HW active? We consider the HW as active after - * submitting any context for execution and until we have seen the - * last context completion event. After that, we do not expect any - * more events until we submit, and so can park the HW. - * - * As we have a small number of different sources from which we feed - * the HW, we track the state of each inside a single bitfield. - */ - unsigned int active; -#define EXECLISTS_ACTIVE_USER 0 -#define EXECLISTS_ACTIVE_PREEMPT 1 -#define EXECLISTS_ACTIVE_HWACK 2 - - /** - * @port_mask: number of execlist ports - 1 - */ - unsigned int port_mask; - - /** - * @queue_priority: Highest pending priority. - * - * When we add requests into the queue, or adjust the priority of - * executing requests, we compute the maximum priority of those - * pending requests. We can then use this value to determine if - * we need to preempt the executing requests to service the queue. - */ - int queue_priority; - - /** - * @queue: queue of requests, in priority lists - */ - struct rb_root_cached queue; - - /** - * @csb_write: control register for Context Switch buffer - * - * Note this register may be either mmio or HWSP shadow. - */ - u32 *csb_write; - - /** - * @csb_status: status array for Context Switch buffer - * - * Note these register may be either mmio or HWSP shadow. 
- */ - u32 *csb_status; - - /** - * @preempt_complete_status: expected CSB upon completing preemption - */ - u32 preempt_complete_status; - - /** - * @csb_head: context status buffer head - */ - u8 csb_head; - - I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;) -}; - -#define INTEL_ENGINE_CS_MAX_NAME 8 - -struct intel_engine_cs { - struct drm_i915_private *i915; - char name[INTEL_ENGINE_CS_MAX_NAME]; - - enum intel_engine_id id; - unsigned int hw_id; - unsigned int guc_id; - - u8 uabi_id; - u8 uabi_class; - - u8 class; - u8 instance; - u32 context_size; - u32 mmio_base; - - struct intel_ring *buffer; - - struct i915_timeline timeline; - - struct drm_i915_gem_object *default_state; - void *pinned_default_state; - - unsigned long irq_posted; -#define ENGINE_IRQ_BREADCRUMB 0 - - /* Rather than have every client wait upon all user interrupts, - * with the herd waking after every interrupt and each doing the - * heavyweight seqno dance, we delegate the task (of being the - * bottom-half of the user interrupt) to the first client. After - * every interrupt, we wake up one client, who does the heavyweight - * coherent seqno read and either goes back to sleep (if incomplete), - * or wakes up all the completed clients in parallel, before then - * transferring the bottom-half status to the next client in the queue. - * - * Compared to walking the entire list of waiters in a single dedicated - * bottom-half, we reduce the latency of the first waiter by avoiding - * a context switch, but incur additional coherent seqno reads when - * following the chain of request breadcrumbs. Since it is most likely - * that we have a single client waiting on each seqno, then reducing - * the overhead of waking that client is much preferred. - */ - struct intel_breadcrumbs { - spinlock_t irq_lock; /* protects irq_*; irqsafe */ - struct intel_wait *irq_wait; /* oldest waiter by retirement */ - - spinlock_t rb_lock; /* protects the rb and wraps irq_lock */ - struct rb_root waiters; /* sorted by retirement, priority */ - struct list_head signals; /* sorted by retirement */ - struct task_struct *signaler; /* used for fence signalling */ - - struct timer_list fake_irq; /* used after a missed interrupt */ - struct timer_list hangcheck; /* detect missed interrupts */ - - unsigned int hangcheck_interrupts; - unsigned int irq_enabled; - unsigned int irq_count; - - bool irq_armed : 1; - I915_SELFTEST_DECLARE(bool mock : 1); - } breadcrumbs; - - struct { - /** - * @enable: Bitmask of enable sample events on this engine. - * - * Bits correspond to sample event types, for instance - * I915_SAMPLE_QUEUED is bit 0 etc. - */ - u32 enable; - /** - * @enable_count: Reference count for the enabled samplers. - * - * Index number corresponds to the bit number from @enable. - */ - unsigned int enable_count[I915_PMU_SAMPLE_BITS]; - /** - * @sample: Counter values for sampling events. - * - * Our internal timer stores the current counters in this field. - */ -#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) - struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; - } pmu; - - /* - * A pool of objects to use as shadow copies of client batch buffers - * when the command parser is enabled. Prevents the client from - * modifying the batch contents after software parsing. 
- */ - struct i915_gem_batch_pool batch_pool; - - struct intel_hw_status_page status_page; - struct i915_ctx_workarounds wa_ctx; - struct i915_wa_list ctx_wa_list; - struct i915_wa_list wa_list; - struct i915_wa_list whitelist; - - u32 irq_keep_mask; /* always keep these interrupts */ - u32 irq_enable_mask; /* bitmask to enable ring interrupt */ - void (*irq_enable)(struct intel_engine_cs *engine); - void (*irq_disable)(struct intel_engine_cs *engine); - - int (*init_hw)(struct intel_engine_cs *engine); - - struct { - struct i915_request *(*prepare)(struct intel_engine_cs *engine); - void (*reset)(struct intel_engine_cs *engine, - struct i915_request *rq); - void (*finish)(struct intel_engine_cs *engine); - } reset; - - void (*park)(struct intel_engine_cs *engine); - void (*unpark)(struct intel_engine_cs *engine); - - void (*set_default_submission)(struct intel_engine_cs *engine); - - struct intel_context *(*context_pin)(struct intel_engine_cs *engine, - struct i915_gem_context *ctx); - - int (*request_alloc)(struct i915_request *rq); - int (*init_context)(struct i915_request *rq); - - int (*emit_flush)(struct i915_request *request, u32 mode); -#define EMIT_INVALIDATE BIT(0) -#define EMIT_FLUSH BIT(1) -#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH) - int (*emit_bb_start)(struct i915_request *rq, - u64 offset, u32 length, - unsigned int dispatch_flags); -#define I915_DISPATCH_SECURE BIT(0) -#define I915_DISPATCH_PINNED BIT(1) - void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs); - int emit_breadcrumb_sz; - - /* Pass the request to the hardware queue (e.g. directly into - * the legacy ringbuffer or to the end of an execlist). - * - * This is called from an atomic context with irqs disabled; must - * be irq safe. - */ - void (*submit_request)(struct i915_request *rq); - - /* - * Call when the priority on a request has changed and it and its - * dependencies may need rescheduling. Note the request itself may - * not be ready to run! - */ - void (*schedule)(struct i915_request *request, - const struct i915_sched_attr *attr); - - /* - * Cancel all requests on the hardware, or queued for execution. - * This should only cancel the ready requests that have been - * submitted to the engine (via the engine->submit_request callback). - * This is called when marking the device as wedged. - */ - void (*cancel_requests)(struct intel_engine_cs *engine); - - /* Some chipsets are not quite as coherent as advertised and need - * an expensive kick to force a true read of the up-to-date seqno. - * However, the up-to-date seqno is not always required and the last - * seen value is good enough. Note that the seqno will always be - * monotonic, even if not coherent. - */ - void (*irq_seqno_barrier)(struct intel_engine_cs *engine); - void (*cleanup)(struct intel_engine_cs *engine); - - /* GEN8 signal/wait table - never trust comments! 
- * signal to signal to signal to signal to signal to - * RCS VCS BCS VECS VCS2 - * -------------------------------------------------------------------- - * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) | - * |------------------------------------------------------------------- - * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) | - * |------------------------------------------------------------------- - * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) | - * |------------------------------------------------------------------- - * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) | - * |------------------------------------------------------------------- - * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) | - * |------------------------------------------------------------------- - * - * Generalization: - * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id) - * ie. transpose of g(x, y) - * - * sync from sync from sync from sync from sync from - * RCS VCS BCS VECS VCS2 - * -------------------------------------------------------------------- - * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) | - * |------------------------------------------------------------------- - * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) | - * |------------------------------------------------------------------- - * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) | - * |------------------------------------------------------------------- - * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) | - * |------------------------------------------------------------------- - * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) | - * |------------------------------------------------------------------- - * - * Generalization: - * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id) - * ie. transpose of f(x, y) - */ - struct { -#define GEN6_SEMAPHORE_LAST VECS_HW -#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1) -#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0) - struct { - /* our mbox written by others */ - u32 wait[GEN6_NUM_SEMAPHORES]; - /* mboxes this ring signals to */ - i915_reg_t signal[GEN6_NUM_SEMAPHORES]; - } mbox; - - /* AKA wait() */ - int (*sync_to)(struct i915_request *rq, - struct i915_request *signal); - u32 *(*signal)(struct i915_request *rq, u32 *cs); - } semaphore; - - struct intel_engine_execlists execlists; - - /* Contexts are pinned whilst they are active on the GPU. The last - * context executed remains active whilst the GPU is idle - the - * switch away and write to the context object only occurs on the - * next execution. Contexts are only unpinned on retirement of the - * following request ensuring that we can always write to the object - * on the context switch even after idling. Across suspend, we switch - * to the kernel context and trash it as the save may not happen - * before the hardware is powered down. 
- */ - struct intel_context *last_retired_context; - - /* status_notifier: list of callbacks for context-switch changes */ - struct atomic_notifier_head context_status_notifier; - - struct intel_engine_hangcheck hangcheck; - -#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0) -#define I915_ENGINE_SUPPORTS_STATS BIT(1) -#define I915_ENGINE_HAS_PREEMPTION BIT(2) - unsigned int flags; - - /* - * Table of commands the command parser needs to know about - * for this engine. - */ - DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER); - - /* - * Table of registers allowed in commands that read/write registers. - */ - const struct drm_i915_reg_table *reg_tables; - int reg_table_count; +void intel_engines_set_scheduler_caps(struct drm_i915_private *i915); +static inline bool __execlists_need_preempt(int prio, int last) +{ /* - * Returns the bitmask for the length field of the specified command. - * Return 0 for an unrecognized/invalid command. + * Allow preemption of low -> normal -> high, but we do + * not allow low priority tasks to preempt other low priority + * tasks under the impression that latency for low priority + * tasks does not matter (as much as background throughput), + * so kiss. * - * If the command parser finds an entry for a command in the engine's - * cmd_tables, it gets the command's length based on the table entry. - * If not, it calls this function to determine the per-engine length - * field encoding for the command (i.e. different opcode ranges use - * certain bits to encode the command length in the header). + * More naturally we would write + * prio >= max(0, last); + * except that we wish to prevent triggering preemption at the same + * priority level: the task that is running should remain running + * to preserve FIFO ordering of dependencies. */ - u32 (*get_cmd_length_mask)(u32 cmd_header); - - struct { - /** - * @lock: Lock protecting the below fields. - */ - seqlock_t lock; - /** - * @enabled: Reference count indicating number of listeners. - */ - unsigned int enabled; - /** - * @active: Number of contexts currently scheduled in. - */ - unsigned int active; - /** - * @enabled_at: Timestamp when busy stats were enabled. - */ - ktime_t enabled_at; - /** - * @start: Timestamp of the last idle to active transition. - * - * Idle is defined as active == 0, active is active > 0. - */ - ktime_t start; - /** - * @total: Total time this engine was busy. - * - * Accumulated time not counting the most recent block in cases - * where engine is currently busy (active > 0). - */ - ktime_t total; - } stats; -}; - -static inline bool -intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine) -{ - return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER; -} - -static inline bool -intel_engine_supports_stats(const struct intel_engine_cs *engine) -{ - return engine->flags & I915_ENGINE_SUPPORTS_STATS; -} - -static inline bool -intel_engine_has_preemption(const struct intel_engine_cs *engine) -{ - return engine->flags & I915_ENGINE_HAS_PREEMPTION; -} - -static inline bool __execlists_need_preempt(int prio, int last) -{ - return prio > max(0, last); + return prio > max(I915_PRIORITY_NORMAL - 1, last); } static inline void @@ -732,17 +189,11 @@ execlists_port_complete(struct intel_engine_execlists * const execlists, return port; } -static inline unsigned int -intel_engine_flag(const struct intel_engine_cs *engine) -{ - return BIT(engine->id); -} - static inline u32 intel_read_status_page(const struct intel_engine_cs *engine, int reg) { /* Ensure that the compiler doesn't optimize away the load. 
*/ - return READ_ONCE(engine->status_page.page_addr[reg]); + return READ_ONCE(engine->status_page.addr[reg]); } static inline void @@ -755,12 +206,12 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) */ if (static_cpu_has(X86_FEATURE_CLFLUSH)) { mb(); - clflush(&engine->status_page.page_addr[reg]); - engine->status_page.page_addr[reg] = value; - clflush(&engine->status_page.page_addr[reg]); + clflush(&engine->status_page.addr[reg]); + engine->status_page.addr[reg] = value; + clflush(&engine->status_page.addr[reg]); mb(); } else { - WRITE_ONCE(engine->status_page.page_addr[reg], value); + WRITE_ONCE(engine->status_page.addr[reg], value); } } @@ -780,12 +231,14 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) * * The area from dword 0x30 to 0x3ff is available for driver usage. */ -#define I915_GEM_HWS_INDEX 0x30 -#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT) -#define I915_GEM_HWS_PREEMPT_INDEX 0x32 -#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT) -#define I915_GEM_HWS_SCRATCH_INDEX 0x40 -#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_GEM_HWS_PREEMPT 0x32 +#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32)) +#define I915_GEM_HWS_HANGCHECK 0x34 +#define I915_GEM_HWS_HANGCHECK_ADDR (I915_GEM_HWS_HANGCHECK * sizeof(u32)) +#define I915_GEM_HWS_SEQNO 0x40 +#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32)) +#define I915_GEM_HWS_SCRATCH 0x80 +#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32)) #define I915_HWS_CSB_BUF0_INDEX 0x10 #define I915_HWS_CSB_WRITE_INDEX 0x1f @@ -799,7 +252,18 @@ int intel_ring_pin(struct intel_ring *ring); void intel_ring_reset(struct intel_ring *ring, u32 tail); unsigned int intel_ring_update_space(struct intel_ring *ring); void intel_ring_unpin(struct intel_ring *ring); -void intel_ring_free(struct intel_ring *ring); +void intel_ring_free(struct kref *ref); + +static inline struct intel_ring *intel_ring_get(struct intel_ring *ring) +{ + kref_get(&ring->ref); + return ring; +} + +static inline void intel_ring_put(struct intel_ring *ring) +{ + kref_put(&ring->ref, intel_ring_free); +} void intel_engine_stop(struct intel_engine_cs *engine); void intel_engine_cleanup(struct intel_engine_cs *engine); @@ -808,7 +272,6 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv); int __must_check intel_ring_cacheline_align(struct i915_request *rq); -int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes); u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n); static inline void intel_ring_advance(struct i915_request *rq, u32 *cs) @@ -889,9 +352,19 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) return tail; } -void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); +static inline unsigned int +__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size) +{ + /* + * "If the Ring Buffer Head Pointer and the Tail Pointer are on the + * same cacheline, the Head Pointer must not be greater than the Tail + * Pointer." 
+ */ + GEM_BUG_ON(!is_power_of_2(size)); + return (head - tail - CACHELINE_BYTES) & (size - 1); +} -void intel_engine_setup_common(struct intel_engine_cs *engine); +int intel_engine_setup_common(struct intel_engine_cs *engine); int intel_engine_init_common(struct intel_engine_cs *engine); void intel_engine_cleanup_common(struct intel_engine_cs *engine); @@ -903,146 +376,37 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine); int intel_engine_stop_cs(struct intel_engine_cs *engine); void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine); +void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask); + u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine); -static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine) -{ - /* - * We are only peeking at the tail of the submit queue (and not the - * queue itself) in order to gain a hint as to the current active - * state of the engine. Callers are not expected to be taking - * engine->timeline->lock, nor are they expected to be concerned - * wtih serialising this hint with anything, so document it as - * a hint and nothing more. - */ - return READ_ONCE(engine->timeline.seqno); -} - -static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) -{ - return intel_read_status_page(engine, I915_GEM_HWS_INDEX); -} - -static inline bool intel_engine_signaled(struct intel_engine_cs *engine, - u32 seqno) -{ - return i915_seqno_passed(intel_engine_get_seqno(engine), seqno); -} - -static inline bool intel_engine_has_completed(struct intel_engine_cs *engine, - u32 seqno) -{ - GEM_BUG_ON(!seqno); - return intel_engine_signaled(engine, seqno); -} - -static inline bool intel_engine_has_started(struct intel_engine_cs *engine, - u32 seqno) -{ - GEM_BUG_ON(!seqno); - return intel_engine_signaled(engine, seqno - 1); -} - void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone); -/* - * Arbitrary size for largest possible 'add request' sequence. The code paths - * are complex and variable. Empirical measurement shows that the worst case - * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However, - * we need to allocate double the largest single packet within that emission - * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW). 
- */ -#define MIN_SPACE_FOR_ADD_REQUEST 336 - -static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) -{ - return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR; -} - -static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine) -{ - return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR; -} - -/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ -int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); - -static inline void intel_wait_init(struct intel_wait *wait) -{ - wait->tsk = current; - wait->request = NULL; -} - -static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno) -{ - wait->tsk = current; - wait->seqno = seqno; -} - -static inline bool intel_wait_has_seqno(const struct intel_wait *wait) -{ - return wait->seqno; -} - -static inline bool -intel_wait_update_seqno(struct intel_wait *wait, u32 seqno) -{ - wait->seqno = seqno; - return intel_wait_has_seqno(wait); -} - -static inline bool -intel_wait_update_request(struct intel_wait *wait, - const struct i915_request *rq) -{ - return intel_wait_update_seqno(wait, i915_request_global_seqno(rq)); -} - -static inline bool -intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno) -{ - return wait->seqno == seqno; -} - -static inline bool -intel_wait_check_request(const struct intel_wait *wait, - const struct i915_request *rq) -{ - return intel_wait_check_seqno(wait, i915_request_global_seqno(rq)); -} +void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); -static inline bool intel_wait_complete(const struct intel_wait *wait) -{ - return RB_EMPTY_NODE(&wait->node); -} +void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine); +void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine); -bool intel_engine_add_wait(struct intel_engine_cs *engine, - struct intel_wait *wait); -void intel_engine_remove_wait(struct intel_engine_cs *engine, - struct intel_wait *wait); -bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup); -void intel_engine_cancel_signaling(struct i915_request *request); +bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine); +void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); -static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine) +static inline void +intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine) { - return READ_ONCE(engine->breadcrumbs.irq_wait); + irq_work_queue(&engine->breadcrumbs.irq_work); } -unsigned int intel_engine_wakeup(struct intel_engine_cs *engine); -#define ENGINE_WAKEUP_WAITER BIT(0) -#define ENGINE_WAKEUP_ASLEEP BIT(1) - -void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine); -void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine); - -void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); -void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); +bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine); void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine); void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); +void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, + struct drm_printer *p); + static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset) { memset(batch, 0, 6 * sizeof(u32)); @@ -1055,7 +419,7 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, 
u32 flags, u32 offset) } static inline u32 * -gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset) +gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags) { /* We're using qword write, offset should be aligned to 8 bytes. */ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); @@ -1065,8 +429,7 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset) * following the batch. */ *cs++ = GFX_OP_PIPE_CONTROL(6); - *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_QW_WRITE; + *cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB; *cs++ = gtt_offset; *cs++ = 0; *cs++ = value; @@ -1077,14 +440,14 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset) } static inline u32 * -gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset) +gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags) { /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ GEM_BUG_ON(gtt_offset & (1 << 5)); /* Offset should be aligned to 8 bytes for both (QW/DW) write types */ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); - *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW; + *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags; *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT; *cs++ = 0; *cs++ = value; @@ -1092,12 +455,18 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset) return cs; } -void intel_engines_sanitize(struct drm_i915_private *i915); +static inline void intel_engine_reset(struct intel_engine_cs *engine, + bool stalled) +{ + if (engine->reset.reset) + engine->reset.reset(engine, stalled); +} + +void intel_engines_sanitize(struct drm_i915_private *i915, bool force); bool intel_engine_is_idle(struct intel_engine_cs *engine); bool intel_engines_are_idle(struct drm_i915_private *dev_priv); -bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine); void intel_engine_lost_context(struct intel_engine_cs *engine); void intel_engines_park(struct drm_i915_private *i915); @@ -1176,6 +545,9 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine); ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); +struct i915_request * +intel_engine_find_active_request(struct intel_engine_cs *engine); + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) @@ -1196,4 +568,17 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) #endif +static inline u32 +intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine) +{ + return engine->hangcheck.next_seqno = + next_pseudo_random32(engine->hangcheck.next_seqno); +} + +static inline u32 +intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine) +{ + return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK); +} + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 4350a5270423..40ddfbb97acb 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -29,6 +29,8 @@ #include <linux/pm_runtime.h> #include <linux/vgaarb.h> +#include <drm/drm_print.h> + #include "i915_drv.h" #include "intel_drv.h" @@ -49,6 +51,268 @@ * present for a given platform. 
*/ +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + +#include <linux/sort.h> + +#define STACKDEPTH 8 + +static noinline depot_stack_handle_t __save_depot_stack(void) +{ + unsigned long entries[STACKDEPTH]; + struct stack_trace trace = { + .entries = entries, + .max_entries = ARRAY_SIZE(entries), + .skip = 1, + }; + + save_stack_trace(&trace); + if (trace.nr_entries && + trace.entries[trace.nr_entries - 1] == ULONG_MAX) + trace.nr_entries--; + + return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN); +} + +static void __print_depot_stack(depot_stack_handle_t stack, + char *buf, int sz, int indent) +{ + unsigned long entries[STACKDEPTH]; + struct stack_trace trace = { + .entries = entries, + .max_entries = ARRAY_SIZE(entries), + }; + + depot_fetch_stack(stack, &trace); + snprint_stack_trace(buf, sz, &trace, indent); +} + +static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ + struct i915_runtime_pm *rpm = &i915->runtime_pm; + + spin_lock_init(&rpm->debug.lock); +} + +static noinline depot_stack_handle_t +track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ + struct i915_runtime_pm *rpm = &i915->runtime_pm; + depot_stack_handle_t stack, *stacks; + unsigned long flags; + + atomic_inc(&rpm->wakeref_count); + assert_rpm_wakelock_held(i915); + + if (!HAS_RUNTIME_PM(i915)) + return -1; + + stack = __save_depot_stack(); + if (!stack) + return -1; + + spin_lock_irqsave(&rpm->debug.lock, flags); + + if (!rpm->debug.count) + rpm->debug.last_acquire = stack; + + stacks = krealloc(rpm->debug.owners, + (rpm->debug.count + 1) * sizeof(*stacks), + GFP_NOWAIT | __GFP_NOWARN); + if (stacks) { + stacks[rpm->debug.count++] = stack; + rpm->debug.owners = stacks; + } else { + stack = -1; + } + + spin_unlock_irqrestore(&rpm->debug.lock, flags); + + return stack; +} + +static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915, + depot_stack_handle_t stack) +{ + struct i915_runtime_pm *rpm = &i915->runtime_pm; + unsigned long flags, n; + bool found = false; + + if (unlikely(stack == -1)) + return; + + spin_lock_irqsave(&rpm->debug.lock, flags); + for (n = rpm->debug.count; n--; ) { + if (rpm->debug.owners[n] == stack) { + memmove(rpm->debug.owners + n, + rpm->debug.owners + n + 1, + (--rpm->debug.count - n) * sizeof(stack)); + found = true; + break; + } + } + spin_unlock_irqrestore(&rpm->debug.lock, flags); + + if (WARN(!found, + "Unmatched wakeref (tracking %lu), count %u\n", + rpm->debug.count, atomic_read(&rpm->wakeref_count))) { + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return; + + __print_depot_stack(stack, buf, PAGE_SIZE, 2); + DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf); + + stack = READ_ONCE(rpm->debug.last_release); + if (stack) { + __print_depot_stack(stack, buf, PAGE_SIZE, 2); + DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf); + } + + kfree(buf); + } +} + +static int cmphandle(const void *_a, const void *_b) +{ + const depot_stack_handle_t * const a = _a, * const b = _b; + + if (*a < *b) + return -1; + else if (*a > *b) + return 1; + else + return 0; +} + +static void +__print_intel_runtime_pm_wakeref(struct drm_printer *p, + const struct intel_runtime_pm_debug *dbg) +{ + unsigned long i; + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return; + + if (dbg->last_acquire) { + __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2); + drm_printf(p, "Wakeref last acquired:\n%s", buf); + } + + if (dbg->last_release) { + __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2); + 
drm_printf(p, "Wakeref last released:\n%s", buf); + } + + drm_printf(p, "Wakeref count: %lu\n", dbg->count); + + sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL); + + for (i = 0; i < dbg->count; i++) { + depot_stack_handle_t stack = dbg->owners[i]; + unsigned long rep; + + rep = 1; + while (i + 1 < dbg->count && dbg->owners[i + 1] == stack) + rep++, i++; + __print_depot_stack(stack, buf, PAGE_SIZE, 2); + drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf); + } + + kfree(buf); +} + +static noinline void +untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ + struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct intel_runtime_pm_debug dbg = {}; + struct drm_printer p; + unsigned long flags; + + assert_rpm_wakelock_held(i915); + if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count, + &rpm->debug.lock, + flags)) { + dbg = rpm->debug; + + rpm->debug.owners = NULL; + rpm->debug.count = 0; + rpm->debug.last_release = __save_depot_stack(); + + spin_unlock_irqrestore(&rpm->debug.lock, flags); + } + if (!dbg.count) + return; + + p = drm_debug_printer("i915"); + __print_intel_runtime_pm_wakeref(&p, &dbg); + + kfree(dbg.owners); +} + +void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, + struct drm_printer *p) +{ + struct intel_runtime_pm_debug dbg = {}; + + do { + struct i915_runtime_pm *rpm = &i915->runtime_pm; + unsigned long alloc = dbg.count; + depot_stack_handle_t *s; + + spin_lock_irq(&rpm->debug.lock); + dbg.count = rpm->debug.count; + if (dbg.count <= alloc) { + memcpy(dbg.owners, + rpm->debug.owners, + dbg.count * sizeof(*s)); + } + dbg.last_acquire = rpm->debug.last_acquire; + dbg.last_release = rpm->debug.last_release; + spin_unlock_irq(&rpm->debug.lock); + if (dbg.count <= alloc) + break; + + s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL); + if (!s) + goto out; + + dbg.owners = s; + } while (1); + + __print_intel_runtime_pm_wakeref(p, &dbg); + +out: + kfree(dbg.owners); +} + +#else + +static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ +} + +static depot_stack_handle_t +track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ + atomic_inc(&i915->runtime_pm.wakeref_count); + assert_rpm_wakelock_held(i915); + return -1; +} + +static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ + assert_rpm_wakelock_held(i915); + atomic_dec(&i915->runtime_pm.wakeref_count); +} + +#endif + bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); @@ -301,7 +565,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, int pw_idx = power_well->desc->hsw.idx; /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ - WARN_ON(intel_wait_for_register(dev_priv, + WARN_ON(intel_wait_for_register(&dev_priv->uncore, regs->driver, HSW_PWR_WELL_CTL_STATE(pw_idx), HSW_PWR_WELL_CTL_STATE(pw_idx), @@ -356,7 +620,7 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, enum skl_power_gate pg) { /* Timeout 5us for PG#0, for other PGs 1us */ - WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS, + WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS, SKL_FUSE_PG_DIST_STATUS(pg), SKL_FUSE_PG_DIST_STATUS(pg), 1)); } @@ -509,7 +773,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, * BIOS's own request bits, which are forced-on for these power wells * when exiting DC5/6. 
*/ - if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) && + if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) && (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) val |= I915_READ(regs->bios); @@ -639,10 +903,10 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) * back on and register state is restored. This is guaranteed by the MMIO write * to DC_STATE_EN blocking until the state is restored. */ -static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) +static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) { - uint32_t val; - uint32_t mask; + u32 val; + u32 mask; if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) state &= dev_priv->csr.allowed_dc_mask; @@ -1257,7 +1521,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * The PHY may be busy with some initial calibration and whatnot, * so the power state can take a while to actually change. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DISPLAY_PHY_STATUS, phy_status_mask, phy_status, @@ -1274,7 +1538,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, { enum dpio_phy phy; enum pipe pipe; - uint32_t tmp; + u32 tmp; WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); @@ -1292,7 +1556,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_set_power_well(dev_priv, power_well, true); /* Poll for phypwrgood signal */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DISPLAY_PHY_STATUS, PHY_POWERGOOD(phy), PHY_POWERGOOD(phy), @@ -1496,7 +1760,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, mutex_lock(&dev_priv->pcu_lock); - state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); + state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); /* * We only ever set the power-on and power-gate states, anything * else is unexpected. @@ -1508,7 +1772,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, * A transient state at this point would mean some unexpected party * is poking at the power controls too. */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); WARN_ON(ctrl << 16 != state); mutex_unlock(&dev_priv->pcu_lock); @@ -1529,20 +1793,20 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, mutex_lock(&dev_priv->pcu_lock); #define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) + ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) if (COND) goto out; - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); ctrl &= ~DP_SSC_MASK(pipe); ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl); + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); if (wait_for(COND, 100)) DRM_ERROR("timeout setting power well state %08x (%08x)\n", state, - vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ)); + vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); #undef COND @@ -1591,18 +1855,19 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, * Any power domain reference obtained by this function must have a symmetric * call to intel_display_power_put() to release the reference again. 
*/ -void intel_display_power_get(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) +intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - - intel_runtime_pm_get(dev_priv); + intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&power_domains->lock); __intel_display_power_get_domain(dev_priv, domain); mutex_unlock(&power_domains->lock); + + return wakeref; } /** @@ -1617,13 +1882,16 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, * Any power domain reference obtained by this function must have a symmetric * call to intel_display_power_put() to release the reference again. */ -bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) +intel_wakeref_t +intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->power_domains; + intel_wakeref_t wakeref; bool is_enabled; - if (!intel_runtime_pm_get_if_in_use(dev_priv)) + wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + if (!wakeref) return false; mutex_lock(&power_domains->lock); @@ -1637,23 +1905,16 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - if (!is_enabled) - intel_runtime_pm_put(dev_priv); + if (!is_enabled) { + intel_runtime_pm_put(dev_priv, wakeref); + wakeref = 0; + } - return is_enabled; + return wakeref; } -/** - * intel_display_power_put - release a power domain reference - * @dev_priv: i915 device instance - * @domain: power domain to reference - * - * This function drops the power domain reference obtained by - * intel_display_power_get() and might power down the corresponding hardware - * block right away if this is the last reference. - */ -void intel_display_power_put(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) +static void __intel_display_power_put(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) { struct i915_power_domains *power_domains; struct i915_power_well *power_well; @@ -1671,10 +1932,34 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, intel_power_well_put(dev_priv, power_well); mutex_unlock(&power_domains->lock); +} - intel_runtime_pm_put(dev_priv); +/** + * intel_display_power_put - release a power domain reference + * @dev_priv: i915 device instance + * @domain: power domain to reference + * + * This function drops the power domain reference obtained by + * intel_display_power_get() and might power down the corresponding hardware + * block right away if this is the last reference. 
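/*
 * Illustrative sketch, not part of this diff:
 * intel_display_power_get_if_enabled() now returns an intel_wakeref_t
 * cookie (0 on failure) rather than a bool, and the matching put takes that
 * cookie back. Hardware-readout helpers follow the pattern below; the plane
 * ->get_hw_state() hooks later in this diff are converted exactly this way.
 * The function name is hypothetical.
 */
static bool example_read_hw_state(struct drm_i915_private *dev_priv,
				  enum intel_display_power_domain domain)
{
	intel_wakeref_t wakeref;
	bool enabled;

	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
	if (!wakeref)
		return false;	/* power well is off, nothing to read out */

	enabled = true;		/* ... read the registers guarded by @domain ... */

	/* release using the exact cookie that was handed out above */
	intel_display_power_put(dev_priv, domain, wakeref);

	return enabled;
}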
+ */ +void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + __intel_display_power_put(dev_priv, domain); + intel_runtime_pm_put_unchecked(dev_priv); } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +void intel_display_power_put(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref) +{ + __intel_display_power_put(dev_priv, domain); + intel_runtime_pm_put(dev_priv, wakeref); +} +#endif + #define I830_PIPES_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_A) | \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ @@ -3043,10 +3328,10 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, return 1; } -static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, - int enable_dc) +static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, + int enable_dc) { - uint32_t mask; + u32 mask; int requested_dc; int max_dc; @@ -3058,7 +3343,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, * suspend/resume, so allow it unconditionally. */ mask = DC_STATE_EN_DC9; - } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) { + } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) { max_dc = 2; mask = 0; } else if (IS_GEN9_LP(dev_priv)) { @@ -3291,7 +3576,11 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv) !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) DRM_ERROR("DBuf power enable timeout\n"); else - dev_priv->wm.skl_hw.ddb.enabled_slices = 2; + /* + * FIXME: for now pretend that we only have 1 slice, see + * intel_enabled_dbuf_slices_num(). + */ + dev_priv->wm.skl_hw.ddb.enabled_slices = 1; } static void icl_dbuf_disable(struct drm_i915_private *dev_priv) @@ -3306,12 +3595,16 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv) (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) DRM_ERROR("DBuf power disable timeout!\n"); else - dev_priv->wm.skl_hw.ddb.enabled_slices = 0; + /* + * FIXME: for now pretend that the first slice is always + * enabled, see intel_enabled_dbuf_slices_num(). + */ + dev_priv->wm.skl_hw.ddb.enabled_slices = 1; } static void icl_mbus_init(struct drm_i915_private *dev_priv) { - uint32_t val; + u32 val; val = MBUS_ABOX_BT_CREDIT_POOL1(16) | MBUS_ABOX_BT_CREDIT_POOL2(16) | @@ -3622,7 +3915,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) * current lane status. 
*/ if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { - uint32_t status = I915_READ(DPLL(PIPE_A)); + u32 status = I915_READ(DPLL(PIPE_A)); unsigned int mask; mask = status & DPLL_PORTB_READY_MASK; @@ -3653,7 +3946,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) } if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { - uint32_t status = I915_READ(DPIO_PHY_STATUS); + u32 status = I915_READ(DPIO_PHY_STATUS); unsigned int mask; mask = status & DPLL_PORTD_READY_MASK; @@ -3708,11 +4001,41 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) cmn->desc->ops->disable(dev_priv, cmn); } +static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) +{ + bool ret; + + mutex_lock(&dev_priv->pcu_lock); + ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; + mutex_unlock(&dev_priv->pcu_lock); + + return ret; +} + +static void assert_ved_power_gated(struct drm_i915_private *dev_priv) +{ + WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), + "VED not power gated\n"); +} + +static void assert_isp_power_gated(struct drm_i915_private *dev_priv) +{ + static const struct pci_device_id isp_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, + {} + }; + + WARN(!pci_dev_present(isp_ids) && + !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), + "ISP not power gated\n"); +} + static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); /** * intel_power_domains_init_hw - initialize hardware power domain state - * @dev_priv: i915 device instance + * @i915: i915 device instance * @resume: Called from resume code paths or not * * This function initializes the hardware power domain state and enables all @@ -3726,30 +4049,34 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); * intel_power_domains_enable()) and must be paired with * intel_power_domains_fini_hw(). 
*/ -void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) +void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &i915->power_domains; power_domains->initializing = true; - if (IS_ICELAKE(dev_priv)) { - icl_display_core_init(dev_priv, resume); - } else if (IS_CANNONLAKE(dev_priv)) { - cnl_display_core_init(dev_priv, resume); - } else if (IS_GEN9_BC(dev_priv)) { - skl_display_core_init(dev_priv, resume); - } else if (IS_GEN9_LP(dev_priv)) { - bxt_display_core_init(dev_priv, resume); - } else if (IS_CHERRYVIEW(dev_priv)) { + if (IS_ICELAKE(i915)) { + icl_display_core_init(i915, resume); + } else if (IS_CANNONLAKE(i915)) { + cnl_display_core_init(i915, resume); + } else if (IS_GEN9_BC(i915)) { + skl_display_core_init(i915, resume); + } else if (IS_GEN9_LP(i915)) { + bxt_display_core_init(i915, resume); + } else if (IS_CHERRYVIEW(i915)) { mutex_lock(&power_domains->lock); - chv_phy_control_init(dev_priv); + chv_phy_control_init(i915); mutex_unlock(&power_domains->lock); - } else if (IS_VALLEYVIEW(dev_priv)) { + assert_isp_power_gated(i915); + } else if (IS_VALLEYVIEW(i915)) { mutex_lock(&power_domains->lock); - vlv_cmnlane_wa(dev_priv); + vlv_cmnlane_wa(i915); mutex_unlock(&power_domains->lock); - } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7) - intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); + assert_ved_power_gated(i915); + assert_isp_power_gated(i915); + } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) { + intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); + } /* * Keep all power wells enabled for any dependent HW access during @@ -3757,18 +4084,20 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) * resources powered until display HW readout is complete. We drop * this reference in intel_power_domains_enable(). */ - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + power_domains->wakeref = + intel_display_power_get(i915, POWER_DOMAIN_INIT); + /* Disable power support if the user asked so. */ if (!i915_modparams.disable_power_well) - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - intel_power_domains_sync_hw(dev_priv); + intel_display_power_get(i915, POWER_DOMAIN_INIT); + intel_power_domains_sync_hw(i915); power_domains->initializing = false; } /** * intel_power_domains_fini_hw - deinitialize hw power domain state - * @dev_priv: i915 device instance + * @i915: i915 device instance * * De-initializes the display power domain HW state. It also ensures that the * device stays powered up so that the driver can be reloaded. @@ -3777,21 +4106,24 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) * intel_power_domains_disable()) and must be paired with * intel_power_domains_init_hw(). */ -void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) +void intel_power_domains_fini_hw(struct drm_i915_private *i915) { - /* Keep the power well enabled, but cancel its rpm wakeref. */ - intel_runtime_pm_put(dev_priv); + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&i915->power_domains.wakeref); /* Remove the refcount we took to keep power well support disabled. 
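/*
 * Illustrative sketch, not part of this diff: the long-lived
 * POWER_DOMAIN_INIT reference is no longer anonymous. init_hw/disable park
 * the cookie in power_domains->wakeref and enable/fini_hw consume it with
 * fetch_and_zero() (the existing i915 helper that returns the old value and
 * clears the field), so the same cookie can never be released twice. The two
 * helpers below are hypothetical and only restate that pairing.
 */
static void example_park_init_wakeref(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	/* a cookie must not still be outstanding when we park a new one */
	WARN_ON(power_domains->wakeref);
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);
}

static void example_consume_init_wakeref(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref =
		fetch_and_zero(&i915->power_domains.wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
}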
*/ if (!i915_modparams.disable_power_well) - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(i915); - intel_power_domains_verify_state(dev_priv); + /* Keep the power well enabled, but cancel its rpm wakeref. */ + intel_runtime_pm_put(i915, wakeref); } /** * intel_power_domains_enable - enable toggling of display power wells - * @dev_priv: i915 device instance + * @i915: i915 device instance * * Enable the ondemand enabling/disabling of the display power wells. Note that * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled @@ -3801,30 +4133,36 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) * of display HW readout (which will acquire the power references reflecting * the current HW state). */ -void intel_power_domains_enable(struct drm_i915_private *dev_priv) +void intel_power_domains_enable(struct drm_i915_private *i915) { - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&i915->power_domains.wakeref); - intel_power_domains_verify_state(dev_priv); + intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); + intel_power_domains_verify_state(i915); } /** * intel_power_domains_disable - disable toggling of display power wells - * @dev_priv: i915 device instance + * @i915: i915 device instance * * Disable the ondemand enabling/disabling of the display power wells. See * intel_power_domains_enable() for which power wells this call controls. */ -void intel_power_domains_disable(struct drm_i915_private *dev_priv) +void intel_power_domains_disable(struct drm_i915_private *i915) { - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + struct i915_power_domains *power_domains = &i915->power_domains; - intel_power_domains_verify_state(dev_priv); + WARN_ON(power_domains->wakeref); + power_domains->wakeref = + intel_display_power_get(i915, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(i915); } /** * intel_power_domains_suspend - suspend power domain state - * @dev_priv: i915 device instance + * @i915: i915 device instance * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) * * This function prepares the hardware power domain state before entering @@ -3833,12 +4171,14 @@ void intel_power_domains_disable(struct drm_i915_private *dev_priv) * It must be called with power domains already disabled (after a call to * intel_power_domains_disable()) and paired with intel_power_domains_resume(). */ -void intel_power_domains_suspend(struct drm_i915_private *dev_priv, +void intel_power_domains_suspend(struct drm_i915_private *i915, enum i915_drm_suspend_mode suspend_mode) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &i915->power_domains; + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&power_domains->wakeref); - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); /* * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 @@ -3847,10 +4187,10 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv, * resources as required and also enable deeper system power states * that would be blocked if the firmware was inactive. 
*/ - if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) && + if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) && suspend_mode == I915_DRM_SUSPEND_IDLE && - dev_priv->csr.dmc_payload != NULL) { - intel_power_domains_verify_state(dev_priv); + i915->csr.dmc_payload) { + intel_power_domains_verify_state(i915); return; } @@ -3859,25 +4199,25 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv, * power wells if power domains must be deinitialized for suspend. */ if (!i915_modparams.disable_power_well) { - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - intel_power_domains_verify_state(dev_priv); + intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); + intel_power_domains_verify_state(i915); } - if (IS_ICELAKE(dev_priv)) - icl_display_core_uninit(dev_priv); - else if (IS_CANNONLAKE(dev_priv)) - cnl_display_core_uninit(dev_priv); - else if (IS_GEN9_BC(dev_priv)) - skl_display_core_uninit(dev_priv); - else if (IS_GEN9_LP(dev_priv)) - bxt_display_core_uninit(dev_priv); + if (IS_ICELAKE(i915)) + icl_display_core_uninit(i915); + else if (IS_CANNONLAKE(i915)) + cnl_display_core_uninit(i915); + else if (IS_GEN9_BC(i915)) + skl_display_core_uninit(i915); + else if (IS_GEN9_LP(i915)) + bxt_display_core_uninit(i915); power_domains->display_core_suspended = true; } /** * intel_power_domains_resume - resume power domain state - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function resume the hardware power domain state during system resume. * @@ -3885,28 +4225,30 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv, * intel_power_domains_enable()) and must be paired with * intel_power_domains_suspend(). */ -void intel_power_domains_resume(struct drm_i915_private *dev_priv) +void intel_power_domains_resume(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &i915->power_domains; if (power_domains->display_core_suspended) { - intel_power_domains_init_hw(dev_priv, true); + intel_power_domains_init_hw(i915, true); power_domains->display_core_suspended = false; } else { - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + WARN_ON(power_domains->wakeref); + power_domains->wakeref = + intel_display_power_get(i915, POWER_DOMAIN_INIT); } - intel_power_domains_verify_state(dev_priv); + intel_power_domains_verify_state(i915); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) -static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) +static void intel_power_domains_dump_info(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &i915->power_domains; struct i915_power_well *power_well; - for_each_power_well(dev_priv, power_well) { + for_each_power_well(i915, power_well) { enum intel_display_power_domain domain; DRM_DEBUG_DRIVER("%-25s %d\n", @@ -3921,7 +4263,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) /** * intel_power_domains_verify_state - verify the HW/SW state for all power wells - * @dev_priv: i915 device instance + * @i915: i915 device instance * * Verify if the reference count of each power well matches its HW enabled * state and the total refcount of the domains it belongs to. 
This must be @@ -3929,22 +4271,21 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) * acquiring reference counts for any power wells in use and disabling the * ones left on by BIOS but not required by any active output. */ -static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) +static void intel_power_domains_verify_state(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &i915->power_domains; struct i915_power_well *power_well; bool dump_domain_info; mutex_lock(&power_domains->lock); dump_domain_info = false; - for_each_power_well(dev_priv, power_well) { + for_each_power_well(i915, power_well) { enum intel_display_power_domain domain; int domains_count; bool enabled; - enabled = power_well->desc->ops->is_enabled(dev_priv, - power_well); + enabled = power_well->desc->ops->is_enabled(i915, power_well); if ((power_well->count || power_well->desc->always_on) != enabled) DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", @@ -3968,7 +4309,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) static bool dumped; if (!dumped) { - intel_power_domains_dump_info(dev_priv); + intel_power_domains_dump_info(i915); dumped = true; } } @@ -3978,7 +4319,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) #else -static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) +static void intel_power_domains_verify_state(struct drm_i915_private *i915) { } @@ -3986,30 +4327,31 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) /** * intel_runtime_pm_get - grab a runtime pm reference - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function grabs a device-level runtime pm reference (mostly used for GEM * code to ensure the GTT or GT is on) and ensures that it is powered up. * * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. + * + * Returns: the wakeref cookie to pass to intel_runtime_pm_put() */ -void intel_runtime_pm_get(struct drm_i915_private *dev_priv) +intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; int ret; ret = pm_runtime_get_sync(kdev); WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret); - atomic_inc(&dev_priv->runtime_pm.wakeref_count); - assert_rpm_wakelock_held(dev_priv); + return track_intel_runtime_pm_wakeref(i915); } /** * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function grabs a device-level runtime pm reference if the device is * already in use and ensures that it is powered up. It is illegal to try @@ -4018,12 +4360,13 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv) * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. * - * Returns: True if the wakeref was acquired, or False otherwise. + * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates + * as True if the wakeref was acquired, or False otherwise. 
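/*
 * Illustrative sketch, not part of this diff: because the returned cookie
 * evaluates as true only when the reference was actually taken, the common
 * "do work only if the device is already awake" idiom becomes a simple test
 * on the wakeref. The function below is hypothetical.
 */
static void example_flush_if_awake(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_if_in_use(i915);
	if (!wakeref)
		return;	/* device is suspended; the work can be skipped */

	/* ... work that only matters while the hardware is awake ... */

	intel_runtime_pm_put(i915, wakeref);
}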
*/ -bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) { if (IS_ENABLED(CONFIG_PM)) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; /* @@ -4033,18 +4376,15 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) * atm to the late/early system suspend/resume handlers. */ if (pm_runtime_get_if_in_use(kdev) <= 0) - return false; + return 0; } - atomic_inc(&dev_priv->runtime_pm.wakeref_count); - assert_rpm_wakelock_held(dev_priv); - - return true; + return track_intel_runtime_pm_wakeref(i915); } /** * intel_runtime_pm_get_noresume - grab a runtime pm reference - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function grabs a device-level runtime pm reference (mostly used for GEM * code to ensure the GTT or GT is on). @@ -4058,41 +4398,50 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) * * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. + * + * Returns: the wakeref cookie to pass to intel_runtime_pm_put() */ -void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) +intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; - assert_rpm_wakelock_held(dev_priv); + assert_rpm_wakelock_held(i915); pm_runtime_get_noresume(kdev); - atomic_inc(&dev_priv->runtime_pm.wakeref_count); + return track_intel_runtime_pm_wakeref(i915); } /** * intel_runtime_pm_put - release a runtime pm reference - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function drops the device-level runtime pm reference obtained by * intel_runtime_pm_get() and might power down the corresponding * hardware block right away if this is the last reference. */ -void intel_runtime_pm_put(struct drm_i915_private *dev_priv) +void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; - assert_rpm_wakelock_held(dev_priv); - atomic_dec(&dev_priv->runtime_pm.wakeref_count); + untrack_intel_runtime_pm_wakeref(i915); pm_runtime_mark_last_busy(kdev); pm_runtime_put_autosuspend(kdev); } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) +{ + cancel_intel_runtime_pm_wakeref(i915, wref); + intel_runtime_pm_put_unchecked(i915); +} +#endif + /** * intel_runtime_pm_enable - enable runtime pm - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function enables runtime pm at the end of the driver load sequence. * @@ -4100,9 +4449,9 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) * subordinate display power domains. That is done by * intel_power_domains_enable(). */ -void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) +void intel_runtime_pm_enable(struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; /* @@ -4124,7 +4473,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) * so the driver's own RPM reference tracking asserts also work on * platforms without RPM support. 
*/ - if (!HAS_RUNTIME_PM(dev_priv)) { + if (!HAS_RUNTIME_PM(i915)) { int ret; pm_runtime_dont_use_autosuspend(kdev); @@ -4142,17 +4491,35 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) pm_runtime_put_autosuspend(kdev); } -void intel_runtime_pm_disable(struct drm_i915_private *dev_priv) +void intel_runtime_pm_disable(struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; /* Transfer rpm ownership back to core */ - WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0, + WARN(pm_runtime_get_sync(kdev) < 0, "Failed to pass rpm ownership back to core\n"); pm_runtime_dont_use_autosuspend(kdev); - if (!HAS_RUNTIME_PM(dev_priv)) + if (!HAS_RUNTIME_PM(i915)) pm_runtime_put(kdev); } + +void intel_runtime_pm_cleanup(struct drm_i915_private *i915) +{ + struct i915_runtime_pm *rpm = &i915->runtime_pm; + int count; + + count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */ + WARN(count, + "i915->runtime_pm.wakeref_count=%d on cleanup\n", + count); + + untrack_intel_runtime_pm_wakeref(i915); +} + +void intel_runtime_pm_init_early(struct drm_i915_private *i915) +{ + init_intel_runtime_pm_wakeref(i915); +} diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 5805ec1aba12..68f497493d43 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -29,7 +29,6 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/export.h> -#include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> @@ -77,7 +76,7 @@ struct intel_sdvo { i915_reg_t sdvo_reg; /* Active outputs controlled by this SDVO output */ - uint16_t controlled_output; + u16 controlled_output; /* * Capabilities of the SDVO device returned by @@ -92,33 +91,32 @@ struct intel_sdvo { * For multiple function SDVO device, * this is for current attached outputs. 
*/ - uint16_t attached_output; + u16 attached_output; /* * Hotplug activation bits for this device */ - uint16_t hotplug_active; + u16 hotplug_active; enum port port; bool has_hdmi_monitor; bool has_hdmi_audio; - bool rgb_quant_range_selectable; /* DDC bus used by this SDVO encoder */ - uint8_t ddc_bus; + u8 ddc_bus; /* * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd */ - uint8_t dtd_sdvo_flags; + u8 dtd_sdvo_flags; }; struct intel_sdvo_connector { struct intel_connector base; /* Mark the type of connector */ - uint16_t output_flag; + u16 output_flag; /* This contains all current supported TV format */ u8 tv_format_supported[TV_FORMAT_NUM]; @@ -186,7 +184,7 @@ to_intel_sdvo_connector(struct drm_connector *connector) container_of((conn_state), struct intel_sdvo_connector_state, base.base) static bool -intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags); +intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags); static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, @@ -748,9 +746,9 @@ static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo, static bool intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, - uint16_t clock, - uint16_t width, - uint16_t height) + u16 clock, + u16 width, + u16 height) { struct intel_sdvo_preferred_input_timing_args args; @@ -793,9 +791,9 @@ static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, const struct drm_display_mode *mode) { - uint16_t width, height; - uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; - uint16_t h_sync_offset, v_sync_offset; + u16 width, height; + u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len; + u16 h_sync_offset, v_sync_offset; int mode_clock; memset(dtd, 0, sizeof(*dtd)); @@ -900,13 +898,13 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, - uint8_t mode) + u8 mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1); } static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, - uint8_t mode) + u8 mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } @@ -915,11 +913,11 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { int i, j; - uint8_t set_buf_index[2]; - uint8_t av_split; - uint8_t buf_size; - uint8_t buf[48]; - uint8_t *pos; + u8 set_buf_index[2]; + u8 av_split; + u8 buf_size; + u8 buf[48]; + u8 *pos; intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1); @@ -942,11 +940,11 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) #endif static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, - unsigned if_index, uint8_t tx_rate, - const uint8_t *data, unsigned length) + unsigned int if_index, u8 tx_rate, + const u8 *data, unsigned int length) { - uint8_t set_buf_index[2] = { if_index, 0 }; - uint8_t hbuf_size, tmp[8]; + u8 set_buf_index[2] = { if_index, 0 }; + u8 hbuf_size, tmp[8]; int i; if (!intel_sdvo_set_value(intel_sdvo, @@ -980,33 +978,109 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, &tx_rate, 1); } -static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, - const struct intel_crtc_state 
*pipe_config) +static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo, + unsigned int if_index, + u8 *data, unsigned int length) { - uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; - union hdmi_infoframe frame; + u8 set_buf_index[2] = { if_index, 0 }; + u8 hbuf_size, tx_rate, av_split; + int i; + + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_HBUF_AV_SPLIT, + &av_split, 1)) + return -ENXIO; + + if (av_split < if_index) + return 0; + + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_HBUF_TXRATE, + &tx_rate, 1)) + return -ENXIO; + + if (tx_rate == SDVO_HBUF_TX_DISABLED) + return 0; + + if (!intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2)) + return -ENXIO; + + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, + &hbuf_size, 1)) + return -ENXIO; + + /* Buffer size is 0 based, hooray! */ + hbuf_size++; + + DRM_DEBUG_KMS("reading sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", + if_index, length, hbuf_size); + + hbuf_size = min_t(unsigned int, length, hbuf_size); + + for (i = 0; i < hbuf_size; i += 8) { + if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA, NULL, 0)) + return -ENXIO; + if (!intel_sdvo_read_response(intel_sdvo, &data[i], + min_t(unsigned int, 8, hbuf_size - i))) + return -ENXIO; + } + + return hbuf_size; +} + +static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi; + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; int ret; - ssize_t len; - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - &pipe_config->base.adjusted_mode, - false); - if (ret < 0) { - DRM_ERROR("couldn't fill AVI infoframe\n"); + if (!crtc_state->has_hdmi_sink) + return true; + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); + + ret = drm_hdmi_avi_infoframe_from_display_mode(frame, + conn_state->connector, + adjusted_mode); + if (ret) return false; - } - if (intel_sdvo->rgb_quant_range_selectable) { - if (pipe_config->limited_color_range) - frame.avi.quantization_range = - HDMI_QUANTIZATION_RANGE_LIMITED; - else - frame.avi.quantization_range = - HDMI_QUANTIZATION_RANGE_FULL; - } + drm_hdmi_avi_infoframe_quant_range(frame, + conn_state->connector, + adjusted_mode, + crtc_state->limited_color_range ? 
+ HDMI_QUANTIZATION_RANGE_LIMITED : + HDMI_QUANTIZATION_RANGE_FULL); + + ret = hdmi_avi_infoframe_check(frame); + if (WARN_ON(ret)) + return false; - len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data)); - if (len < 0) + return true; +} + +static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, + const struct intel_crtc_state *crtc_state) +{ + u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; + const union hdmi_infoframe *frame = &crtc_state->infoframes.avi; + ssize_t len; + + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0) + return true; + + if (WARN_ON(frame->any.type != HDMI_INFOFRAME_TYPE_AVI)) + return false; + + len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data)); + if (WARN_ON(len < 0)) return false; return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, @@ -1014,11 +1088,45 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, sdvo_data, sizeof(sdvo_data)); } +static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo, + struct intel_crtc_state *crtc_state) +{ + u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; + union hdmi_infoframe *frame = &crtc_state->infoframes.avi; + ssize_t len; + int ret; + + if (!crtc_state->has_hdmi_sink) + return; + + len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, + sdvo_data, sizeof(sdvo_data)); + if (len < 0) { + DRM_DEBUG_KMS("failed to read AVI infoframe\n"); + return; + } else if (len == 0) { + return; + } + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); + + ret = hdmi_infoframe_unpack(frame, sdvo_data, sizeof(sdvo_data)); + if (ret) { + DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n"); + return; + } + + if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI) + DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n", + frame->any.type, HDMI_INFOFRAME_TYPE_AVI); +} + static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo, const struct drm_connector_state *conn_state) { struct intel_sdvo_tv_format format; - uint32_t format_map; + u32 format_map; format_map = 1 << conn_state->tv.mode; memset(&format, 0, sizeof(format)); @@ -1108,9 +1216,9 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config) pipe_config->clock_set = true; } -static bool intel_sdvo_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_sdvo_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_sdvo_connector_state *intel_sdvo_state = @@ -1135,7 +1243,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, */ if (IS_TV(intel_sdvo_connector)) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) - return false; + return -EINVAL; (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, intel_sdvo_connector, @@ -1145,7 +1253,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, } else if (IS_LVDS(intel_sdvo_connector)) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, intel_sdvo_connector->base.panel.fixed_mode)) - return false; + return -EINVAL; (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, intel_sdvo_connector, @@ -1154,7 +1262,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + 
return -EINVAL; /* * Make the CRTC code factor in the SDVO pixel multiplier. The @@ -1194,7 +1302,13 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, if (intel_sdvo_connector->is_hdmi) adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio; - return true; + if (!intel_sdvo_compute_avi_infoframe(intel_sdvo, + pipe_config, conn_state)) { + DRM_DEBUG_KMS("bad AVI infoframe\n"); + return -EINVAL; + } + + return 0; } #define UPDATE_PROPERTY(input, NAME) \ @@ -1209,7 +1323,7 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo, const struct drm_connector_state *conn_state = &sdvo_state->base.base; struct intel_sdvo_connector *intel_sdvo_conn = to_intel_sdvo_connector(conn_state->connector); - uint16_t val; + u16 val; if (intel_sdvo_conn->left) UPDATE_PROPERTY(sdvo_state->tv.overscan_h, OVERSCAN_H); @@ -1507,6 +1621,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, } } + WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, + "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", + pipe_config->pixel_multiplier, encoder_pixel_multiplier); + if (sdvox & HDMI_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; @@ -1519,9 +1637,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, pipe_config->has_hdmi_sink = true; } - WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, - "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", - pipe_config->pixel_multiplier, encoder_pixel_multiplier); + intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config); } static void intel_disable_sdvo(struct intel_encoder *encoder, @@ -1692,10 +1808,10 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in return true; } -static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) +static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) { struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); - uint16_t hotplug; + u16 hotplug; if (!I915_HAS_HOTPLUG(dev_priv)) return 0; @@ -1802,8 +1918,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector) if (intel_sdvo_connector->is_hdmi) { intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); - intel_sdvo->rgb_quant_range_selectable = - drm_rgb_quant_range_selectable(edid); } } else status = connector_status_disconnected; @@ -1828,7 +1942,7 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo, static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { - uint16_t response; + u16 response; struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; @@ -1852,7 +1966,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) intel_sdvo->has_hdmi_monitor = false; intel_sdvo->has_hdmi_audio = false; - intel_sdvo->rgb_quant_range_selectable = false; if ((intel_sdvo_connector->output_flag & response) == 0) ret = connector_status_disconnected; @@ -1980,7 +2093,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); const struct drm_connector_state *conn_state = connector->state; struct intel_sdvo_sdtv_resolution_request tv_res; - uint32_t reply = 0, format_map = 0; + u32 reply = 0, format_map = 0; int i; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", @@ 
-2065,7 +2178,7 @@ static int intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, - uint64_t *val) + u64 *val) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state); @@ -2124,7 +2237,7 @@ static int intel_sdvo_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, - uint64_t val) + u64 val) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state); @@ -2273,7 +2386,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { static void intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) { - uint16_t mask = 0; + u16 mask = 0; unsigned int num_bits; /* @@ -2674,7 +2787,7 @@ err: } static bool -intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) +intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) { /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ @@ -2750,7 +2863,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, { struct drm_device *dev = intel_sdvo->base.base.dev; struct intel_sdvo_tv_format format; - uint32_t format_map, i; + u32 format_map, i; if (!intel_sdvo_set_target_output(intel_sdvo, type)) return false; @@ -2817,7 +2930,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, struct drm_connector_state *conn_state = connector->state; struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state); - uint16_t response, data_value[2]; + u16 response, data_value[2]; /* when horizontal overscan is supported, Add the left/right property */ if (enhancements.overscan_h) { @@ -2928,7 +3041,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; - uint16_t response, data_value[2]; + u16 response, data_value[2]; ENHANCEMENT(&connector->state->tv, brightness, BRIGHTNESS); @@ -2942,7 +3055,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, { union { struct intel_sdvo_enhancements_reply reply; - uint16_t response; + u16 response; } enhancements; BUILD_BUG_ON(sizeof(enhancements) != 2); diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 75c872bb8cc9..57de41b1f989 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -51,7 +51,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, 5)) { DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n", @@ -63,7 +63,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, I915_WRITE(VLV_IOSF_DATA, is_read ? 
0 : *val); I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, 5)) { DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n", @@ -208,7 +208,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, u32 value = 0; WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, SBI_CTL_STAT, SBI_BUSY, 0, 100)) { DRM_ERROR("timeout waiting for SBI to become ready\n"); @@ -224,7 +224,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, SBI_CTL_STAT, SBI_BUSY, 0, @@ -248,7 +248,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, SBI_CTL_STAT, SBI_BUSY, 0, 100)) { DRM_ERROR("timeout waiting for SBI to become ready\n"); @@ -264,7 +264,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR; I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, SBI_CTL_STAT, SBI_BUSY, 0, diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index d2e003d8f3db..65de7387bf1b 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -29,7 +29,6 @@ * registers; newer ones are much simpler and we can use the new DRM plane * support. */ -#include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_fourcc.h> @@ -42,6 +41,19 @@ #include "i915_drv.h" #include <drm/drm_color_mgmt.h> +bool is_planar_yuv_format(u32 pixelformat) +{ + switch (pixelformat) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + return true; + default: + return false; + } +} + int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) { @@ -257,7 +269,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->base.fb; struct drm_rect *src = &plane_state->base.src; - u32 src_x, src_y, src_w, src_h; + u32 src_x, src_y, src_w, src_h, hsub, vsub; + bool rotated = drm_rotation_90_or_270(plane_state->base.rotation); /* * Hardware doesn't handle subpixel coordinates. @@ -275,18 +288,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) src->y1 = src_y << 16; src->y2 = (src_y + src_h) << 16; - if (fb->format->is_yuv && - (src_x & 1 || src_w & 1)) { - DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n", - src_x, src_w); + if (!fb->format->is_yuv) + return 0; + + /* YUV specific checks */ + if (!rotated) { + hsub = fb->format->hsub; + vsub = fb->format->vsub; + } else { + hsub = vsub = max(fb->format->hsub, fb->format->vsub); + } + + if (src_x % hsub || src_w % hsub) { + DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n", + src_x, src_w, hsub, rotated ? 
"rotated " : ""); return -EINVAL; } - if (fb->format->is_yuv && - fb->format->num_planes > 1 && - (src_y & 1 || src_h & 1)) { - DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n", - src_y, src_h); + if (src_y % vsub || src_h % vsub) { + DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n", + src_y, src_h, vsub, rotated ? "rotated " : ""); return -EINVAL; } @@ -322,8 +343,8 @@ skl_program_scaler(struct intel_plane *plane, &crtc_state->scaler_state.scalers[scaler_id]; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); + u32 crtc_w = drm_rect_width(&plane_state->base.dst); + u32 crtc_h = drm_rect_height(&plane_state->base.dst); u16 y_hphase, uv_rgb_hphase; u16 y_vphase, uv_rgb_vphase; int hscale, vscale; @@ -336,8 +357,8 @@ skl_program_scaler(struct intel_plane *plane, 0, INT_MAX); /* TODO: handle sub-pixel coordinates */ - if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 && - !icl_is_hdr_plane(plane)) { + if (is_planar_yuv_format(plane_state->base.fb->format->format) && + !icl_is_hdr_plane(dev_priv, plane->id)) { y_hphase = skl_scaler_calc_phase(1, hscale, false); y_vphase = skl_scaler_calc_phase(1, vscale, false); @@ -478,23 +499,30 @@ skl_program_plane(struct intel_plane *plane, u32 aux_stride = skl_plane_stride(plane_state, 1); int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t x = plane_state->color_plane[color_plane].x; - uint32_t y = plane_state->color_plane[color_plane].y; - uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; + u32 x = plane_state->color_plane[color_plane].x; + u32 y = plane_state->color_plane[color_plane].y; + u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; + u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; struct intel_plane *linked = plane_state->linked_plane; const struct drm_framebuffer *fb = plane_state->base.fb; u8 alpha = plane_state->base.alpha >> 8; + u32 plane_color_ctl = 0; unsigned long irqflags; u32 keymsk, keymax; + plane_ctl |= skl_plane_ctl_crtc(crtc_state); + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + plane_color_ctl = plane_state->color_ctl | + glk_plane_color_ctl_crtc(crtc_state); + /* Sizes are 0 based */ src_w--; src_h--; keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); - keymsk = key->channel_mask & 0x3ffffff; + keymsk = key->channel_mask & 0x7ffffff; if (alpha < 0xff) keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; @@ -512,7 +540,7 @@ skl_program_plane(struct intel_plane *plane, I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), (plane_state->color_plane[1].offset - surf_addr) | aux_stride); - if (icl_is_hdr_plane(plane)) { + if (icl_is_hdr_plane(dev_priv, plane_id)) { u32 cus_ctl = 0; if (linked) { @@ -534,10 +562,9 @@ skl_program_plane(struct intel_plane *plane, } if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), - plane_state->color_ctl); + I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); - if (fb->format->is_yuv && icl_is_hdr_plane(plane)) + if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) icl_program_input_csc(plane, crtc_state, plane_state); skl_write_plane_wm(plane, crtc_state); @@ -604,6 +631,9 @@ skl_disable_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, 
irqflags); + if (icl_is_hdr_plane(dev_priv, plane_id)) + I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0); + skl_write_plane_wm(plane, crtc_state); I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); @@ -619,17 +649,19 @@ skl_plane_get_hw_state(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum plane_id plane_id = plane->id; + intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE; *pipe = plane->pipe; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -732,6 +764,16 @@ vlv_update_clrc(const struct intel_plane_state *plane_state) SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos)); } +static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + u32 sprctl = 0; + + if (crtc_state->gamma_enable) + sprctl |= SP_GAMMA_ENABLE; + + return sprctl; +} + static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -740,7 +782,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 sprctl; - sprctl = SP_ENABLE | SP_GAMMA_ENABLE; + sprctl = SP_ENABLE; switch (fb->format->format) { case DRM_FORMAT_YUYV: @@ -807,17 +849,19 @@ vlv_update_plane(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; - u32 sprctl = plane_state->ctl; u32 sprsurf_offset = plane_state->color_plane[0].offset; u32 linear_offset; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->color_plane[0].x; - uint32_t y = plane_state->color_plane[0].y; + u32 crtc_w = drm_rect_width(&plane_state->base.dst); + u32 crtc_h = drm_rect_height(&plane_state->base.dst); + u32 x = plane_state->color_plane[0].x; + u32 y = plane_state->color_plane[0].y; unsigned long irqflags; + u32 sprctl; + + sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state); /* Sizes are 0 based */ crtc_w--; @@ -883,21 +927,36 @@ vlv_plane_get_hw_state(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum plane_id plane_id = plane->id; + intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE; *pipe = plane->pipe; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } +static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + u32 sprctl = 0; + + if (crtc_state->gamma_enable) + sprctl |= SPRITE_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + sprctl |= SPRITE_PIPE_CSC_ENABLE; + + return sprctl; +} + static u32 ivb_sprite_ctl(const struct 
intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -908,14 +967,11 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 sprctl; - sprctl = SPRITE_ENABLE | SPRITE_GAMMA_ENABLE; + sprctl = SPRITE_ENABLE; if (IS_IVYBRIDGE(dev_priv)) sprctl |= SPRITE_TRICKLE_FEED_DISABLE; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - sprctl |= SPRITE_PIPE_CSC_ENABLE; - switch (fb->format->format) { case DRM_FORMAT_XBGR8888: sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; @@ -967,20 +1023,22 @@ ivb_update_plane(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; - u32 sprctl = plane_state->ctl, sprscale = 0; u32 sprsurf_offset = plane_state->color_plane[0].offset; u32 linear_offset; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->color_plane[0].x; - uint32_t y = plane_state->color_plane[0].y; - uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; + u32 crtc_w = drm_rect_width(&plane_state->base.dst); + u32 crtc_h = drm_rect_height(&plane_state->base.dst); + u32 x = plane_state->color_plane[0].x; + u32 y = plane_state->color_plane[0].y; + u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; + u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; + u32 sprctl, sprscale = 0; unsigned long irqflags; + sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); + /* Sizes are 0 based */ src_w--; src_h--; @@ -1052,17 +1110,19 @@ ivb_plane_get_hw_state(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE; *pipe = plane->pipe; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -1075,6 +1135,19 @@ g4x_sprite_max_stride(struct intel_plane *plane, return 16384; } +static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + u32 dvscntr = 0; + + if (crtc_state->gamma_enable) + dvscntr |= DVS_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + dvscntr |= DVS_PIPE_CSC_ENABLE; + + return dvscntr; +} + static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -1085,9 +1158,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 dvscntr; - dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE; + dvscntr = DVS_ENABLE; - if (IS_GEN6(dev_priv)) + if (IS_GEN(dev_priv, 6)) dvscntr |= DVS_TRICKLE_FEED_DISABLE; switch (fb->format->format) { @@ -1141,20 +1214,22 @@ g4x_update_plane(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; - u32 dvscntr = plane_state->ctl, dvsscale = 0; u32 dvssurf_offset = plane_state->color_plane[0].offset; 
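The sprite hunks here move the gamma and pipe CSC enable bits out of the precomputed plane_state->ctl and into small *_sprite_ctl_crtc() helpers evaluated at update time, so crtc-level colour state no longer has to be baked into the plane control value. A minimal user-space sketch of that split; the bit positions are made up and only stand in for the real SPRITE_, SP_ and DVS_ definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up control bits standing in for the SPRITE_, SP_ and DVS_ bits. */
#define CTL_ENABLE	(1u << 31)
#define CTL_GAMMA	(1u << 30)
#define CTL_PIPE_CSC	(1u << 23)

struct crtc_state { bool gamma_enable, csc_enable; };
struct plane_state { uint32_t ctl; };	/* precomputed at plane check time */

/* Bits that depend only on crtc state, evaluated at update time. */
static uint32_t sprite_ctl_crtc(const struct crtc_state *crtc)
{
	uint32_t ctl = 0;

	if (crtc->gamma_enable)
		ctl |= CTL_GAMMA;
	if (crtc->csc_enable)
		ctl |= CTL_PIPE_CSC;

	return ctl;
}

int main(void)
{
	struct crtc_state crtc = { .gamma_enable = true, .csc_enable = false };
	struct plane_state plane = { .ctl = CTL_ENABLE };

	/* Final value for the control register: plane bits OR crtc bits. */
	uint32_t ctl = plane.ctl | sprite_ctl_crtc(&crtc);

	printf("ctl = 0x%08x\n", (unsigned int)ctl);
	return 0;
}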
u32 linear_offset; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->color_plane[0].x; - uint32_t y = plane_state->color_plane[0].y; - uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; + u32 crtc_w = drm_rect_width(&plane_state->base.dst); + u32 crtc_h = drm_rect_height(&plane_state->base.dst); + u32 x = plane_state->color_plane[0].x; + u32 y = plane_state->color_plane[0].y; + u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; + u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; + u32 dvscntr, dvsscale = 0; unsigned long irqflags; + dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state); + /* Sizes are 0 based */ src_w--; src_h--; @@ -1218,17 +1293,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE; *pipe = plane->pipe; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -1443,8 +1520,6 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, /* * 90/270 is not allowed with RGB64 16:16:16:16 and * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards. - * TBD: Add RGB64 case once its added in supported format - * list. 
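The *_plane_get_hw_state() hunks above switch from a bare boolean power-well check to a tracked reference: intel_display_power_get_if_enabled() now hands back an intel_wakeref_t cookie, and the matching intel_display_power_put() takes that cookie so a leaked reference can be attributed to its taker. A compile-and-run sketch of the cookie pattern, with toy helpers in place of the real display power code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t wakeref_t;	/* stand-in for intel_wakeref_t */

/*
 * Toy tracked acquire: returns 0 when the power well is down, otherwise a
 * non-zero cookie identifying this particular reference.
 */
static wakeref_t power_get_if_enabled(void)
{
	static uintptr_t next_cookie = 1;
	bool powered_up = true;	/* pretend the power well is up */

	return powered_up ? next_cookie++ : 0;
}

static void power_put(wakeref_t wakeref)
{
	/* A tracking implementation would cross this cookie off a list here. */
	printf("released wakeref %lu\n", (unsigned long)wakeref);
}

static bool plane_get_hw_state(uint32_t ctl_reg)
{
	wakeref_t wakeref;
	bool ret;

	wakeref = power_get_if_enabled();
	if (!wakeref)
		return false;	/* powered down, the plane cannot be enabled */

	ret = ctl_reg & (1u << 31);	/* sample the enable bit */

	power_put(wakeref);	/* the cookie pairs this put with the get above */
	return ret;
}

int main(void)
{
	printf("enabled: %d\n", plane_get_hw_state(1u << 31));
	return 0;
}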
*/ switch (fb->format->format) { case DRM_FORMAT_RGB565: @@ -1452,6 +1527,15 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, break; /* fall through */ case DRM_FORMAT_C8: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", drm_get_format_name(fb->format->format, &format_name)); @@ -1512,10 +1596,10 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s int src_w = drm_rect_width(&plane_state->base.src) >> 16; /* Display WA #1106 */ - if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 && + if (is_planar_yuv_format(fb->format->format) && src_w & 3 && (rotation == DRM_MODE_ROTATE_270 || rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { - DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n"); + DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n"); return -EINVAL; } @@ -1699,7 +1783,7 @@ out: return ret; } -static const uint32_t g4x_plane_formats[] = { +static const u32 g4x_plane_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, @@ -1707,13 +1791,13 @@ static const uint32_t g4x_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const uint64_t i9xx_plane_format_modifiers[] = { +static const u64 i9xx_plane_format_modifiers[] = { I915_FORMAT_MOD_X_TILED, DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; -static const uint32_t snb_plane_formats[] = { +static const u32 snb_plane_formats[] = { DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_YUYV, @@ -1722,7 +1806,7 @@ static const uint32_t snb_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const uint32_t vlv_plane_formats[] = { +static const u32 vlv_plane_formats[] = { DRM_FORMAT_RGB565, DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, @@ -1736,7 +1820,22 @@ static const uint32_t vlv_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const uint32_t skl_plane_formats[] = { +static const u32 skl_plane_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, +}; + +static const u32 icl_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1749,9 +1848,40 @@ static const uint32_t skl_plane_formats[] = { DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, }; -static const uint32_t skl_planar_formats[] = { +static const u32 icl_hdr_plane_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, + DRM_FORMAT_ARGB16161616F, + DRM_FORMAT_ABGR16161616F, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, +}; + +static const u32 skl_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1767,7 +1897,80 @@ static const uint32_t 
skl_planar_formats[] = { DRM_FORMAT_NV12, }; -static const uint64_t skl_plane_format_modifiers_noccs[] = { +static const u32 glk_planar_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, +}; + +static const u32 icl_planar_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, +}; + +static const u32 icl_hdr_planar_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, + DRM_FORMAT_ARGB16161616F, + DRM_FORMAT_ABGR16161616F, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, +}; + +static const u64 skl_plane_format_modifiers_noccs[] = { I915_FORMAT_MOD_Yf_TILED, I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED, @@ -1775,7 +1978,7 @@ static const uint64_t skl_plane_format_modifiers_noccs[] = { DRM_FORMAT_MOD_INVALID }; -static const uint64_t skl_plane_format_modifiers_ccs[] = { +static const u64 skl_plane_format_modifiers_ccs[] = { I915_FORMAT_MOD_Yf_TILED_CCS, I915_FORMAT_MOD_Y_TILED_CCS, I915_FORMAT_MOD_Yf_TILED, @@ -1906,10 +2109,23 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + case DRM_FORMAT_XVYU2101010: if (modifier == I915_FORMAT_MOD_Yf_TILED) return true; /* fall through */ case DRM_FORMAT_C8: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED || modifier == I915_FORMAT_MOD_Y_TILED) @@ -1983,7 +2199,7 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) return false; - if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C) + if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C) return false; if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0) @@ -2046,8 +2262,25 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->update_slave = icl_update_slave; if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { - formats = skl_planar_formats; - num_formats = ARRAY_SIZE(skl_planar_formats); + if (icl_is_hdr_plane(dev_priv, plane_id)) { + formats = icl_hdr_planar_formats; + 
num_formats = ARRAY_SIZE(icl_hdr_planar_formats); + } else if (INTEL_GEN(dev_priv) >= 11) { + formats = icl_planar_formats; + num_formats = ARRAY_SIZE(icl_planar_formats); + } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) { + formats = glk_planar_formats; + num_formats = ARRAY_SIZE(glk_planar_formats); + } else { + formats = skl_planar_formats; + num_formats = ARRAY_SIZE(skl_planar_formats); + } + } else if (icl_is_hdr_plane(dev_priv, plane_id)) { + formats = icl_hdr_plane_formats; + num_formats = ARRAY_SIZE(icl_hdr_plane_formats); + } else if (INTEL_GEN(dev_priv) >= 11) { + formats = icl_plane_formats; + num_formats = ARRAY_SIZE(icl_plane_formats); } else { formats = skl_plane_formats; num_formats = ARRAY_SIZE(skl_plane_formats); @@ -2163,7 +2396,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->check_plane = g4x_sprite_check; modifiers = i9xx_plane_format_modifiers; - if (IS_GEN6(dev_priv)) { + if (IS_GEN(dev_priv, 6)) { formats = snb_plane_formats; num_formats = ARRAY_SIZE(snb_plane_formats); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 860f306a23ba..3924c4944e1f 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -30,7 +30,6 @@ * Integrated TV-out support for the 915GM and 945GM. */ -#include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> @@ -307,7 +306,7 @@ struct tv_mode { u32 clock; u16 refresh; /* in millihertz (for precision) */ - u32 oversample; + u8 oversample; u8 hsync_end; u16 hblank_start, hblank_end, htotal; bool progressive : 1, trilevel_sync : 1, component_only : 1; @@ -340,7 +339,6 @@ struct tv_mode { const struct video_levels *composite_levels, *svideo_levels; const struct color_conversion *composite_color, *svideo_color; const u32 *filter_table; - u16 max_srcw; }; @@ -379,8 +377,8 @@ static const struct tv_mode tv_modes[] = { .name = "NTSC-M", .clock = 108000, .refresh = 59940, - .oversample = TV_OVERSAMPLE_8X, - .component_only = 0, + .oversample = 8, + .component_only = false, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ .hsync_end = 64, .hblank_end = 124, @@ -422,8 +420,8 @@ static const struct tv_mode tv_modes[] = { .name = "NTSC-443", .clock = 108000, .refresh = 59940, - .oversample = TV_OVERSAMPLE_8X, - .component_only = 0, + .oversample = 8, + .component_only = false, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */ .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, @@ -464,8 +462,8 @@ static const struct tv_mode tv_modes[] = { .name = "NTSC-J", .clock = 108000, .refresh = 59940, - .oversample = TV_OVERSAMPLE_8X, - .component_only = 0, + .oversample = 8, + .component_only = false, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ .hsync_end = 64, .hblank_end = 124, @@ -507,8 +505,8 @@ static const struct tv_mode tv_modes[] = { .name = "PAL-M", .clock = 108000, .refresh = 59940, - .oversample = TV_OVERSAMPLE_8X, - .component_only = 0, + .oversample = 8, + .component_only = false, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ .hsync_end = 64, .hblank_end = 124, @@ -551,8 +549,8 @@ static const struct tv_mode tv_modes[] = { .name = "PAL-N", .clock = 108000, .refresh = 50000, - .oversample = TV_OVERSAMPLE_8X, - .component_only = 0, + .oversample = 8, + .component_only = false, .hsync_end = 64, .hblank_end = 128, .hblank_start = 844, .htotal = 863, @@ -596,8 +594,8 @@ static const struct tv_mode tv_modes[] = { .name 
= "PAL", .clock = 108000, .refresh = 50000, - .oversample = TV_OVERSAMPLE_8X, - .component_only = 0, + .oversample = 8, + .component_only = false, .hsync_end = 64, .hblank_end = 142, .hblank_start = 844, .htotal = 863, @@ -636,10 +634,10 @@ static const struct tv_mode tv_modes[] = { }, { .name = "480p", - .clock = 107520, + .clock = 108000, .refresh = 59940, - .oversample = TV_OVERSAMPLE_4X, - .component_only = 1, + .oversample = 4, + .component_only = true, .hsync_end = 64, .hblank_end = 122, .hblank_start = 842, .htotal = 857, @@ -660,10 +658,10 @@ static const struct tv_mode tv_modes[] = { }, { .name = "576p", - .clock = 107520, + .clock = 108000, .refresh = 50000, - .oversample = TV_OVERSAMPLE_4X, - .component_only = 1, + .oversample = 4, + .component_only = true, .hsync_end = 64, .hblank_end = 139, .hblank_start = 859, .htotal = 863, @@ -684,10 +682,10 @@ static const struct tv_mode tv_modes[] = { }, { .name = "720p@60Hz", - .clock = 148800, + .clock = 148500, .refresh = 60000, - .oversample = TV_OVERSAMPLE_2X, - .component_only = 1, + .oversample = 2, + .component_only = true, .hsync_end = 80, .hblank_end = 300, .hblank_start = 1580, .htotal = 1649, @@ -708,10 +706,10 @@ static const struct tv_mode tv_modes[] = { }, { .name = "720p@50Hz", - .clock = 148800, + .clock = 148500, .refresh = 50000, - .oversample = TV_OVERSAMPLE_2X, - .component_only = 1, + .oversample = 2, + .component_only = true, .hsync_end = 80, .hblank_end = 300, .hblank_start = 1580, .htotal = 1979, @@ -729,14 +727,13 @@ static const struct tv_mode tv_modes[] = { .burst_ena = false, .filter_table = filter_table, - .max_srcw = 800 }, { .name = "1080i@50Hz", - .clock = 148800, + .clock = 148500, .refresh = 50000, - .oversample = TV_OVERSAMPLE_2X, - .component_only = 1, + .oversample = 2, + .component_only = true, .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2639, @@ -759,10 +756,10 @@ static const struct tv_mode tv_modes[] = { }, { .name = "1080i@60Hz", - .clock = 148800, + .clock = 148500, .refresh = 60000, - .oversample = TV_OVERSAMPLE_2X, - .component_only = 1, + .oversample = 2, + .component_only = true, .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2199, @@ -783,8 +780,115 @@ static const struct tv_mode tv_modes[] = { .filter_table = filter_table, }, + + { + .name = "1080p@30Hz", + .clock = 148500, + .refresh = 30000, + .oversample = 2, + .component_only = true, + + .hsync_end = 88, .hblank_end = 235, + .hblank_start = 2155, .htotal = 2199, + + .progressive = true, .trilevel_sync = true, + + .vsync_start_f1 = 8, .vsync_start_f2 = 8, + .vsync_len = 10, + + .veq_ena = false, .veq_start_f1 = 0, + .veq_start_f2 = 0, .veq_len = 0, + + .vi_end_f1 = 44, .vi_end_f2 = 44, + .nbr_end = 1079, + + .burst_ena = false, + + .filter_table = filter_table, + }, + + { + .name = "1080p@50Hz", + .clock = 148500, + .refresh = 50000, + .oversample = 1, + .component_only = true, + + .hsync_end = 88, .hblank_end = 235, + .hblank_start = 2155, .htotal = 2639, + + .progressive = true, .trilevel_sync = true, + + .vsync_start_f1 = 8, .vsync_start_f2 = 8, + .vsync_len = 10, + + .veq_ena = false, .veq_start_f1 = 0, + .veq_start_f2 = 0, .veq_len = 0, + + .vi_end_f1 = 44, .vi_end_f2 = 44, + .nbr_end = 1079, + + .burst_ena = false, + + .filter_table = filter_table, + }, + + { + .name = "1080p@60Hz", + .clock = 148500, + .refresh = 60000, + .oversample = 1, + .component_only = true, + + .hsync_end = 88, .hblank_end = 235, + .hblank_start = 2155, .htotal = 2199, + + .progressive = true, .trilevel_sync = 
true, + + .vsync_start_f1 = 8, .vsync_start_f2 = 8, + .vsync_len = 10, + + .veq_ena = false, .veq_start_f1 = 0, + .veq_start_f2 = 0, .veq_len = 0, + + .vi_end_f1 = 44, .vi_end_f2 = 44, + .nbr_end = 1079, + + .burst_ena = false, + + .filter_table = filter_table, + }, }; +struct intel_tv_connector_state { + struct drm_connector_state base; + + /* + * May need to override the user margins for + * gen3 >1024 wide source vertical centering. + */ + struct { + u16 top, bottom; + } margins; + + bool bypass_vfilter; +}; + +#define to_intel_tv_connector_state(x) container_of(x, struct intel_tv_connector_state, base) + +static struct drm_connector_state * +intel_tv_connector_duplicate_state(struct drm_connector *connector) +{ + struct intel_tv_connector_state *state; + + state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_connector_duplicate_state(connector, &state->base); + return &state->base; +} + static struct intel_tv *enc_to_tv(struct intel_encoder *encoder) { return container_of(encoder, struct intel_tv, base); @@ -860,45 +964,370 @@ intel_tv_mode_valid(struct drm_connector *connector, return MODE_CLOCK_RANGE; } +static int +intel_tv_mode_vdisplay(const struct tv_mode *tv_mode) +{ + if (tv_mode->progressive) + return tv_mode->nbr_end + 1; + else + return 2 * (tv_mode->nbr_end + 1); +} + +static void +intel_tv_mode_to_mode(struct drm_display_mode *mode, + const struct tv_mode *tv_mode) +{ + mode->clock = tv_mode->clock / + (tv_mode->oversample >> !tv_mode->progressive); + + /* + * tv_mode horizontal timings: + * + * hsync_end + * | hblank_end + * | | hblank_start + * | | | htotal + * | _______ | + * ____/ \___ + * \__/ \ + */ + mode->hdisplay = + tv_mode->hblank_start - tv_mode->hblank_end; + mode->hsync_start = mode->hdisplay + + tv_mode->htotal - tv_mode->hblank_start; + mode->hsync_end = mode->hsync_start + + tv_mode->hsync_end; + mode->htotal = tv_mode->htotal + 1; + + /* + * tv_mode vertical timings: + * + * vsync_start + * | vsync_end + * | | vi_end nbr_end + * | | | | + * | | _______ + * \__ ____/ \ + * \__/ + */ + mode->vdisplay = intel_tv_mode_vdisplay(tv_mode); + if (tv_mode->progressive) { + mode->vsync_start = mode->vdisplay + + tv_mode->vsync_start_f1 + 1; + mode->vsync_end = mode->vsync_start + + tv_mode->vsync_len; + mode->vtotal = mode->vdisplay + + tv_mode->vi_end_f1 + 1; + } else { + mode->vsync_start = mode->vdisplay + + tv_mode->vsync_start_f1 + 1 + + tv_mode->vsync_start_f2 + 1; + mode->vsync_end = mode->vsync_start + + 2 * tv_mode->vsync_len; + mode->vtotal = mode->vdisplay + + tv_mode->vi_end_f1 + 1 + + tv_mode->vi_end_f2 + 1; + } + + /* TV has it's own notion of sync and other mode flags, so clear them. */ + mode->flags = 0; + + mode->vrefresh = 0; + mode->vrefresh = drm_mode_vrefresh(mode); + + snprintf(mode->name, sizeof(mode->name), + "%dx%d%c (%s)", + mode->hdisplay, mode->vdisplay, + tv_mode->progressive ? 
'p' : 'i', + tv_mode->name); +} + +static void intel_tv_scale_mode_horiz(struct drm_display_mode *mode, + int hdisplay, int left_margin, + int right_margin) +{ + int hsync_start = mode->hsync_start - mode->hdisplay + right_margin; + int hsync_end = mode->hsync_end - mode->hdisplay + right_margin; + int new_htotal = mode->htotal * hdisplay / + (mode->hdisplay - left_margin - right_margin); + + mode->clock = mode->clock * new_htotal / mode->htotal; + + mode->hdisplay = hdisplay; + mode->hsync_start = hdisplay + hsync_start * new_htotal / mode->htotal; + mode->hsync_end = hdisplay + hsync_end * new_htotal / mode->htotal; + mode->htotal = new_htotal; +} + +static void intel_tv_scale_mode_vert(struct drm_display_mode *mode, + int vdisplay, int top_margin, + int bottom_margin) +{ + int vsync_start = mode->vsync_start - mode->vdisplay + bottom_margin; + int vsync_end = mode->vsync_end - mode->vdisplay + bottom_margin; + int new_vtotal = mode->vtotal * vdisplay / + (mode->vdisplay - top_margin - bottom_margin); + + mode->clock = mode->clock * new_vtotal / mode->vtotal; + + mode->vdisplay = vdisplay; + mode->vsync_start = vdisplay + vsync_start * new_vtotal / mode->vtotal; + mode->vsync_end = vdisplay + vsync_end * new_vtotal / mode->vtotal; + mode->vtotal = new_vtotal; +} static void intel_tv_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + struct drm_display_mode mode = {}; + u32 tv_ctl, hctl1, hctl3, vctl1, vctl2, tmp; + struct tv_mode tv_mode = {}; + int hdisplay = adjusted_mode->crtc_hdisplay; + int vdisplay = adjusted_mode->crtc_vdisplay; + int xsize, ysize, xpos, ypos; + pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT); - pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; + tv_ctl = I915_READ(TV_CTL); + hctl1 = I915_READ(TV_H_CTL_1); + hctl3 = I915_READ(TV_H_CTL_3); + vctl1 = I915_READ(TV_V_CTL_1); + vctl2 = I915_READ(TV_V_CTL_2); + + tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT; + tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT; + + tv_mode.hblank_start = (hctl3 & TV_HBLANK_START_MASK) >> TV_HBLANK_START_SHIFT; + tv_mode.hblank_end = (hctl3 & TV_HSYNC_END_MASK) >> TV_HBLANK_END_SHIFT; + + tv_mode.nbr_end = (vctl1 & TV_NBR_END_MASK) >> TV_NBR_END_SHIFT; + tv_mode.vi_end_f1 = (vctl1 & TV_VI_END_F1_MASK) >> TV_VI_END_F1_SHIFT; + tv_mode.vi_end_f2 = (vctl1 & TV_VI_END_F2_MASK) >> TV_VI_END_F2_SHIFT; + + tv_mode.vsync_len = (vctl2 & TV_VSYNC_LEN_MASK) >> TV_VSYNC_LEN_SHIFT; + tv_mode.vsync_start_f1 = (vctl2 & TV_VSYNC_START_F1_MASK) >> TV_VSYNC_START_F1_SHIFT; + tv_mode.vsync_start_f2 = (vctl2 & TV_VSYNC_START_F2_MASK) >> TV_VSYNC_START_F2_SHIFT; + + tv_mode.clock = pipe_config->port_clock; + + tv_mode.progressive = tv_ctl & TV_PROGRESSIVE; + + switch (tv_ctl & TV_OVERSAMPLE_MASK) { + case TV_OVERSAMPLE_8X: + tv_mode.oversample = 8; + break; + case TV_OVERSAMPLE_4X: + tv_mode.oversample = 4; + break; + case TV_OVERSAMPLE_2X: + tv_mode.oversample = 2; + break; + default: + tv_mode.oversample = 1; + break; + } + + tmp = I915_READ(TV_WIN_POS); + xpos = tmp >> 16; + ypos = tmp & 0xffff; + + tmp = I915_READ(TV_WIN_SIZE); + xsize = tmp >> 16; + ysize = tmp & 0xffff; + + intel_tv_mode_to_mode(&mode, &tv_mode); + + DRM_DEBUG_KMS("TV mode:\n"); + drm_mode_debug_printmodeline(&mode); + + intel_tv_scale_mode_horiz(&mode, hdisplay, + xpos, mode.hdisplay - xsize - xpos); 
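intel_tv_mode_to_mode() above derives the pipe's dotclock from the TV encoder clock by dividing out the oversampling factor, and the divisor is effectively halved for interlaced modes since each field carries half the lines. A standalone sketch of that arithmetic, using clock/oversample values taken from the tv_modes table in this patch:

#include <stdbool.h>
#include <stdio.h>

/* Cut-down view of struct tv_mode: just what the clock math needs. */
struct tv_timings {
	const char *name;
	int clock;	/* TV encoder clock in kHz */
	int oversample;	/* 8, 4, 2 or 1 */
	bool progressive;
};

/*
 * Pixel clock seen by the pipe: the encoder clock divided by the
 * oversampling factor, with the divisor halved for interlaced modes.
 * Same expression as clock / (oversample >> !progressive) in
 * intel_tv_mode_to_mode().
 */
static int pipe_clock_khz(const struct tv_timings *t)
{
	return t->clock / (t->oversample >> !t->progressive);
}

int main(void)
{
	static const struct tv_timings modes[] = {
		{ "NTSC-M",     108000, 8, false },
		{ "480p",       108000, 4, true  },
		{ "1080i@60Hz", 148500, 2, false },
		{ "1080p@60Hz", 148500, 1, true  },
	};
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		printf("%-11s -> %d kHz\n", modes[i].name,
		       pipe_clock_khz(&modes[i]));

	return 0;
}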
+ intel_tv_scale_mode_vert(&mode, vdisplay, + ypos, mode.vdisplay - ysize - ypos); + + adjusted_mode->crtc_clock = mode.clock; + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) + adjusted_mode->crtc_clock /= 2; + + /* pixel counter doesn't work on i965gm TV output */ + if (IS_I965GM(dev_priv)) + adjusted_mode->private_flags |= + I915_MODE_FLAG_USE_SCANLINE_COUNTER; } -static bool +static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv, + int hdisplay) +{ + return IS_GEN(dev_priv, 3) && hdisplay > 1024; +} + +static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode, + const struct drm_connector_state *conn_state, + int vdisplay) +{ + return tv_mode->crtc_vdisplay - + conn_state->tv.margins.top - + conn_state->tv.margins.bottom != + vdisplay; +} + +static int intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_tv_connector_state *tv_conn_state = + to_intel_tv_connector_state(conn_state); const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + int hdisplay = adjusted_mode->crtc_hdisplay; + int vdisplay = adjusted_mode->crtc_vdisplay; if (!tv_mode) - return false; + return -EINVAL; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - adjusted_mode->crtc_clock = tv_mode->clock; + DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); pipe_config->pipe_bpp = 8*3; - /* TV has it's own notion of sync and other mode flags, so clear them. */ - adjusted_mode->flags = 0; + pipe_config->port_clock = tv_mode->clock; + + intel_tv_mode_to_mode(adjusted_mode, tv_mode); + drm_mode_set_crtcinfo(adjusted_mode, 0); + + if (intel_tv_source_too_wide(dev_priv, hdisplay) || + !intel_tv_vert_scaling(adjusted_mode, conn_state, vdisplay)) { + int extra, top, bottom; + + extra = adjusted_mode->crtc_vdisplay - vdisplay; + + if (extra < 0) { + DRM_DEBUG_KMS("No vertical scaling for >1024 pixel wide modes\n"); + return -EINVAL; + } + + /* Need to turn off the vertical filter and center the image */ + + /* Attempt to maintain the relative sizes of the margins */ + top = conn_state->tv.margins.top; + bottom = conn_state->tv.margins.bottom; + + if (top + bottom) + top = extra * top / (top + bottom); + else + top = extra / 2; + bottom = extra - top; + + tv_conn_state->margins.top = top; + tv_conn_state->margins.bottom = bottom; + + tv_conn_state->bypass_vfilter = true; + + if (!tv_mode->progressive) { + adjusted_mode->clock /= 2; + adjusted_mode->crtc_clock /= 2; + adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; + } + } else { + tv_conn_state->margins.top = conn_state->tv.margins.top; + tv_conn_state->margins.bottom = conn_state->tv.margins.bottom; + + tv_conn_state->bypass_vfilter = false; + } + + DRM_DEBUG_KMS("TV mode:\n"); + drm_mode_debug_printmodeline(adjusted_mode); /* - * FIXME: We don't check whether the input mode is actually what we want - * or whether userspace is doing something stupid. 
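When intel_tv_compute_config() has to bypass the vertical filter (gen3 with sources wider than 1024), it converts the unused vertical lines into top/bottom margins, preserving the ratio of the user-requested margins and falling back to plain centering when both are zero. A small sketch of that redistribution with illustrative numbers:

#include <stdio.h>

/*
 * Split 'extra' lines of vertical slack into top/bottom margins, keeping
 * the ratio of the user-requested margins, or centering the image when
 * both are zero. Same arithmetic as the margin handling in
 * intel_tv_compute_config().
 */
static void split_margins(int extra, int user_top, int user_bottom,
			  int *top, int *bottom)
{
	if (user_top + user_bottom)
		*top = extra * user_top / (user_top + user_bottom);
	else
		*top = extra / 2;

	*bottom = extra - *top;
}

int main(void)
{
	int top, bottom;

	split_margins(56, 30, 10, &top, &bottom);	/* keeps the 3:1 ratio */
	printf("top=%d bottom=%d\n", top, bottom);	/* top=42 bottom=14 */

	split_margins(56, 0, 0, &top, &bottom);		/* plain centering */
	printf("top=%d bottom=%d\n", top, bottom);	/* top=28 bottom=28 */

	return 0;
}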
+ * The pipe scanline counter behaviour looks as follows when + * using the TV encoder: + * + * time -> + * + * dsl=vtotal-1 | | + * || || + * ___| | ___| | + * / | / | + * / | / | + * dsl=0 ___/ |_____/ | + * | | | | | | + * ^ ^ ^ ^ ^ + * | | | | pipe vblank/first part of tv vblank + * | | | bottom margin + * | | active + * | top margin + * remainder of tv vblank + * + * When the TV encoder is used the pipe wants to run faster + * than expected rate. During the active portion the TV + * encoder stalls the pipe every few lines to keep it in + * check. When the TV encoder reaches the bottom margin the + * pipe simply stops. Once we reach the TV vblank the pipe is + * no longer stalled and it runs at the max rate (apparently + * oversample clock on gen3, cdclk on gen4). Once the pipe + * reaches the pipe vtotal the pipe stops for the remainder + * of the TV vblank/top margin. The pipe starts up again when + * the TV encoder exits the top margin. + * + * To avoid huge hassles for vblank timestamping we scale + * the pipe timings as if the pipe always runs at the average + * rate it maintains during the active period. This also + * gives us a reasonable guesstimate as to the pixel rate. + * Due to the variation in the actual pipe speed the scanline + * counter will give us slightly erroneous results during the + * TV vblank/margins. But since vtotal was selected such that + * it matches the average rate of the pipe during the active + * portion the error shouldn't cause any serious grief to + * vblank timestamps. + * + * For posterity here is the empirically derived formula + * that gives us the maximum length of the pipe vblank + * we can use without causing display corruption. Following + * this would allow us to have a ticking scanline counter + * everywhere except during the bottom margin (there the + * pipe always stops). Ie. this would eliminate the second + * flat portion of the above graph. However this would also + * complicate vblank timestamping as the pipe vtotal would + * no longer match the average rate the pipe runs at during + * the active portion. Hence following this formula seems + * more trouble that it's worth. 
+ * + * if (IS_GEN(dev_priv, 4)) { + * num = cdclk * (tv_mode->oversample >> !tv_mode->progressive); + * den = tv_mode->clock; + * } else { + * num = tv_mode->oversample >> !tv_mode->progressive; + * den = 1; + * } + * max_pipe_vblank_len ~= + * (num * tv_htotal * (tv_vblank_len + top_margin)) / + * (den * pipe_htotal); */ + intel_tv_scale_mode_horiz(adjusted_mode, hdisplay, + conn_state->tv.margins.left, + conn_state->tv.margins.right); + intel_tv_scale_mode_vert(adjusted_mode, vdisplay, + tv_conn_state->margins.top, + tv_conn_state->margins.bottom); + drm_mode_set_crtcinfo(adjusted_mode, 0); + adjusted_mode->name[0] = '\0'; + + /* pixel counter doesn't work on i965gm TV output */ + if (IS_I965GM(dev_priv)) + adjusted_mode->private_flags |= + I915_MODE_FLAG_USE_SCANLINE_COUNTER; - return true; + return 0; } static void @@ -987,14 +1416,16 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); struct intel_tv *intel_tv = enc_to_tv(encoder); + const struct intel_tv_connector_state *tv_conn_state = + to_intel_tv_connector_state(conn_state); const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); - u32 tv_ctl; + u32 tv_ctl, tv_filter_ctl; u32 scctl1, scctl2, scctl3; int i, j; const struct video_levels *video_levels; const struct color_conversion *color_conversion; bool burst_ena; - int xpos = 0x0, ypos = 0x0; + int xpos, ypos; unsigned int xsize, ysize; if (!tv_mode) @@ -1030,7 +1461,21 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, } tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe); - tv_ctl |= tv_mode->oversample; + + switch (tv_mode->oversample) { + case 8: + tv_ctl |= TV_OVERSAMPLE_8X; + break; + case 4: + tv_ctl |= TV_OVERSAMPLE_4X; + break; + case 2: + tv_ctl |= TV_OVERSAMPLE_2X; + break; + default: + tv_ctl |= TV_OVERSAMPLE_NONE; + break; + } if (tv_mode->progressive) tv_ctl |= TV_PROGRESSIVE; @@ -1082,19 +1527,20 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, assert_pipe_disabled(dev_priv, intel_crtc->pipe); /* Filter ctl must be set before TV_WIN_SIZE */ - I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); + tv_filter_ctl = TV_AUTO_SCALE; + if (tv_conn_state->bypass_vfilter) + tv_filter_ctl |= TV_V_FILTER_BYPASS; + I915_WRITE(TV_FILTER_CTL_1, tv_filter_ctl); + xsize = tv_mode->hblank_start - tv_mode->hblank_end; - if (tv_mode->progressive) - ysize = tv_mode->nbr_end + 1; - else - ysize = 2*tv_mode->nbr_end + 1; + ysize = intel_tv_mode_vdisplay(tv_mode); - xpos += conn_state->tv.margins.left; - ypos += conn_state->tv.margins.top; + xpos = conn_state->tv.margins.left; + ypos = tv_conn_state->margins.top; xsize -= (conn_state->tv.margins.left + conn_state->tv.margins.right); - ysize -= (conn_state->tv.margins.top + - conn_state->tv.margins.bottom); + ysize -= (tv_conn_state->margins.top + + tv_conn_state->margins.bottom); I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos); I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize); @@ -1111,23 +1557,6 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, I915_WRITE(TV_CTL, tv_ctl); } -static const struct drm_display_mode reported_modes[] = { - { - .name = "NTSC 480i", - .clock = 107520, - .hdisplay = 1280, - .hsync_start = 1368, - .hsync_end = 1496, - .htotal = 1712, - - .vdisplay = 1024, - .vsync_start = 1027, - .vsync_end = 1034, - .vtotal = 1104, - .type = DRM_MODE_TYPE_DRIVER, - }, -}; - static int intel_tv_detect_type(struct intel_tv *intel_tv, struct drm_connector 
*connector) @@ -1234,16 +1663,18 @@ static void intel_tv_find_better_format(struct drm_connector *connector) const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); int i; - if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == - tv_mode->component_only) + /* Component supports everything so we can keep the current mode */ + if (intel_tv->type == DRM_MODE_CONNECTOR_Component) return; + /* If the current mode is fine don't change it */ + if (!tv_mode->component_only) + return; for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { - tv_mode = tv_modes + i; + tv_mode = &tv_modes[i]; - if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == - tv_mode->component_only) + if (!tv_mode->component_only) break; } @@ -1255,7 +1686,6 @@ intel_tv_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { - struct drm_display_mode mode; struct intel_tv *intel_tv = intel_attached_tv(connector); enum drm_connector_status status; int type; @@ -1264,13 +1694,11 @@ intel_tv_detect(struct drm_connector *connector, connector->base.id, connector->name, force); - mode = reported_modes[0]; - if (force) { struct intel_load_detect_pipe tmp; int ret; - ret = intel_get_load_detect_pipe(connector, &mode, &tmp, ctx); + ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx); if (ret < 0) return ret; @@ -1294,84 +1722,85 @@ intel_tv_detect(struct drm_connector *connector, } static const struct input_res { - const char *name; - int w, h; + u16 w, h; } input_res_table[] = { - {"640x480", 640, 480}, - {"800x600", 800, 600}, - {"1024x768", 1024, 768}, - {"1280x1024", 1280, 1024}, - {"848x480", 848, 480}, - {"1280x720", 1280, 720}, - {"1920x1080", 1920, 1080}, + { 640, 480 }, + { 800, 600 }, + { 1024, 768 }, + { 1280, 1024 }, + { 848, 480 }, + { 1280, 720 }, + { 1920, 1080 }, }; -/* - * Chose preferred mode according to line number of TV format - */ +/* Choose preferred mode according to line number of TV format */ +static bool +intel_tv_is_preferred_mode(const struct drm_display_mode *mode, + const struct tv_mode *tv_mode) +{ + int vdisplay = intel_tv_mode_vdisplay(tv_mode); + + /* prefer 480 line modes for all SD TV modes */ + if (vdisplay <= 576) + vdisplay = 480; + + return vdisplay == mode->vdisplay; +} + static void -intel_tv_choose_preferred_modes(const struct tv_mode *tv_mode, - struct drm_display_mode *mode_ptr) +intel_tv_set_mode_type(struct drm_display_mode *mode, + const struct tv_mode *tv_mode) { - if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) - mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; - else if (tv_mode->nbr_end > 480) { - if (tv_mode->progressive == true && tv_mode->nbr_end < 720) { - if (mode_ptr->vdisplay == 720) - mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; - } else if (mode_ptr->vdisplay == 1080) - mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; - } + mode->type = DRM_MODE_TYPE_DRIVER; + + if (intel_tv_is_preferred_mode(mode, tv_mode)) + mode->type |= DRM_MODE_TYPE_PREFERRED; } static int intel_tv_get_modes(struct drm_connector *connector) { - struct drm_display_mode *mode_ptr; + struct drm_i915_private *dev_priv = to_i915(connector->dev); const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); - int j, count = 0; - u64 tmp; + int i, count = 0; - for (j = 0; j < ARRAY_SIZE(input_res_table); - j++) { - const struct input_res *input = &input_res_table[j]; - unsigned int hactive_s = input->w; - unsigned int vactive_s = input->h; + for (i = 0; i < ARRAY_SIZE(input_res_table); i++) { + const struct input_res *input = &input_res_table[i]; + struct 
drm_display_mode *mode; - if (tv_mode->max_srcw && input->w > tv_mode->max_srcw) + if (input->w > 1024 && + !tv_mode->progressive && + !tv_mode->component_only) continue; - if (input->w > 1024 && (!tv_mode->progressive - && !tv_mode->component_only)) + /* no vertical scaling with wide sources on gen3 */ + if (IS_GEN(dev_priv, 3) && input->w > 1024 && + input->h > intel_tv_mode_vdisplay(tv_mode)) continue; - mode_ptr = drm_mode_create(connector->dev); - if (!mode_ptr) + mode = drm_mode_create(connector->dev); + if (!mode) continue; - strlcpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); - - mode_ptr->hdisplay = hactive_s; - mode_ptr->hsync_start = hactive_s + 1; - mode_ptr->hsync_end = hactive_s + 64; - if (mode_ptr->hsync_end <= mode_ptr->hsync_start) - mode_ptr->hsync_end = mode_ptr->hsync_start + 1; - mode_ptr->htotal = hactive_s + 96; - - mode_ptr->vdisplay = vactive_s; - mode_ptr->vsync_start = vactive_s + 1; - mode_ptr->vsync_end = vactive_s + 32; - if (mode_ptr->vsync_end <= mode_ptr->vsync_start) - mode_ptr->vsync_end = mode_ptr->vsync_start + 1; - mode_ptr->vtotal = vactive_s + 33; - - tmp = mul_u32_u32(tv_mode->refresh, mode_ptr->vtotal); - tmp *= mode_ptr->htotal; - tmp = div_u64(tmp, 1000000); - mode_ptr->clock = (int) tmp; - - mode_ptr->type = DRM_MODE_TYPE_DRIVER; - intel_tv_choose_preferred_modes(tv_mode, mode_ptr); - drm_mode_probed_add(connector, mode_ptr); + + /* + * We take the TV mode and scale it to look + * like it had the expected h/vdisplay. This + * provides the most information to userspace + * about the actual timings of the mode. We + * do ignore the margins though. + */ + intel_tv_mode_to_mode(mode, tv_mode); + if (count == 0) { + DRM_DEBUG_KMS("TV mode:\n"); + drm_mode_debug_printmodeline(mode); + } + intel_tv_scale_mode_horiz(mode, input->w, 0, 0); + intel_tv_scale_mode_vert(mode, input->h, 0, 0); + intel_tv_set_mode_type(mode, tv_mode); + + drm_mode_set_name(mode); + + drm_mode_probed_add(connector, mode); count++; } @@ -1384,7 +1813,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { .destroy = intel_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_duplicate_state = intel_tv_connector_duplicate_state, }; static int intel_tv_atomic_check(struct drm_connector *connector, @@ -1531,11 +1960,15 @@ intel_tv_init(struct drm_i915_private *dev_priv) connector->doublescan_allowed = false; /* Create TV properties then attach current values */ - for (i = 0; i < ARRAY_SIZE(tv_modes); i++) + for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { + /* 1080p50/1080p60 not supported on gen3 */ + if (IS_GEN(dev_priv, 3) && + tv_modes[i].oversample == 1) + break; + tv_format_names[i] = tv_modes[i].name; - drm_mode_create_tv_properties(dev, - ARRAY_SIZE(tv_modes), - tv_format_names); + } + drm_mode_create_tv_properties(dev, i, tv_format_names); drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property, state->tv.mode); diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index b34c318b238d..25b80ffe71ad 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -26,6 +26,7 @@ #include "intel_guc_submission.h" #include "intel_guc.h" #include "i915_drv.h" +#include "i915_reset.h" static void guc_free_load_err_log(struct intel_guc *guc); @@ -71,7 +72,7 @@ static int __get_default_guc_log_level(struct drm_i915_private 
*i915) { int guc_log_level; - if (!HAS_GUC(i915) || !intel_uc_is_using_guc()) + if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915)) guc_log_level = GUC_LOG_LEVEL_DISABLED; else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) @@ -112,11 +113,11 @@ static void sanitize_options_early(struct drm_i915_private *i915) DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n", i915_modparams.enable_guc, - yesno(intel_uc_is_using_guc_submission()), - yesno(intel_uc_is_using_huc())); + yesno(intel_uc_is_using_guc_submission(i915)), + yesno(intel_uc_is_using_huc(i915))); /* Verify GuC firmware availability */ - if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) { + if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "enable_guc", i915_modparams.enable_guc, !HAS_GUC(i915) ? "no GuC hardware" : @@ -124,7 +125,7 @@ static void sanitize_options_early(struct drm_i915_private *i915) } /* Verify HuC firmware availability */ - if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) { + if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "enable_guc", i915_modparams.enable_guc, !HAS_HUC(i915) ? "no HuC hardware" : @@ -136,7 +137,7 @@ static void sanitize_options_early(struct drm_i915_private *i915) i915_modparams.guc_log_level = __get_default_guc_log_level(i915); - if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) { + if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "guc_log_level", i915_modparams.guc_log_level, !HAS_GUC(i915) ? "no GuC hardware" : @@ -331,8 +332,6 @@ void intel_uc_sanitize(struct drm_i915_private *i915) GEM_BUG_ON(!HAS_GUC(i915)); - guc_disable_communication(guc); - intel_huc_sanitize(huc); intel_guc_sanitize(guc); @@ -354,7 +353,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915) /* WaEnableuKernelHeaderValidFix:skl */ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ - if (IS_GEN9(i915)) + if (IS_GEN(i915, 9)) attempts = 3; else attempts = 1; @@ -376,7 +375,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915) intel_guc_init_params(guc); ret = intel_guc_fw_upload(guc); - if (ret == 0 || ret != -ETIMEDOUT) + if (ret == 0) break; DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " @@ -450,6 +449,23 @@ void intel_uc_fini_hw(struct drm_i915_private *i915) guc_disable_communication(guc); } +/** + * intel_uc_reset_prepare - Prepare for reset + * @i915: device private + * + * Preparing for full gpu reset. 
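The intel_uc helpers are reworked to take the device pointer rather than consulting the module parameter through a global, which is why sanitize_options_early() and __get_default_guc_log_level() above now pass i915 through. A user-space sketch of the underlying enable_guc decode; the flag values are stand-ins, as the real ENABLE_GUC_* definitions are not part of this hunk:

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for the enable_guc flag bits; the real
 * ENABLE_GUC_* values live elsewhere and are not part of this hunk.
 */
#define ENABLE_GUC_SUBMISSION	(1 << 0)
#define ENABLE_GUC_LOAD_HUC	(1 << 1)

struct toy_i915 {
	int enable_guc;	/* already sanitized, i.e. >= 0 */
};

static bool uc_is_using_guc(const struct toy_i915 *i915)
{
	return i915->enable_guc > 0;
}

static bool uc_is_using_guc_submission(const struct toy_i915 *i915)
{
	return i915->enable_guc & ENABLE_GUC_SUBMISSION;
}

static bool uc_is_using_huc(const struct toy_i915 *i915)
{
	return i915->enable_guc & ENABLE_GUC_LOAD_HUC;
}

int main(void)
{
	struct toy_i915 i915 = { .enable_guc = ENABLE_GUC_LOAD_HUC };

	printf("guc:%d submission:%d huc:%d\n",
	       uc_is_using_guc(&i915),
	       uc_is_using_guc_submission(&i915),
	       uc_is_using_huc(&i915));
	return 0;
}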
+ */ +void intel_uc_reset_prepare(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + + if (!USES_GUC(i915)) + return; + + guc_disable_communication(guc); + intel_uc_sanitize(i915); +} + int intel_uc_suspend(struct drm_i915_private *i915) { struct intel_guc *guc = &i915->guc; @@ -467,7 +483,7 @@ int intel_uc_suspend(struct drm_i915_private *i915) return err; } - gen9_disable_guc_interrupts(i915); + guc_disable_communication(guc); return 0; } @@ -483,7 +499,7 @@ int intel_uc_resume(struct drm_i915_private *i915) if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) return 0; - gen9_enable_guc_interrupts(i915); + guc_enable_communication(guc); err = intel_guc_resume(guc); if (err) { diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index 25d73ada74ae..c14729786652 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -38,22 +38,23 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv); void intel_uc_fini_hw(struct drm_i915_private *dev_priv); int intel_uc_init(struct drm_i915_private *dev_priv); void intel_uc_fini(struct drm_i915_private *dev_priv); +void intel_uc_reset_prepare(struct drm_i915_private *i915); int intel_uc_suspend(struct drm_i915_private *dev_priv); int intel_uc_resume(struct drm_i915_private *dev_priv); -static inline bool intel_uc_is_using_guc(void) +static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc > 0; } -static inline bool intel_uc_is_using_guc_submission(void) +static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; } -static inline bool intel_uc_is_using_huc(void) +static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC; diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c index fd496416087c..becf05ebae4d 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/intel_uc_fw.c @@ -46,12 +46,17 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, size_t size; int err; + if (!uc_fw->path) { + dev_info(dev_priv->drm.dev, + "%s: No firmware was defined for %s!\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_platform_name(INTEL_INFO(dev_priv)->platform)); + return; + } + DRM_DEBUG_DRIVER("%s fw fetch %s\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); - if (!uc_fw->path) - return; - uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING; DRM_DEBUG_DRIVER("%s fw fetch %s\n", intel_uc_fw_type_repr(uc_fw->type), diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 9289515108c3..a01d1b352a7c 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -31,7 +31,7 @@ #define FORCEWAKE_ACK_TIMEOUT_MS 50 #define GT_FIFO_TIMEOUT_MS 10 -#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__)) +#define __raw_posting_read(...) 
((void)__raw_uncore_read32(__VA_ARGS__)) static const char * const forcewake_domain_names[] = { "render", @@ -58,16 +58,20 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) return "unknown"; } +#define fw_ack(d) readl((d)->reg_ack) +#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set) +#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set) + static inline void -fw_domain_reset(struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_reset(const struct intel_uncore_forcewake_domain *d) { /* * We don't really know if the powerwell for the forcewake domain we are * trying to reset here does exist at this point (engines could be fused * off in ICL+), so no waiting for acks */ - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset); + /* WaRsClearFWBitsAtReset:bdw,skl */ + fw_clear(d, 0xffff); } static inline void @@ -81,36 +85,32 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) } static inline int -__wait_for_ack(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d, +__wait_for_ack(const struct intel_uncore_forcewake_domain *d, const u32 ack, const u32 value) { - return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value, + return wait_for_atomic((fw_ack(d) & ack) == value, FORCEWAKE_ACK_TIMEOUT_MS); } static inline int -wait_ack_clear(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d, +wait_ack_clear(const struct intel_uncore_forcewake_domain *d, const u32 ack) { - return __wait_for_ack(i915, d, ack, 0); + return __wait_for_ack(d, ack, 0); } static inline int -wait_ack_set(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d, +wait_ack_set(const struct intel_uncore_forcewake_domain *d, const u32 ack) { - return __wait_for_ack(i915, d, ack, ack); + return __wait_for_ack(d, ack, ack); } static inline void -fw_domain_wait_ack_clear(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d) { - if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL)) + if (wait_ack_clear(d, FORCEWAKE_KERNEL)) DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n", intel_uncore_forcewake_domain_to_str(d->id)); } @@ -121,8 +121,7 @@ enum ack_type { }; static int -fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d, +fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d, const enum ack_type type) { const u32 ack_bit = FORCEWAKE_KERNEL; @@ -146,129 +145,122 @@ fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915, pass = 1; do { - wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK); + wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK); - __raw_i915_write32(i915, d->reg_set, - _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK)); + fw_set(d, FORCEWAKE_KERNEL_FALLBACK); /* Give gt some time to relax before the polling frenzy */ udelay(10 * pass); - wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK); + wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK); - ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value; + ack_detected = (fw_ack(d) & ack_bit) == value; - __raw_i915_write32(i915, d->reg_set, - _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK)); + fw_clear(d, FORCEWAKE_KERNEL_FALLBACK); } while (!ack_detected && pass++ < 10); DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes 
%u)\n", intel_uncore_forcewake_domain_to_str(d->id), type == ACK_SET ? "set" : "clear", - __raw_i915_read32(i915, d->reg_ack), + fw_ack(d), pass); return ack_detected ? 0 : -ETIMEDOUT; } static inline void -fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d) { - if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL))) + if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL))) return; - if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR)) - fw_domain_wait_ack_clear(i915, d); + if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR)) + fw_domain_wait_ack_clear(d); } static inline void -fw_domain_get(struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_get(const struct intel_uncore_forcewake_domain *d) { - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set); + fw_set(d, FORCEWAKE_KERNEL); } static inline void -fw_domain_wait_ack_set(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d) { - if (wait_ack_set(i915, d, FORCEWAKE_KERNEL)) + if (wait_ack_set(d, FORCEWAKE_KERNEL)) DRM_ERROR("%s: timed out waiting for forcewake ack request.\n", intel_uncore_forcewake_domain_to_str(d->id)); } static inline void -fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d) { - if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL))) + if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL))) return; - if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET)) - fw_domain_wait_ack_set(i915, d); + if (fw_domain_wait_ack_with_fallback(d, ACK_SET)) + fw_domain_wait_ack_set(d); } static inline void -fw_domain_put(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_put(const struct intel_uncore_forcewake_domain *d) { - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear); + fw_clear(d, FORCEWAKE_KERNEL); } static void -fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains) +fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; unsigned int tmp; - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); - for_each_fw_domain_masked(d, fw_domains, i915, tmp) { - fw_domain_wait_ack_clear(i915, d); - fw_domain_get(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) { + fw_domain_wait_ack_clear(d); + fw_domain_get(d); } - for_each_fw_domain_masked(d, fw_domains, i915, tmp) - fw_domain_wait_ack_set(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) + fw_domain_wait_ack_set(d); - i915->uncore.fw_domains_active |= fw_domains; + uncore->fw_domains_active |= fw_domains; } static void -fw_domains_get_with_fallback(struct drm_i915_private *i915, +fw_domains_get_with_fallback(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; unsigned int tmp; - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); - for_each_fw_domain_masked(d, fw_domains, i915, tmp) { - fw_domain_wait_ack_clear_fallback(i915, d); - fw_domain_get(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) { + 
fw_domain_wait_ack_clear_fallback(d); + fw_domain_get(d); } - for_each_fw_domain_masked(d, fw_domains, i915, tmp) - fw_domain_wait_ack_set_fallback(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) + fw_domain_wait_ack_set_fallback(d); - i915->uncore.fw_domains_active |= fw_domains; + uncore->fw_domains_active |= fw_domains; } static void -fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains) +fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; unsigned int tmp; - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); - for_each_fw_domain_masked(d, fw_domains, i915, tmp) - fw_domain_put(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) + fw_domain_put(d); - i915->uncore.fw_domains_active &= ~fw_domains; + uncore->fw_domains_active &= ~fw_domains; } static void -fw_domains_reset(struct drm_i915_private *i915, +fw_domains_reset(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; @@ -277,61 +269,61 @@ fw_domains_reset(struct drm_i915_private *i915, if (!fw_domains) return; - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); - for_each_fw_domain_masked(d, fw_domains, i915, tmp) - fw_domain_reset(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) + fw_domain_reset(d); } -static inline u32 gt_thread_status(struct drm_i915_private *dev_priv) +static inline u32 gt_thread_status(struct intel_uncore *uncore) { u32 val; - val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG); + val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG); val &= GEN6_GT_THREAD_STATUS_CORE_MASK; return val; } -static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) +static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore) { /* * w/a for a sporadic read returning 0 by waiting for the GT * thread to wake up. */ - WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000), + WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000), "GT thread status wait timed out\n"); } -static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv, +static void fw_domains_get_with_thread_status(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - fw_domains_get(dev_priv, fw_domains); + fw_domains_get(uncore, fw_domains); /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */ - __gen6_gt_wait_for_thread_c0(dev_priv); + __gen6_gt_wait_for_thread_c0(uncore); } -static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv) +static inline u32 fifo_free_entries(struct intel_uncore *uncore) { - u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL); + u32 count = __raw_uncore_read32(uncore, GTFIFOCTL); return count & GT_FIFO_FREE_ENTRIES_MASK; } -static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) +static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore) { u32 n; /* On VLV, FIFO will be shared by both SW and HW. 
* So, we need to read the FREE_ENTRIES everytime */ - if (IS_VALLEYVIEW(dev_priv)) - n = fifo_free_entries(dev_priv); + if (IS_VALLEYVIEW(uncore_to_i915(uncore))) + n = fifo_free_entries(uncore); else - n = dev_priv->uncore.fifo_count; + n = uncore->fifo_count; if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) { - if (wait_for_atomic((n = fifo_free_entries(dev_priv)) > + if (wait_for_atomic((n = fifo_free_entries(uncore)) > GT_FIFO_NUM_RESERVED_ENTRIES, GT_FIFO_TIMEOUT_MS)) { DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n); @@ -339,7 +331,7 @@ static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) } } - dev_priv->uncore.fifo_count = n - 1; + uncore->fifo_count = n - 1; } static enum hrtimer_restart @@ -347,30 +339,29 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) { struct intel_uncore_forcewake_domain *domain = container_of(timer, struct intel_uncore_forcewake_domain, timer); - struct drm_i915_private *dev_priv = - container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]); + struct intel_uncore *uncore = forcewake_domain_to_uncore(domain); unsigned long irqflags; - assert_rpm_device_not_suspended(dev_priv); + assert_rpm_device_not_suspended(uncore->rpm); if (xchg(&domain->active, false)) return HRTIMER_RESTART; - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + spin_lock_irqsave(&uncore->lock, irqflags); if (WARN_ON(domain->wake_count == 0)) domain->wake_count++; if (--domain->wake_count == 0) - dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); + uncore->funcs.force_wake_put(uncore, domain->mask); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_unlock_irqrestore(&uncore->lock, irqflags); return HRTIMER_NORESTART; } /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. 
*/ static unsigned int -intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv) +intel_uncore_forcewake_reset(struct intel_uncore *uncore) { unsigned long irqflags; struct intel_uncore_forcewake_domain *domain; @@ -388,7 +379,7 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv) active_domains = 0; - for_each_fw_domain(domain, dev_priv, tmp) { + for_each_fw_domain(domain, uncore, tmp) { smp_store_mb(domain->active, false); if (hrtimer_cancel(&domain->timer) == 0) continue; @@ -396,9 +387,9 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv) intel_uncore_fw_release_timer(&domain->timer); } - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + spin_lock_irqsave(&uncore->lock, irqflags); - for_each_fw_domain(domain, dev_priv, tmp) { + for_each_fw_domain(domain, uncore, tmp) { if (hrtimer_active(&domain->timer)) active_domains |= domain->mask; } @@ -411,185 +402,134 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv) break; } - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_unlock_irqrestore(&uncore->lock, irqflags); cond_resched(); } WARN_ON(active_domains); - fw = dev_priv->uncore.fw_domains_active; + fw = uncore->fw_domains_active; if (fw) - dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); + uncore->funcs.force_wake_put(uncore, fw); - fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains); - assert_forcewakes_inactive(dev_priv); + fw_domains_reset(uncore, uncore->fw_domains); + assert_forcewakes_inactive(uncore); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_unlock_irqrestore(&uncore->lock, irqflags); return fw; /* track the lost user forcewake domains */ } -static u64 gen9_edram_size(struct drm_i915_private *dev_priv) -{ - const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; - const unsigned int sets[4] = { 1, 1, 2, 2 }; - const u32 cap = dev_priv->edram_cap; - - return EDRAM_NUM_BANKS(cap) * - ways[EDRAM_WAYS_IDX(cap)] * - sets[EDRAM_SETS_IDX(cap)] * - 1024 * 1024; -} - -u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv) -{ - if (!HAS_EDRAM(dev_priv)) - return 0; - - /* The needed capability bits for size calculation - * are not there with pre gen9 so return 128MB always. 
- */ - if (INTEL_GEN(dev_priv) < 9) - return 128 * 1024 * 1024; - - return gen9_edram_size(dev_priv); -} - -static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv) -{ - if (IS_HASWELL(dev_priv) || - IS_BROADWELL(dev_priv) || - INTEL_GEN(dev_priv) >= 9) { - dev_priv->edram_cap = __raw_i915_read32(dev_priv, - HSW_EDRAM_CAP); - - /* NB: We can't write IDICR yet because we do not have gt funcs - * set up */ - } else { - dev_priv->edram_cap = 0; - } - - if (HAS_EDRAM(dev_priv)) - DRM_INFO("Found %lluMB of eDRAM\n", - intel_uncore_edram_size(dev_priv) / (1024 * 1024)); -} - static bool -fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore) { u32 dbg; - dbg = __raw_i915_read32(dev_priv, FPGA_DBG); + dbg = __raw_uncore_read32(uncore, FPGA_DBG); if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM))) return false; - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); return true; } static bool -vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore) { u32 cer; - cer = __raw_i915_read32(dev_priv, CLAIM_ER); + cer = __raw_uncore_read32(uncore, CLAIM_ER); if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK)))) return false; - __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR); + __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR); return true; } static bool -gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv) +gen6_check_for_fifo_debug(struct intel_uncore *uncore) { u32 fifodbg; - fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG); + fifodbg = __raw_uncore_read32(uncore, GTFIFODBG); if (unlikely(fifodbg)) { DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg); - __raw_i915_write32(dev_priv, GTFIFODBG, fifodbg); + __raw_uncore_write32(uncore, GTFIFODBG, fifodbg); } return fifodbg; } static bool -check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +check_for_unclaimed_mmio(struct intel_uncore *uncore) { bool ret = false; - if (HAS_FPGA_DBG_UNCLAIMED(dev_priv)) - ret |= fpga_check_for_unclaimed_mmio(dev_priv); + if (intel_uncore_has_fpga_dbg_unclaimed(uncore)) + ret |= fpga_check_for_unclaimed_mmio(uncore); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - ret |= vlv_check_for_unclaimed_mmio(dev_priv); + if (intel_uncore_has_dbg_unclaimed(uncore)) + ret |= vlv_check_for_unclaimed_mmio(uncore); - if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) - ret |= gen6_check_for_fifo_debug(dev_priv); + if (intel_uncore_has_fifo(uncore)) + ret |= gen6_check_for_fifo_debug(uncore); return ret; } -static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, +static void __intel_uncore_early_sanitize(struct intel_uncore *uncore, unsigned int restore_forcewake) { /* clear out unclaimed reg detection bit */ - if (check_for_unclaimed_mmio(dev_priv)) + if (check_for_unclaimed_mmio(uncore)) DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); /* WaDisableShadowRegForCpd:chv */ - if (IS_CHERRYVIEW(dev_priv)) { - __raw_i915_write32(dev_priv, GTFIFOCTL, - __raw_i915_read32(dev_priv, GTFIFOCTL) | - GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | - GT_FIFO_CTL_RC6_POLICY_STALL); + if (IS_CHERRYVIEW(uncore_to_i915(uncore))) { + __raw_uncore_write32(uncore, GTFIFOCTL, + __raw_uncore_read32(uncore, GTFIFOCTL) | + GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | + GT_FIFO_CTL_RC6_POLICY_STALL); } iosf_mbi_punit_acquire(); - intel_uncore_forcewake_reset(dev_priv); + 
intel_uncore_forcewake_reset(uncore); if (restore_forcewake) { - spin_lock_irq(&dev_priv->uncore.lock); - dev_priv->uncore.funcs.force_wake_get(dev_priv, - restore_forcewake); - - if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) - dev_priv->uncore.fifo_count = - fifo_free_entries(dev_priv); - spin_unlock_irq(&dev_priv->uncore.lock); + spin_lock_irq(&uncore->lock); + uncore->funcs.force_wake_get(uncore, restore_forcewake); + + if (intel_uncore_has_fifo(uncore)) + uncore->fifo_count = fifo_free_entries(uncore); + spin_unlock_irq(&uncore->lock); } iosf_mbi_punit_release(); } -void intel_uncore_suspend(struct drm_i915_private *dev_priv) +void intel_uncore_suspend(struct intel_uncore *uncore) { iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( - &dev_priv->uncore.pmic_bus_access_nb); - dev_priv->uncore.fw_domains_saved = - intel_uncore_forcewake_reset(dev_priv); + &uncore->pmic_bus_access_nb); + uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore); iosf_mbi_punit_release(); } -void intel_uncore_resume_early(struct drm_i915_private *dev_priv) +void intel_uncore_resume_early(struct intel_uncore *uncore) { unsigned int restore_forcewake; - restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved); - __intel_uncore_early_sanitize(dev_priv, restore_forcewake); + restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved); + __intel_uncore_early_sanitize(uncore, restore_forcewake); - iosf_mbi_register_pmic_bus_access_notifier( - &dev_priv->uncore.pmic_bus_access_nb); - i915_check_and_clear_faults(dev_priv); + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); } -void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv) +void intel_uncore_runtime_resume(struct intel_uncore *uncore) { - iosf_mbi_register_pmic_bus_access_notifier( - &dev_priv->uncore.pmic_bus_access_nb); + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); } void intel_uncore_sanitize(struct drm_i915_private *dev_priv) @@ -598,15 +538,15 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv) intel_sanitize_gt_powersave(dev_priv); } -static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, +static void __intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; unsigned int tmp; - fw_domains &= dev_priv->uncore.fw_domains; + fw_domains &= uncore->fw_domains; - for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) { + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { if (domain->wake_count++) { fw_domains &= ~domain->mask; domain->active = true; @@ -614,12 +554,12 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, } if (fw_domains) - dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); + uncore->funcs.force_wake_get(uncore, fw_domains); } /** * intel_uncore_forcewake_get - grab forcewake domain references - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * @fw_domains: forcewake domains to get reference on * * This function can be used get GT's forcewake domain references. @@ -630,100 +570,100 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, * call to intel_unforce_forcewake_put(). Usually caller wants all the domains * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL. 
*/ -void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { unsigned long irqflags; - if (!dev_priv->uncore.funcs.force_wake_get) + if (!uncore->funcs.force_wake_get) return; - assert_rpm_wakelock_held(dev_priv); + __assert_rpm_wakelock_held(uncore->rpm); - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - __intel_uncore_forcewake_get(dev_priv, fw_domains); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_lock_irqsave(&uncore->lock, irqflags); + __intel_uncore_forcewake_get(uncore, fw_domains); + spin_unlock_irqrestore(&uncore->lock, irqflags); } /** * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * * This function is a wrapper around intel_uncore_forcewake_get() to acquire * the GT powerwell and in the process disable our debugging for the * duration of userspace's bypass. */ -void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv) +void intel_uncore_forcewake_user_get(struct intel_uncore *uncore) { - spin_lock_irq(&dev_priv->uncore.lock); - if (!dev_priv->uncore.user_forcewake.count++) { - intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); + spin_lock_irq(&uncore->lock); + if (!uncore->user_forcewake.count++) { + intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL); /* Save and disable mmio debugging for the user bypass */ - dev_priv->uncore.user_forcewake.saved_mmio_check = - dev_priv->uncore.unclaimed_mmio_check; - dev_priv->uncore.user_forcewake.saved_mmio_debug = + uncore->user_forcewake.saved_mmio_check = + uncore->unclaimed_mmio_check; + uncore->user_forcewake.saved_mmio_debug = i915_modparams.mmio_debug; - dev_priv->uncore.unclaimed_mmio_check = 0; + uncore->unclaimed_mmio_check = 0; i915_modparams.mmio_debug = 0; } - spin_unlock_irq(&dev_priv->uncore.lock); + spin_unlock_irq(&uncore->lock); } /** * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * * This function complements intel_uncore_forcewake_user_get() and releases * the GT powerwell taken on behalf of the userspace bypass. */ -void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv) +void intel_uncore_forcewake_user_put(struct intel_uncore *uncore) { - spin_lock_irq(&dev_priv->uncore.lock); - if (!--dev_priv->uncore.user_forcewake.count) { - if (intel_uncore_unclaimed_mmio(dev_priv)) - dev_info(dev_priv->drm.dev, + spin_lock_irq(&uncore->lock); + if (!--uncore->user_forcewake.count) { + if (intel_uncore_unclaimed_mmio(uncore)) + dev_info(uncore_to_i915(uncore)->drm.dev, "Invalid mmio detected during user access\n"); - dev_priv->uncore.unclaimed_mmio_check = - dev_priv->uncore.user_forcewake.saved_mmio_check; + uncore->unclaimed_mmio_check = + uncore->user_forcewake.saved_mmio_check; i915_modparams.mmio_debug = - dev_priv->uncore.user_forcewake.saved_mmio_debug; + uncore->user_forcewake.saved_mmio_debug; - intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL); } - spin_unlock_irq(&dev_priv->uncore.lock); + spin_unlock_irq(&uncore->lock); } /** * intel_uncore_forcewake_get__locked - grab forcewake domain references - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * @fw_domains: forcewake domains to get reference on * * See intel_uncore_forcewake_get(). 
This variant places the onus * on the caller to explicitly handle the dev_priv->uncore.lock spinlock. */ -void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - lockdep_assert_held(&dev_priv->uncore.lock); + lockdep_assert_held(&uncore->lock); - if (!dev_priv->uncore.funcs.force_wake_get) + if (!uncore->funcs.force_wake_get) return; - __intel_uncore_forcewake_get(dev_priv, fw_domains); + __intel_uncore_forcewake_get(uncore, fw_domains); } -static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, +static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; unsigned int tmp; - fw_domains &= dev_priv->uncore.fw_domains; + fw_domains &= uncore->fw_domains; - for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) { + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { if (WARN_ON(domain->wake_count == 0)) continue; @@ -738,66 +678,66 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, /** * intel_uncore_forcewake_put - release a forcewake domain reference - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * @fw_domains: forcewake domains to put references * * This function drops the device-level forcewakes for specified * domains obtained by intel_uncore_forcewake_get(). */ -void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { unsigned long irqflags; - if (!dev_priv->uncore.funcs.force_wake_put) + if (!uncore->funcs.force_wake_put) return; - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - __intel_uncore_forcewake_put(dev_priv, fw_domains); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_lock_irqsave(&uncore->lock, irqflags); + __intel_uncore_forcewake_put(uncore, fw_domains); + spin_unlock_irqrestore(&uncore->lock, irqflags); } /** * intel_uncore_forcewake_put__locked - grab forcewake domain references - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * @fw_domains: forcewake domains to get reference on * * See intel_uncore_forcewake_put(). This variant places the onus * on the caller to explicitly handle the dev_priv->uncore.lock spinlock. 
*/ -void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - lockdep_assert_held(&dev_priv->uncore.lock); + lockdep_assert_held(&uncore->lock); - if (!dev_priv->uncore.funcs.force_wake_put) + if (!uncore->funcs.force_wake_put) return; - __intel_uncore_forcewake_put(dev_priv, fw_domains); + __intel_uncore_forcewake_put(uncore, fw_domains); } -void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) +void assert_forcewakes_inactive(struct intel_uncore *uncore) { - if (!dev_priv->uncore.funcs.force_wake_get) + if (!uncore->funcs.force_wake_get) return; - WARN(dev_priv->uncore.fw_domains_active, + WARN(uncore->fw_domains_active, "Expected all fw_domains to be inactive, but %08x are still on\n", - dev_priv->uncore.fw_domains_active); + uncore->fw_domains_active); } -void assert_forcewakes_active(struct drm_i915_private *dev_priv, +void assert_forcewakes_active(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - if (!dev_priv->uncore.funcs.force_wake_get) + if (!uncore->funcs.force_wake_get) return; - assert_rpm_wakelock_held(dev_priv); + __assert_rpm_wakelock_held(uncore->rpm); - fw_domains &= dev_priv->uncore.fw_domains; - WARN(fw_domains & ~dev_priv->uncore.fw_domains_active, + fw_domains &= uncore->fw_domains; + WARN(fw_domains & ~uncore->fw_domains_active, "Expected %08x fw_domains to be active, but %08x are off\n", - fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active); + fw_domains, fw_domains & ~uncore->fw_domains_active); } /* We give fast paths for the really cool registers */ @@ -806,7 +746,7 @@ void assert_forcewakes_active(struct drm_i915_private *dev_priv, #define GEN11_NEEDS_FORCE_WAKE(reg) \ ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000)) -#define __gen6_reg_read_fw_domains(offset) \ +#define __gen6_reg_read_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd; \ if (NEEDS_FORCE_WAKE(offset)) \ @@ -846,13 +786,13 @@ static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry) }) static enum forcewake_domains -find_fw_domain(struct drm_i915_private *dev_priv, u32 offset) +find_fw_domain(struct intel_uncore *uncore, u32 offset) { const struct intel_forcewake_range *entry; entry = BSEARCH(offset, - dev_priv->uncore.fw_domains_table, - dev_priv->uncore.fw_domains_table_entries, + uncore->fw_domains_table, + uncore->fw_domains_table_entries, fw_range_cmp); if (!entry) @@ -864,11 +804,11 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset) * translate it here to the list of available domains. 
*/ if (entry->domains == FORCEWAKE_ALL) - return dev_priv->uncore.fw_domains; + return uncore->fw_domains; - WARN(entry->domains & ~dev_priv->uncore.fw_domains, + WARN(entry->domains & ~uncore->fw_domains, "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n", - entry->domains & ~dev_priv->uncore.fw_domains, offset); + entry->domains & ~uncore->fw_domains, offset); return entry->domains; } @@ -892,19 +832,19 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = { GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), }; -#define __fwtable_reg_read_fw_domains(offset) \ +#define __fwtable_reg_read_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ if (NEEDS_FORCE_WAKE((offset))) \ - __fwd = find_fw_domain(dev_priv, offset); \ + __fwd = find_fw_domain(uncore, offset); \ __fwd; \ }) -#define __gen11_fwtable_reg_read_fw_domains(offset) \ +#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ if (GEN11_NEEDS_FORCE_WAKE((offset))) \ - __fwd = find_fw_domain(dev_priv, offset); \ + __fwd = find_fw_domain(uncore, offset); \ __fwd; \ }) @@ -956,7 +896,7 @@ static bool is_gen##x##_shadowed(u32 offset) \ __is_genX_shadowed(8) __is_genX_shadowed(11) -#define __gen8_reg_write_fw_domains(offset) \ +#define __gen8_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd; \ if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \ @@ -986,19 +926,19 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = { GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA), }; -#define __fwtable_reg_write_fw_domains(offset) \ +#define __fwtable_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \ - __fwd = find_fw_domain(dev_priv, offset); \ + __fwd = find_fw_domain(uncore, offset); \ __fwd; \ }) -#define __gen11_fwtable_reg_write_fw_domains(offset) \ +#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \ - __fwd = find_fw_domain(dev_priv, offset); \ + __fwd = find_fw_domain(uncore, offset); \ __fwd; \ }) @@ -1073,21 +1013,21 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = { }; static void -ilk_dummy_write(struct drm_i915_private *dev_priv) +ilk_dummy_write(struct intel_uncore *uncore) { /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up * the chip from rc6 before touching it for real. MI_MODE is masked, * hence harmless to write 0 into. */ - __raw_i915_write32(dev_priv, MI_MODE, 0); + __raw_uncore_write32(uncore, MI_MODE, 0); } static void -__unclaimed_reg_debug(struct drm_i915_private *dev_priv, +__unclaimed_reg_debug(struct intel_uncore *uncore, const i915_reg_t reg, const bool read, const bool before) { - if (WARN(check_for_unclaimed_mmio(dev_priv) && !before, + if (WARN(check_for_unclaimed_mmio(uncore) && !before, "Unclaimed %s register 0x%x\n", read ? 
"read from" : "write to", i915_mmio_reg_offset(reg))) @@ -1096,7 +1036,7 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv, } static inline void -unclaimed_reg_debug(struct drm_i915_private *dev_priv, +unclaimed_reg_debug(struct intel_uncore *uncore, const i915_reg_t reg, const bool read, const bool before) @@ -1104,12 +1044,12 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv, if (likely(!i915_modparams.mmio_debug)) return; - __unclaimed_reg_debug(dev_priv, reg, read, before); + __unclaimed_reg_debug(uncore, reg, read, before); } #define GEN2_READ_HEADER(x) \ u##x val = 0; \ - assert_rpm_wakelock_held(dev_priv); + __assert_rpm_wakelock_held(uncore->rpm); #define GEN2_READ_FOOTER \ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ @@ -1117,18 +1057,18 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv, #define __gen2_read(x) \ static u##x \ -gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ +gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ GEN2_READ_HEADER(x); \ - val = __raw_i915_read##x(dev_priv, reg); \ + val = __raw_uncore_read##x(uncore, reg); \ GEN2_READ_FOOTER; \ } #define __gen5_read(x) \ static u##x \ -gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ +gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ GEN2_READ_HEADER(x); \ - ilk_dummy_write(dev_priv); \ - val = __raw_i915_read##x(dev_priv, reg); \ + ilk_dummy_write(uncore); \ + val = __raw_uncore_read##x(uncore, reg); \ GEN2_READ_FOOTER; \ } @@ -1151,53 +1091,53 @@ __gen2_read(64) u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ u##x val = 0; \ - assert_rpm_wakelock_held(dev_priv); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ - unclaimed_reg_debug(dev_priv, reg, true, true) + __assert_rpm_wakelock_held(uncore->rpm); \ + spin_lock_irqsave(&uncore->lock, irqflags); \ + unclaimed_reg_debug(uncore, reg, true, true) #define GEN6_READ_FOOTER \ - unclaimed_reg_debug(dev_priv, reg, true, false); \ - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ + unclaimed_reg_debug(uncore, reg, true, false); \ + spin_unlock_irqrestore(&uncore->lock, irqflags); \ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ return val -static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv, +static noinline void ___force_wake_auto(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; unsigned int tmp; - GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains); + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); - for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) fw_domain_arm_timer(domain); - dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); + uncore->funcs.force_wake_get(uncore, fw_domains); } -static inline void __force_wake_auto(struct drm_i915_private *dev_priv, +static inline void __force_wake_auto(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { if (WARN_ON(!fw_domains)) return; /* Turn on all requested but inactive supported forcewake domains. 
*/ - fw_domains &= dev_priv->uncore.fw_domains; - fw_domains &= ~dev_priv->uncore.fw_domains_active; + fw_domains &= uncore->fw_domains; + fw_domains &= ~uncore->fw_domains_active; if (fw_domains) - ___force_wake_auto(dev_priv, fw_domains); + ___force_wake_auto(uncore, fw_domains); } #define __gen_read(func, x) \ static u##x \ -func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ +func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ - fw_engine = __##func##_reg_read_fw_domains(offset); \ + fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \ if (fw_engine) \ - __force_wake_auto(dev_priv, fw_engine); \ - val = __raw_i915_read##x(dev_priv, reg); \ + __force_wake_auto(uncore, fw_engine); \ + val = __raw_uncore_read##x(uncore, reg); \ GEN6_READ_FOOTER; \ } #define __gen6_read(x) __gen_read(gen6, x) @@ -1225,24 +1165,24 @@ __gen6_read(64) #define GEN2_WRITE_HEADER \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - assert_rpm_wakelock_held(dev_priv); \ + __assert_rpm_wakelock_held(uncore->rpm); \ #define GEN2_WRITE_FOOTER #define __gen2_write(x) \ static void \ -gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ +gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ GEN2_WRITE_HEADER; \ - __raw_i915_write##x(dev_priv, reg, val); \ + __raw_uncore_write##x(uncore, reg, val); \ GEN2_WRITE_FOOTER; \ } #define __gen5_write(x) \ static void \ -gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ +gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ GEN2_WRITE_HEADER; \ - ilk_dummy_write(dev_priv); \ - __raw_i915_write##x(dev_priv, reg, val); \ + ilk_dummy_write(uncore); \ + __raw_uncore_write##x(uncore, reg, val); \ GEN2_WRITE_FOOTER; \ } @@ -1263,33 +1203,33 @@ __gen2_write(32) u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - assert_rpm_wakelock_held(dev_priv); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ - unclaimed_reg_debug(dev_priv, reg, false, true) + __assert_rpm_wakelock_held(uncore->rpm); \ + spin_lock_irqsave(&uncore->lock, irqflags); \ + unclaimed_reg_debug(uncore, reg, false, true) #define GEN6_WRITE_FOOTER \ - unclaimed_reg_debug(dev_priv, reg, false, false); \ - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) + unclaimed_reg_debug(uncore, reg, false, false); \ + spin_unlock_irqrestore(&uncore->lock, irqflags) #define __gen6_write(x) \ static void \ -gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ +gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ GEN6_WRITE_HEADER; \ if (NEEDS_FORCE_WAKE(offset)) \ - __gen6_gt_wait_for_fifo(dev_priv); \ - __raw_i915_write##x(dev_priv, reg, val); \ + __gen6_gt_wait_for_fifo(uncore); \ + __raw_uncore_write##x(uncore, reg, val); \ GEN6_WRITE_FOOTER; \ } #define __gen_write(func, x) \ static void \ -func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ +func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_WRITE_HEADER; \ - fw_engine = __##func##_reg_write_fw_domains(offset); \ + fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \ if (fw_engine) \ - __force_wake_auto(dev_priv, fw_engine); \ - 
__raw_i915_write##x(dev_priv, reg, val); \ + __force_wake_auto(uncore, fw_engine); \ + __raw_uncore_write##x(uncore, reg, val); \ GEN6_WRITE_FOOTER; \ } #define __gen8_write(x) __gen_write(gen8, x) @@ -1316,23 +1256,23 @@ __gen6_write(32) #undef GEN6_WRITE_FOOTER #undef GEN6_WRITE_HEADER -#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \ +#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \ do { \ - (i915)->uncore.funcs.mmio_writeb = x##_write8; \ - (i915)->uncore.funcs.mmio_writew = x##_write16; \ - (i915)->uncore.funcs.mmio_writel = x##_write32; \ + (uncore)->funcs.mmio_writeb = x##_write8; \ + (uncore)->funcs.mmio_writew = x##_write16; \ + (uncore)->funcs.mmio_writel = x##_write32; \ } while (0) -#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \ +#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \ do { \ - (i915)->uncore.funcs.mmio_readb = x##_read8; \ - (i915)->uncore.funcs.mmio_readw = x##_read16; \ - (i915)->uncore.funcs.mmio_readl = x##_read32; \ - (i915)->uncore.funcs.mmio_readq = x##_read64; \ + (uncore)->funcs.mmio_readb = x##_read8; \ + (uncore)->funcs.mmio_readw = x##_read16; \ + (uncore)->funcs.mmio_readl = x##_read32; \ + (uncore)->funcs.mmio_readq = x##_read64; \ } while (0) -static void fw_domain_init(struct drm_i915_private *dev_priv, +static void fw_domain_init(struct intel_uncore *uncore, enum forcewake_domain_id domain_id, i915_reg_t reg_set, i915_reg_t reg_ack) @@ -1342,7 +1282,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) return; - d = &dev_priv->uncore.fw_domain[domain_id]; + d = &uncore->fw_domain[domain_id]; WARN_ON(d->wake_count); @@ -1350,8 +1290,8 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, WARN_ON(!i915_mmio_reg_valid(reg_ack)); d->wake_count = 0; - d->reg_set = reg_set; - d->reg_ack = reg_ack; + d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set); + d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack); d->id = domain_id; @@ -1371,12 +1311,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); d->timer.function = intel_uncore_fw_release_timer; - dev_priv->uncore.fw_domains |= BIT(domain_id); + uncore->fw_domains |= BIT(domain_id); - fw_domain_reset(dev_priv, d); + fw_domain_reset(d); } -static void fw_domain_fini(struct drm_i915_private *dev_priv, +static void fw_domain_fini(struct intel_uncore *uncore, enum forcewake_domain_id domain_id) { struct intel_uncore_forcewake_domain *d; @@ -1384,85 +1324,76 @@ static void fw_domain_fini(struct drm_i915_private *dev_priv, if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) return; - d = &dev_priv->uncore.fw_domain[domain_id]; + d = &uncore->fw_domain[domain_id]; WARN_ON(d->wake_count); WARN_ON(hrtimer_cancel(&d->timer)); memset(d, 0, sizeof(*d)); - dev_priv->uncore.fw_domains &= ~BIT(domain_id); + uncore->fw_domains &= ~BIT(domain_id); } -static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) +static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) { - if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv)) - return; + struct drm_i915_private *i915 = uncore_to_i915(uncore); - if (IS_GEN6(dev_priv)) { - dev_priv->uncore.fw_reset = 0; - dev_priv->uncore.fw_set = FORCEWAKE_KERNEL; - dev_priv->uncore.fw_clear = 0; - } else { - /* WaRsClearFWBitsAtReset:bdw,skl */ - dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff); - dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL); - dev_priv->uncore.fw_clear = 
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); - } + if (!intel_uncore_has_forcewake(uncore)) + return; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(i915) >= 11) { int i; - dev_priv->uncore.funcs.force_wake_get = + uncore->funcs.force_wake_get = fw_domains_get_with_fallback; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + uncore->funcs.force_wake_put = fw_domains_put; + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, FORCEWAKE_ACK_RENDER_GEN9); - fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER, + fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, FORCEWAKE_BLITTER_GEN9, FORCEWAKE_ACK_BLITTER_GEN9); for (i = 0; i < I915_MAX_VCS; i++) { - if (!HAS_ENGINE(dev_priv, _VCS(i))) + if (!HAS_ENGINE(i915, _VCS(i))) continue; - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, FORCEWAKE_MEDIA_VDBOX_GEN11(i), FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i)); } for (i = 0; i < I915_MAX_VECS; i++) { - if (!HAS_ENGINE(dev_priv, _VECS(i))) + if (!HAS_ENGINE(i915, _VECS(i))) continue; - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, FORCEWAKE_MEDIA_VEBOX_GEN11(i), FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); } - } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) { - dev_priv->uncore.funcs.force_wake_get = + } else if (IS_GEN_RANGE(i915, 9, 10)) { + uncore->funcs.force_wake_get = fw_domains_get_with_fallback; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + uncore->funcs.force_wake_put = fw_domains_put; + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, FORCEWAKE_ACK_RENDER_GEN9); - fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER, + fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, FORCEWAKE_BLITTER_GEN9, FORCEWAKE_ACK_BLITTER_GEN9); - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - dev_priv->uncore.funcs.force_wake_get = fw_domains_get; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + uncore->funcs.force_wake_get = fw_domains_get; + uncore->funcs.force_wake_put = fw_domains_put; + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - dev_priv->uncore.funcs.force_wake_get = + } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { + uncore->funcs.force_wake_get = fw_domains_get_with_thread_status; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + uncore->funcs.force_wake_put = fw_domains_put; + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_ACK_HSW); - } else if (IS_IVYBRIDGE(dev_priv)) { + } else if (IS_IVYBRIDGE(i915)) { u32 ecobus; /* IVB configs may use multi-threaded forcewake */ @@ -1474,9 +1405,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) * (correctly) interpreted by the test below as MT * forcewake being disabled. 
*/ - dev_priv->uncore.funcs.force_wake_get = + uncore->funcs.force_wake_get = fw_domains_get_with_thread_status; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; + uncore->funcs.force_wake_put = fw_domains_put; /* We need to init first for ECOBUS access and then * determine later if we want to reinit, in case of MT access is @@ -1485,41 +1416,41 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) * before the ecobus check. */ - __raw_i915_write32(dev_priv, FORCEWAKE, 0); - __raw_posting_read(dev_priv, ECOBUS); + __raw_uncore_write32(uncore, FORCEWAKE, 0); + __raw_posting_read(uncore, ECOBUS); - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_MT_ACK); - spin_lock_irq(&dev_priv->uncore.lock); - fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER); - ecobus = __raw_i915_read32(dev_priv, ECOBUS); - fw_domains_put(dev_priv, FORCEWAKE_RENDER); - spin_unlock_irq(&dev_priv->uncore.lock); + spin_lock_irq(&uncore->lock); + fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER); + ecobus = __raw_uncore_read32(uncore, ECOBUS); + fw_domains_put(uncore, FORCEWAKE_RENDER); + spin_unlock_irq(&uncore->lock); if (!(ecobus & FORCEWAKE_MT_ENABLE)) { DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); DRM_INFO("when using vblank-synced partial screen updates.\n"); - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } - } else if (IS_GEN6(dev_priv)) { - dev_priv->uncore.funcs.force_wake_get = + } else if (IS_GEN(i915, 6)) { + uncore->funcs.force_wake_get = fw_domains_get_with_thread_status; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + uncore->funcs.force_wake_put = fw_domains_put; + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } /* All future platforms are expected to require complex power gating */ - WARN_ON(dev_priv->uncore.fw_domains == 0); + WARN_ON(uncore->fw_domains == 0); } -#define ASSIGN_FW_DOMAINS_TABLE(d) \ +#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \ { \ - dev_priv->uncore.fw_domains_table = \ + (uncore)->fw_domains_table = \ (struct intel_forcewake_range *)(d); \ - dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \ + (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \ } static int i915_pmic_bus_access_notifier(struct notifier_block *nb, @@ -1544,66 +1475,132 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, * the access. */ disable_rpm_wakeref_asserts(dev_priv); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); enable_rpm_wakeref_asserts(dev_priv); break; case MBI_PMIC_BUS_ACCESS_END: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); break; } return NOTIFY_OK; } -void intel_uncore_init(struct drm_i915_private *dev_priv) +static int uncore_mmio_setup(struct intel_uncore *uncore) +{ + struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct pci_dev *pdev = i915->drm.pdev; + int mmio_bar; + int mmio_size; + + mmio_bar = IS_GEN(i915, 2) ? 1 : 0; + /* + * Before gen4, the registers and the GTT are behind different BARs. + * However, from gen4 onwards, the registers and the GTT are shared + * in the same BAR, so we want to restrict this ioremap from + * clobbering the GTT which we want ioremap_wc instead. 
Fortunately, + * the register BAR remains the same size for all the earlier + * generations up to Ironlake. + */ + if (INTEL_GEN(i915) < 5) + mmio_size = 512 * 1024; + else + mmio_size = 2 * 1024 * 1024; + uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size); + if (uncore->regs == NULL) { + DRM_ERROR("failed to map registers\n"); + + return -EIO; + } + + return 0; +} + +static void uncore_mmio_cleanup(struct intel_uncore *uncore) { - i915_check_vgpu(dev_priv); + struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct pci_dev *pdev = i915->drm.pdev; + + pci_iounmap(pdev, uncore->regs); +} + +void intel_uncore_init_early(struct intel_uncore *uncore) +{ + spin_lock_init(&uncore->lock); +} + +int intel_uncore_init_mmio(struct intel_uncore *uncore) +{ + struct drm_i915_private *i915 = uncore_to_i915(uncore); + int ret; + + ret = uncore_mmio_setup(uncore); + if (ret) + return ret; - intel_uncore_edram_detect(dev_priv); - intel_uncore_fw_domains_init(dev_priv); - __intel_uncore_early_sanitize(dev_priv, 0); + i915_check_vgpu(i915); - dev_priv->uncore.unclaimed_mmio_check = 1; - dev_priv->uncore.pmic_bus_access_nb.notifier_call = + if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) + uncore->flags |= UNCORE_HAS_FORCEWAKE; + + intel_uncore_fw_domains_init(uncore); + __intel_uncore_early_sanitize(uncore, 0); + + uncore->unclaimed_mmio_check = 1; + uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; - if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) { - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2); - } else if (IS_GEN5(dev_priv)) { - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5); - } else if (IS_GEN(dev_priv, 6, 7)) { - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6); - - if (IS_VALLEYVIEW(dev_priv)) { - ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); + uncore->rpm = &i915->runtime_pm; + + if (!intel_uncore_has_forcewake(uncore)) { + if (IS_GEN(i915, 5)) { + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen5); } else { - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen2); } - } else if (IS_GEN8(dev_priv)) { - if (IS_CHERRYVIEW(dev_priv)) { - ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges); - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); + } else if (IS_GEN_RANGE(i915, 6, 7)) { + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6); + if (IS_VALLEYVIEW(i915)) { + ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges); + ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); } else { - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); } - } else if (IS_GEN(dev_priv, 9, 10)) { - ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges); - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); + } else if (IS_GEN(i915, 8)) { + if (IS_CHERRYVIEW(i915)) { + ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); + ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); + + } else { + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); + } + } else if (IS_GEN_RANGE(i915, 9, 10)) { + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); + ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); } else { - ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges); - 
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable); + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); } - iosf_mbi_register_pmic_bus_access_notifier( - &dev_priv->uncore.pmic_bus_access_nb); + if (HAS_FPGA_DBG_UNCLAIMED(i915)) + uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED; + + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED; + + if (IS_GEN_RANGE(i915, 6, 7)) + uncore->flags |= UNCORE_HAS_FIFO; + + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); + + return 0; } /* @@ -1611,45 +1608,48 @@ void intel_uncore_init(struct drm_i915_private *dev_priv) * the forcewake domains. Prune them, to make sure they only reference existing * engines. */ -void intel_uncore_prune(struct drm_i915_private *dev_priv) +void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore) { - if (INTEL_GEN(dev_priv) >= 11) { - enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains; + struct drm_i915_private *i915 = uncore_to_i915(uncore); + + if (INTEL_GEN(i915) >= 11) { + enum forcewake_domains fw_domains = uncore->fw_domains; enum forcewake_domain_id domain_id; int i; for (i = 0; i < I915_MAX_VCS; i++) { domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; - if (HAS_ENGINE(dev_priv, _VCS(i))) + if (HAS_ENGINE(i915, _VCS(i))) continue; if (fw_domains & BIT(domain_id)) - fw_domain_fini(dev_priv, domain_id); + fw_domain_fini(uncore, domain_id); } for (i = 0; i < I915_MAX_VECS; i++) { domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; - if (HAS_ENGINE(dev_priv, _VECS(i))) + if (HAS_ENGINE(i915, _VECS(i))) continue; if (fw_domains & BIT(domain_id)) - fw_domain_fini(dev_priv, domain_id); + fw_domain_fini(uncore, domain_id); } } } -void intel_uncore_fini(struct drm_i915_private *dev_priv) +void intel_uncore_fini_mmio(struct intel_uncore *uncore) { /* Paranoia: make sure we have disabled everything before we exit. 
*/ - intel_uncore_sanitize(dev_priv); + intel_uncore_sanitize(uncore_to_i915(uncore)); iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( - &dev_priv->uncore.pmic_bus_access_nb); - intel_uncore_forcewake_reset(dev_priv); + &uncore->pmic_bus_access_nb); + intel_uncore_forcewake_reset(uncore); iosf_mbi_punit_release(); + uncore_mmio_cleanup(uncore); } static const struct reg_whitelist { @@ -1670,6 +1670,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_reg_read *reg = data; struct reg_whitelist const *entry; + intel_wakeref_t wakeref; unsigned int flags; int remain; int ret = 0; @@ -1695,289 +1696,28 @@ int i915_reg_read_ioctl(struct drm_device *dev, flags = reg->offset & (entry->size - 1); - intel_runtime_pm_get(dev_priv); - if (entry->size == 8 && flags == I915_REG_READ_8B_WA) - reg->val = I915_READ64_2x32(entry->offset_ldw, - entry->offset_udw); - else if (entry->size == 8 && flags == 0) - reg->val = I915_READ64(entry->offset_ldw); - else if (entry->size == 4 && flags == 0) - reg->val = I915_READ(entry->offset_ldw); - else if (entry->size == 2 && flags == 0) - reg->val = I915_READ16(entry->offset_ldw); - else if (entry->size == 1 && flags == 0) - reg->val = I915_READ8(entry->offset_ldw); - else - ret = -EINVAL; - intel_runtime_pm_put(dev_priv); - - return ret; -} - -static void gen3_stop_engine(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - const u32 base = engine->mmio_base; - - if (intel_engine_stop_cs(engine)) - DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name); - - I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base))); - POSTING_READ_FW(RING_HEAD(base)); /* paranoia */ - - I915_WRITE_FW(RING_HEAD(base), 0); - I915_WRITE_FW(RING_TAIL(base), 0); - POSTING_READ_FW(RING_TAIL(base)); - - /* The ring must be empty before it is disabled */ - I915_WRITE_FW(RING_CTL(base), 0); - - /* Check acts as a post */ - if (I915_READ_FW(RING_HEAD(base)) != 0) - DRM_DEBUG_DRIVER("%s: ring head not parked\n", - engine->name); -} - -static void i915_stop_engines(struct drm_i915_private *dev_priv, - unsigned int engine_mask) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - if (INTEL_GEN(dev_priv) < 3) - return; - - for_each_engine_masked(engine, dev_priv, engine_mask, id) - gen3_stop_engine(engine); -} - -static bool i915_in_reset(struct pci_dev *pdev) -{ - u8 gdrst; - - pci_read_config_byte(pdev, I915_GDRST, &gdrst); - return gdrst & GRDOM_RESET_STATUS; -} - -static int i915_do_reset(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - int err; - - /* Assert reset for at least 20 usec, and wait for acknowledgement. */ - pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); - usleep_range(50, 200); - err = wait_for(i915_in_reset(pdev), 500); - - /* Clear the reset request. 
*/ - pci_write_config_byte(pdev, I915_GDRST, 0); - usleep_range(50, 200); - if (!err) - err = wait_for(!i915_in_reset(pdev), 500); - - return err; -} - -static bool g4x_reset_complete(struct pci_dev *pdev) -{ - u8 gdrst; - - pci_read_config_byte(pdev, I915_GDRST, &gdrst); - return (gdrst & GRDOM_RESET_ENABLE) == 0; -} - -static int g33_do_reset(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - - pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); - return wait_for(g4x_reset_complete(pdev), 500); -} - -static int g4x_do_reset(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - int ret; - - /* WaVcpClkGateDisableForMediaReset:ctg,elk */ - I915_WRITE(VDECCLK_GATE_D, - I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); - POSTING_READ(VDECCLK_GATE_D); - - pci_write_config_byte(pdev, I915_GDRST, - GRDOM_MEDIA | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(pdev), 500); - if (ret) { - DRM_DEBUG_DRIVER("Wait for media reset failed\n"); - goto out; + with_intel_runtime_pm(dev_priv, wakeref) { + if (entry->size == 8 && flags == I915_REG_READ_8B_WA) + reg->val = I915_READ64_2x32(entry->offset_ldw, + entry->offset_udw); + else if (entry->size == 8 && flags == 0) + reg->val = I915_READ64(entry->offset_ldw); + else if (entry->size == 4 && flags == 0) + reg->val = I915_READ(entry->offset_ldw); + else if (entry->size == 2 && flags == 0) + reg->val = I915_READ16(entry->offset_ldw); + else if (entry->size == 1 && flags == 0) + reg->val = I915_READ8(entry->offset_ldw); + else + ret = -EINVAL; } - pci_write_config_byte(pdev, I915_GDRST, - GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(pdev), 500); - if (ret) { - DRM_DEBUG_DRIVER("Wait for render reset failed\n"); - goto out; - } - -out: - pci_write_config_byte(pdev, I915_GDRST, 0); - - I915_WRITE(VDECCLK_GATE_D, - I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); - POSTING_READ(VDECCLK_GATE_D); - return ret; } -static int ironlake_do_reset(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - int ret; - - I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); - ret = intel_wait_for_register(dev_priv, - ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, - 500); - if (ret) { - DRM_DEBUG_DRIVER("Wait for render reset failed\n"); - goto out; - } - - I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); - ret = intel_wait_for_register(dev_priv, - ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, - 500); - if (ret) { - DRM_DEBUG_DRIVER("Wait for media reset failed\n"); - goto out; - } - -out: - I915_WRITE(ILK_GDSR, 0); - POSTING_READ(ILK_GDSR); - return ret; -} - -/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ -static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, - u32 hw_domain_mask) -{ - int err; - - /* GEN6_GDRST is not in the gt power well, no need to check - * for fifo space for the write or forcewake the chip for - * the read - */ - __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask); - - /* Wait for the device to ack the reset requests */ - err = __intel_wait_for_register_fw(dev_priv, - GEN6_GDRST, hw_domain_mask, 0, - 500, 0, - NULL); - if (err) - DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", - hw_domain_mask); - - return err; -} - -/** - * gen6_reset_engines - reset individual engines - * @dev_priv: i915 device - * @engine_mask: mask of 
intel_ring_flag() engines or ALL_ENGINES for full reset - * @retry: the count of of previous attempts to reset. - * - * This function will reset the individual engines that are set in engine_mask. - * If you provide ALL_ENGINES as mask, full global domain reset will be issued. - * - * Note: It is responsibility of the caller to handle the difference between - * asking full domain reset versus reset for all available individual engines. - * - * Returns 0 on success, nonzero on error. - */ -static int gen6_reset_engines(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - struct intel_engine_cs *engine; - const u32 hw_engine_mask[I915_NUM_ENGINES] = { - [RCS] = GEN6_GRDOM_RENDER, - [BCS] = GEN6_GRDOM_BLT, - [VCS] = GEN6_GRDOM_MEDIA, - [VCS2] = GEN8_GRDOM_MEDIA2, - [VECS] = GEN6_GRDOM_VECS, - }; - u32 hw_mask; - - if (engine_mask == ALL_ENGINES) { - hw_mask = GEN6_GRDOM_FULL; - } else { - unsigned int tmp; - - hw_mask = 0; - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) - hw_mask |= hw_engine_mask[engine->id]; - } - - return gen6_hw_domain_reset(dev_priv, hw_mask); -} - -/** - * gen11_reset_engines - reset individual engines - * @dev_priv: i915 device - * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset - * - * This function will reset the individual engines that are set in engine_mask. - * If you provide ALL_ENGINES as mask, full global domain reset will be issued. - * - * Note: It is responsibility of the caller to handle the difference between - * asking full domain reset versus reset for all available individual engines. - * - * Returns 0 on success, nonzero on error. - */ -static int gen11_reset_engines(struct drm_i915_private *dev_priv, - unsigned int engine_mask) -{ - struct intel_engine_cs *engine; - const u32 hw_engine_mask[I915_NUM_ENGINES] = { - [RCS] = GEN11_GRDOM_RENDER, - [BCS] = GEN11_GRDOM_BLT, - [VCS] = GEN11_GRDOM_MEDIA, - [VCS2] = GEN11_GRDOM_MEDIA2, - [VCS3] = GEN11_GRDOM_MEDIA3, - [VCS4] = GEN11_GRDOM_MEDIA4, - [VECS] = GEN11_GRDOM_VECS, - [VECS2] = GEN11_GRDOM_VECS2, - }; - u32 hw_mask; - - BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES); - - if (engine_mask == ALL_ENGINES) { - hw_mask = GEN11_GRDOM_FULL; - } else { - unsigned int tmp; - - hw_mask = 0; - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) - hw_mask |= hw_engine_mask[engine->id]; - } - - return gen6_hw_domain_reset(dev_priv, hw_mask); -} - /** * __intel_wait_for_register_fw - wait until register matches expected state - * @dev_priv: the i915 device + * @uncore: the struct intel_uncore * @reg: the register to read * @mask: mask to apply to register value * @value: expected value @@ -2001,7 +1741,7 @@ static int gen11_reset_engines(struct drm_i915_private *dev_priv, * * Returns 0 if the register matches the desired condition, or -ETIMEOUT. 
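As a point of reference, a minimal caller of the reworked helper might look like the sketch below; it is illustrative only, reuses the register names already appearing in this file (RING_RESET_CTL, RESET_CTL_READY_TO_RESET), and assumes the caller already holds whatever forcewake the register needs, as the _fw suffix requires.

static int wait_for_reset_ack(struct intel_uncore *uncore, u32 base)
{
	/* Poll RING_RESET_CTL(base) for up to 700us for the ready bit. */
	return __intel_wait_for_register_fw(uncore,
					    RING_RESET_CTL(base),
					    RESET_CTL_READY_TO_RESET,
					    RESET_CTL_READY_TO_RESET,
					    700, 0, NULL);
}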
*/ -int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, +int __intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 mask, u32 value, @@ -2010,7 +1750,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, u32 *out_value) { u32 uninitialized_var(reg_value); -#define done (((reg_value = I915_READ_FW(reg)) & mask) == value) +#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value) int ret; /* Catch any overuse of this function */ @@ -2032,7 +1772,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, /** * __intel_wait_for_register - wait until register matches expected state - * @dev_priv: the i915 device + * @uncore: the struct intel_uncore * @reg: the register to read * @mask: mask to apply to register value * @value: expected value @@ -2049,308 +1789,130 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, * * Returns 0 if the register matches the desired condition, or -ETIMEOUT. */ -int __intel_wait_for_register(struct drm_i915_private *dev_priv, - i915_reg_t reg, - u32 mask, - u32 value, - unsigned int fast_timeout_us, - unsigned int slow_timeout_ms, - u32 *out_value) +int __intel_wait_for_register(struct intel_uncore *uncore, + i915_reg_t reg, + u32 mask, + u32 value, + unsigned int fast_timeout_us, + unsigned int slow_timeout_ms, + u32 *out_value) { unsigned fw = - intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); + intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); u32 reg_value; int ret; might_sleep_if(slow_timeout_ms); - spin_lock_irq(&dev_priv->uncore.lock); - intel_uncore_forcewake_get__locked(dev_priv, fw); + spin_lock_irq(&uncore->lock); + intel_uncore_forcewake_get__locked(uncore, fw); - ret = __intel_wait_for_register_fw(dev_priv, + ret = __intel_wait_for_register_fw(uncore, reg, mask, value, fast_timeout_us, 0, ®_value); - intel_uncore_forcewake_put__locked(dev_priv, fw); - spin_unlock_irq(&dev_priv->uncore.lock); + intel_uncore_forcewake_put__locked(uncore, fw); + spin_unlock_irq(&uncore->lock); if (ret && slow_timeout_ms) - ret = __wait_for(reg_value = I915_READ_NOTRACE(reg), + ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore, + reg), (reg_value & mask) == value, slow_timeout_ms * 1000, 10, 1000); + /* just trace the final value */ + trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true); + if (out_value) *out_value = reg_value; return ret; } -static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - int ret; - - I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), - _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); - - ret = __intel_wait_for_register_fw(dev_priv, - RING_RESET_CTL(engine->mmio_base), - RESET_CTL_READY_TO_RESET, - RESET_CTL_READY_TO_RESET, - 700, 0, - NULL); - if (ret) - DRM_ERROR("%s: reset request timeout\n", engine->name); - - return ret; -} - -static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - - I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), - _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); -} - -static int reset_engines(struct drm_i915_private *i915, - unsigned int engine_mask, - unsigned int retry) -{ - if (INTEL_GEN(i915) >= 11) - return gen11_reset_engines(i915, engine_mask); - else - return gen6_reset_engines(i915, engine_mask, retry); -} - -static int gen8_reset_engines(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - 
struct intel_engine_cs *engine; - const bool reset_non_ready = retry >= 1; - unsigned int tmp; - int ret; - - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { - ret = gen8_engine_reset_prepare(engine); - if (ret && !reset_non_ready) - goto skip_reset; - - /* - * If this is not the first failed attempt to prepare, - * we decide to proceed anyway. - * - * By doing so we risk context corruption and with - * some gens (kbl), possible system hang if reset - * happens during active bb execution. - * - * We rather take context corruption instead of - * failed reset with a wedged driver/gpu. And - * active bb execution case should be covered by - * i915_stop_engines we have before the reset. - */ - } - - ret = reset_engines(dev_priv, engine_mask, retry); - -skip_reset: - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) - gen8_engine_reset_cancel(engine); - - return ret; -} - -typedef int (*reset_func)(struct drm_i915_private *, - unsigned int engine_mask, unsigned int retry); - -static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) +bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore) { - if (!i915_modparams.reset) - return NULL; - - if (INTEL_GEN(dev_priv) >= 8) - return gen8_reset_engines; - else if (INTEL_GEN(dev_priv) >= 6) - return gen6_reset_engines; - else if (IS_GEN5(dev_priv)) - return ironlake_do_reset; - else if (IS_G4X(dev_priv)) - return g4x_do_reset; - else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) - return g33_do_reset; - else if (INTEL_GEN(dev_priv) >= 3) - return i915_do_reset; - else - return NULL; -} - -int intel_gpu_reset(struct drm_i915_private *dev_priv, - const unsigned int engine_mask) -{ - reset_func reset = intel_get_gpu_reset(dev_priv); - unsigned int retry; - int ret; - - GEM_BUG_ON(!engine_mask); - - /* - * We want to perform per-engine reset from atomic context (e.g. - * softirq), which imposes the constraint that we cannot sleep. - * However, experience suggests that spending a bit of time waiting - * for a reset helps in various cases, so for a full-device reset - * we apply the opposite rule and wait if we want to. As we should - * always follow up a failed per-engine reset with a full device reset, - * being a little faster, stricter and more error prone for the - * atomic case seems an acceptable compromise. - * - * Unfortunately this leads to a bimodal routine, when the goal was - * to have a single reset function that worked for resetting any - * number of engines simultaneously. - */ - might_sleep_if(engine_mask == ALL_ENGINES); - - /* - * If the power well sleeps during the reset, the reset - * request may be dropped and never completes (causing -EIO). - */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - for (retry = 0; retry < 3; retry++) { - - /* - * We stop engines, otherwise we might get failed reset and a - * dead gpu (on elk). Also as modern gpu as kbl can suffer - * from system hang if batchbuffer is progressing when - * the reset is issued, regardless of READY_TO_RESET ack. - * Thus assume it is best to stop engines on all gens - * where we have a gpu reset. 
- * - * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) - * - * WaMediaResetMainRingCleanup:ctg,elk (presumably) - * - * FIXME: Wa for more modern gens needs to be validated - */ - i915_stop_engines(dev_priv, engine_mask); - - ret = -ENODEV; - if (reset) { - ret = reset(dev_priv, engine_mask, retry); - GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n", - engine_mask, ret, retry); - } - if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES) - break; - - cond_resched(); - } - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - - return ret; -} - -bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) -{ - return intel_get_gpu_reset(dev_priv) != NULL; -} - -bool intel_has_reset_engine(struct drm_i915_private *dev_priv) -{ - return (dev_priv->info.has_reset_engine && - i915_modparams.reset >= 2); -} - -int intel_reset_guc(struct drm_i915_private *dev_priv) -{ - u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC : - GEN9_GRDOM_GUC; - int ret; - - GEM_BUG_ON(!HAS_GUC(dev_priv)); - - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - ret = gen6_hw_domain_reset(dev_priv, guc_domain); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - - return ret; -} - -bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) -{ - return check_for_unclaimed_mmio(dev_priv); + return check_for_unclaimed_mmio(uncore); } bool -intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) +intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) { bool ret = false; - spin_lock_irq(&dev_priv->uncore.lock); + spin_lock_irq(&uncore->lock); - if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0)) + if (unlikely(uncore->unclaimed_mmio_check <= 0)) goto out; - if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { + if (unlikely(intel_uncore_unclaimed_mmio(uncore))) { if (!i915_modparams.mmio_debug) { DRM_DEBUG("Unclaimed register detected, " "enabling oneshot unclaimed register reporting. 
" "Please use i915.mmio_debug=N for more information.\n"); i915_modparams.mmio_debug++; } - dev_priv->uncore.unclaimed_mmio_check--; + uncore->unclaimed_mmio_check--; ret = true; } out: - spin_unlock_irq(&dev_priv->uncore.lock); + spin_unlock_irq(&uncore->lock); return ret; } static enum forcewake_domains -intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, +intel_uncore_forcewake_for_read(struct intel_uncore *uncore, i915_reg_t reg) { + struct drm_i915_private *i915 = uncore_to_i915(uncore); u32 offset = i915_mmio_reg_offset(reg); enum forcewake_domains fw_domains; - if (INTEL_GEN(dev_priv) >= 11) { - fw_domains = __gen11_fwtable_reg_read_fw_domains(offset); - } else if (HAS_FWTABLE(dev_priv)) { - fw_domains = __fwtable_reg_read_fw_domains(offset); - } else if (INTEL_GEN(dev_priv) >= 6) { - fw_domains = __gen6_reg_read_fw_domains(offset); + if (INTEL_GEN(i915) >= 11) { + fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset); + } else if (HAS_FWTABLE(i915)) { + fw_domains = __fwtable_reg_read_fw_domains(uncore, offset); + } else if (INTEL_GEN(i915) >= 6) { + fw_domains = __gen6_reg_read_fw_domains(uncore, offset); } else { - WARN_ON(!IS_GEN(dev_priv, 2, 5)); + /* on devices with FW we expect to hit one of the above cases */ + if (intel_uncore_has_forcewake(uncore)) + MISSING_CASE(INTEL_GEN(i915)); + fw_domains = 0; } - WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); + WARN_ON(fw_domains & ~uncore->fw_domains); return fw_domains; } static enum forcewake_domains -intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, +intel_uncore_forcewake_for_write(struct intel_uncore *uncore, i915_reg_t reg) { + struct drm_i915_private *i915 = uncore_to_i915(uncore); u32 offset = i915_mmio_reg_offset(reg); enum forcewake_domains fw_domains; - if (INTEL_GEN(dev_priv) >= 11) { - fw_domains = __gen11_fwtable_reg_write_fw_domains(offset); - } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { - fw_domains = __fwtable_reg_write_fw_domains(offset); - } else if (IS_GEN8(dev_priv)) { - fw_domains = __gen8_reg_write_fw_domains(offset); - } else if (IS_GEN(dev_priv, 6, 7)) { + if (INTEL_GEN(i915) >= 11) { + fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset); + } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) { + fw_domains = __fwtable_reg_write_fw_domains(uncore, offset); + } else if (IS_GEN(i915, 8)) { + fw_domains = __gen8_reg_write_fw_domains(uncore, offset); + } else if (IS_GEN_RANGE(i915, 6, 7)) { fw_domains = FORCEWAKE_RENDER; } else { - WARN_ON(!IS_GEN(dev_priv, 2, 5)); + /* on devices with FW we expect to hit one of the above cases */ + if (intel_uncore_has_forcewake(uncore)) + MISSING_CASE(INTEL_GEN(i915)); + fw_domains = 0; } - WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); + WARN_ON(fw_domains & ~uncore->fw_domains); return fw_domains; } @@ -2358,7 +1920,7 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, /** * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access * a register - * @dev_priv: pointer to struct drm_i915_private + * @uncore: pointer to struct intel_uncore * @reg: register in question * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE * @@ -2370,21 +1932,21 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, * callers to do FIFO management on their own or risk losing writes. 
*/ enum forcewake_domains -intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, +intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, i915_reg_t reg, unsigned int op) { enum forcewake_domains fw_domains = 0; WARN_ON(!op); - if (intel_vgpu_active(dev_priv)) + if (!intel_uncore_has_forcewake(uncore)) return 0; if (op & FW_REG_READ) - fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); + fw_domains = intel_uncore_forcewake_for_read(uncore, reg); if (op & FW_REG_WRITE) - fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); + fw_domains |= intel_uncore_forcewake_for_write(uncore, reg); return fw_domains; } diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index e5e157d288de..a64d3dc0db4d 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -28,10 +28,13 @@ #include <linux/spinlock.h> #include <linux/notifier.h> #include <linux/hrtimer.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include "i915_reg.h" struct drm_i915_private; +struct i915_runtime_pm; +struct intel_uncore; enum forcewake_domain_id { FW_DOMAIN_ID_RENDER = 0, @@ -62,25 +65,25 @@ enum forcewake_domains { }; struct intel_uncore_funcs { - void (*force_wake_get)(struct drm_i915_private *dev_priv, + void (*force_wake_get)(struct intel_uncore *uncore, enum forcewake_domains domains); - void (*force_wake_put)(struct drm_i915_private *dev_priv, + void (*force_wake_put)(struct intel_uncore *uncore, enum forcewake_domains domains); - u8 (*mmio_readb)(struct drm_i915_private *dev_priv, + u8 (*mmio_readb)(struct intel_uncore *uncore, i915_reg_t r, bool trace); - u16 (*mmio_readw)(struct drm_i915_private *dev_priv, + u16 (*mmio_readw)(struct intel_uncore *uncore, i915_reg_t r, bool trace); - u32 (*mmio_readl)(struct drm_i915_private *dev_priv, + u32 (*mmio_readl)(struct intel_uncore *uncore, i915_reg_t r, bool trace); - u64 (*mmio_readq)(struct drm_i915_private *dev_priv, + u64 (*mmio_readq)(struct intel_uncore *uncore, i915_reg_t r, bool trace); - void (*mmio_writeb)(struct drm_i915_private *dev_priv, + void (*mmio_writeb)(struct intel_uncore *uncore, i915_reg_t r, u8 val, bool trace); - void (*mmio_writew)(struct drm_i915_private *dev_priv, + void (*mmio_writew)(struct intel_uncore *uncore, i915_reg_t r, u16 val, bool trace); - void (*mmio_writel)(struct drm_i915_private *dev_priv, + void (*mmio_writel)(struct intel_uncore *uncore, i915_reg_t r, u32 val, bool trace); }; @@ -92,8 +95,18 @@ struct intel_forcewake_range { }; struct intel_uncore { + void __iomem *regs; + + struct i915_runtime_pm *rpm; + spinlock_t lock; /** lock is also taken in irq contexts. 
*/ + unsigned int flags; +#define UNCORE_HAS_FORCEWAKE BIT(0) +#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1) +#define UNCORE_HAS_DBG_UNCLAIMED BIT(2) +#define UNCORE_HAS_FIFO BIT(3) + const struct intel_forcewake_range *fw_domains_table; unsigned int fw_domains_table_entries; @@ -106,18 +119,14 @@ struct intel_uncore { enum forcewake_domains fw_domains_active; enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */ - u32 fw_set; - u32 fw_clear; - u32 fw_reset; - struct intel_uncore_forcewake_domain { enum forcewake_domain_id id; enum forcewake_domains mask; unsigned int wake_count; bool active; struct hrtimer timer; - i915_reg_t reg_set; - i915_reg_t reg_ack; + u32 __iomem *reg_set; + u32 __iomem *reg_ack; } fw_domain[FW_DOMAIN_ID_COUNT]; struct { @@ -131,86 +140,242 @@ struct intel_uncore { }; /* Iterate over initialised fw domains */ -#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \ +#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \ for (tmp__ = (mask__); \ - tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;) + tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;) + +#define for_each_fw_domain(domain__, uncore__, tmp__) \ + for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__) + +static inline struct intel_uncore * +forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d) +{ + return container_of(d, struct intel_uncore, fw_domain[d->id]); +} + +static inline bool +intel_uncore_has_forcewake(const struct intel_uncore *uncore) +{ + return uncore->flags & UNCORE_HAS_FORCEWAKE; +} + +static inline bool +intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore) +{ + return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED; +} -#define for_each_fw_domain(domain__, dev_priv__, tmp__) \ - for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__) +static inline bool +intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore) +{ + return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED; +} +static inline bool +intel_uncore_has_fifo(const struct intel_uncore *uncore) +{ + return uncore->flags & UNCORE_HAS_FIFO; +} void intel_uncore_sanitize(struct drm_i915_private *dev_priv); -void intel_uncore_init(struct drm_i915_private *dev_priv); -void intel_uncore_prune(struct drm_i915_private *dev_priv); -bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); -bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); -void intel_uncore_fini(struct drm_i915_private *dev_priv); -void intel_uncore_suspend(struct drm_i915_private *dev_priv); -void intel_uncore_resume_early(struct drm_i915_private *dev_priv); -void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv); - -u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); -void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); -void assert_forcewakes_active(struct drm_i915_private *dev_priv, +void intel_uncore_init_early(struct intel_uncore *uncore); +int intel_uncore_init_mmio(struct intel_uncore *uncore); +void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore); +bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); +bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); +void intel_uncore_fini_mmio(struct intel_uncore *uncore); +void intel_uncore_suspend(struct intel_uncore *uncore); +void intel_uncore_resume_early(struct intel_uncore *uncore); +void 
intel_uncore_runtime_resume(struct intel_uncore *uncore); + +void assert_forcewakes_inactive(struct intel_uncore *uncore); +void assert_forcewakes_active(struct intel_uncore *uncore, enum forcewake_domains fw_domains); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); enum forcewake_domains -intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, +intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, i915_reg_t reg, unsigned int op); #define FW_REG_READ (1) #define FW_REG_WRITE (2) -void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains domains); -void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains domains); /* Like above but the caller must manage the uncore.lock itself. * Must be used with I915_READ_FW and friends. */ -void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, enum forcewake_domains domains); -void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, enum forcewake_domains domains); -void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv); -void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv); +void intel_uncore_forcewake_user_get(struct intel_uncore *uncore); +void intel_uncore_forcewake_user_put(struct intel_uncore *uncore); -int __intel_wait_for_register(struct drm_i915_private *dev_priv, +int __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t reg, u32 mask, u32 value, unsigned int fast_timeout_us, unsigned int slow_timeout_ms, u32 *out_value); -static inline -int intel_wait_for_register(struct drm_i915_private *dev_priv, - i915_reg_t reg, - u32 mask, - u32 value, - unsigned int timeout_ms) +static inline int +intel_wait_for_register(struct intel_uncore *uncore, + i915_reg_t reg, + u32 mask, + u32 value, + unsigned int timeout_ms) { - return __intel_wait_for_register(dev_priv, reg, mask, value, 2, + return __intel_wait_for_register(uncore, reg, mask, value, 2, timeout_ms, NULL); } -int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, + +int __intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 mask, u32 value, unsigned int fast_timeout_us, unsigned int slow_timeout_ms, u32 *out_value); -static inline -int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, - i915_reg_t reg, - u32 mask, - u32 value, +static inline int +intel_wait_for_register_fw(struct intel_uncore *uncore, + i915_reg_t reg, + u32 mask, + u32 value, unsigned int timeout_ms) { - return __intel_wait_for_register_fw(dev_priv, reg, mask, value, + return __intel_wait_for_register_fw(uncore, reg, mask, value, 2, timeout_ms, NULL); } +/* register access functions */ +#define __raw_read(x__, s__) \ +static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \ + i915_reg_t reg) \ +{ \ + return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \ +} + +#define __raw_write(x__, s__) \ +static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \ + i915_reg_t reg, u##x__ val) \ +{ \ + write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \ +} +__raw_read(8, b) +__raw_read(16, w) +__raw_read(32, l) +__raw_read(64, q) + +__raw_write(8, b) +__raw_write(16, w) 
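For readability, the 32-bit read instantiation above (and its write counterpart generated just below) expands to roughly the following, with readl()/writel() from <linux/io.h> operating on the uncore->regs mapping:

static inline u32 __raw_uncore_read32(const struct intel_uncore *uncore,
				      i915_reg_t reg)
{
	return readl(uncore->regs + i915_mmio_reg_offset(reg));
}

static inline void __raw_uncore_write32(const struct intel_uncore *uncore,
					i915_reg_t reg, u32 val)
{
	writel(val, uncore->regs + i915_mmio_reg_offset(reg));
}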
+__raw_write(32, l) +__raw_write(64, q) + +#undef __raw_read +#undef __raw_write + +#define __uncore_read(name__, x__, s__, trace__) \ +static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \ + i915_reg_t reg) \ +{ \ + return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \ +} + +#define __uncore_write(name__, x__, s__, trace__) \ +static inline void intel_uncore_##name__(struct intel_uncore *uncore, \ + i915_reg_t reg, u##x__ val) \ +{ \ + uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \ +} + +__uncore_read(read8, 8, b, true) +__uncore_read(read16, 16, w, true) +__uncore_read(read, 32, l, true) +__uncore_read(read16_notrace, 16, w, false) +__uncore_read(read_notrace, 32, l, false) + +__uncore_write(write8, 8, b, true) +__uncore_write(write16, 16, w, true) +__uncore_write(write, 32, l, true) +__uncore_write(write_notrace, 32, l, false) + +/* Be very careful with read/write 64-bit values. On 32-bit machines, they + * will be implemented using 2 32-bit writes in an arbitrary order with + * an arbitrary delay between them. This can cause the hardware to + * act upon the intermediate value, possibly leading to corruption and + * machine death. For this reason we do not support I915_WRITE64, or + * uncore->funcs.mmio_writeq. + * + * When reading a 64-bit value as two 32-bit values, the delay may cause + * the two reads to mismatch, e.g. a timestamp overflowing. Also note that + * occasionally a 64-bit register does not actually support a full readq + * and must be read using two 32-bit reads. + * + * You have been warned. + */ +__uncore_read(read64, 64, q, true) + +static inline u64 +intel_uncore_read64_2x32(struct intel_uncore *uncore, + i915_reg_t lower_reg, i915_reg_t upper_reg) +{ + u32 upper, lower, old_upper, loop = 0; + upper = intel_uncore_read(uncore, upper_reg); + do { + old_upper = upper; + lower = intel_uncore_read(uncore, lower_reg); + upper = intel_uncore_read(uncore, upper_reg); + } while (upper != old_upper && loop++ < 2); + return (u64)upper << 32 | lower; +} + +#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__)) +#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__)) + +#undef __uncore_read +#undef __uncore_write + +/* These are untraced mmio-accessors that are only valid to be used inside + * critical sections, such as inside IRQ handlers, where forcewake is explicitly + * controlled. + * + * Think twice, and think again, before using these. + * + * As an example, these accessors can possibly be used between: + * + * spin_lock_irq(&uncore->lock); + * intel_uncore_forcewake_get__locked(); + * + * and + * + * intel_uncore_forcewake_put__locked(); + * spin_unlock_irq(&uncore->lock); + * + * + * Note: some registers may not need forcewake held, so + * intel_uncore_forcewake_{get,put} can be omitted, see + * intel_uncore_forcewake_for_reg(). + * + * Certain architectures will die if the same cacheline is concurrently accessed + * by different clients (e.g. on Ivybridge). Access to registers should + * therefore generally be serialised, by either the dev_priv->uncore.lock or + * a more localised lock guarding all access to that bank of registers. + */ +#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__) +#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__) +#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__) +#define intel_uncore_posting_read_fw(...) 
((void)intel_uncore_read_fw(__VA_ARGS__)) + +static inline void intel_uncore_rmw_or_fw(struct intel_uncore *uncore, + i915_reg_t reg, u32 or_val) +{ + intel_uncore_write_fw(uncore, reg, + intel_uncore_read_fw(uncore, reg) | or_val); +} + #define raw_reg_read(base, reg) \ readl(base + i915_mmio_reg_offset(reg)) #define raw_reg_write(base, reg, value) \ diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index bf3662ad5fed..fdbbb9a53804 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -772,6 +772,9 @@ struct psr_table { /* TP wake up time in multiple of 100 */ u16 tp1_wakeup_time; u16 tp2_tp3_wakeup_time; + + /* PSR2 TP2/TP3 wakeup time for 16 panels */ + u32 psr2_tp2_tp3_wakeup_time; } __packed; struct bdb_psr { diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c index c56ba0e04044..3f9921ba4a76 100644 --- a/drivers/gpu/drm/i915/intel_vdsc.c +++ b/drivers/gpu/drm/i915/intel_vdsc.c @@ -6,7 +6,6 @@ * Manasi Navare <manasi.d.navare@intel.com> */ -#include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "intel_drv.h" @@ -318,129 +317,6 @@ static int get_column_index_for_rc_params(u8 bits_per_component) } } -static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) -{ - unsigned long groups_per_line = 0; - unsigned long groups_total = 0; - unsigned long num_extra_mux_bits = 0; - unsigned long slice_bits = 0; - unsigned long hrd_delay = 0; - unsigned long final_scale = 0; - unsigned long rbs_min = 0; - - /* Number of groups used to code each line of a slice */ - groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width, - DSC_RC_PIXELS_PER_GROUP); - - /* chunksize in Bytes */ - vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width * - vdsc_cfg->bits_per_pixel, - (8 * 16)); - - if (vdsc_cfg->convert_rgb) - num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size + - (4 * vdsc_cfg->bits_per_component + 4) - - 2); - else - num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size + - (4 * vdsc_cfg->bits_per_component + 4) + - 2 * (4 * vdsc_cfg->bits_per_component) - 2; - /* Number of bits in one Slice */ - slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height; - - while ((num_extra_mux_bits > 0) && - ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size)) - num_extra_mux_bits--; - - if (groups_per_line < vdsc_cfg->initial_scale_value - 8) - vdsc_cfg->initial_scale_value = groups_per_line + 8; - - /* scale_decrement_interval calculation according to DSC spec 1.11 */ - if (vdsc_cfg->initial_scale_value > 8) - vdsc_cfg->scale_decrement_interval = groups_per_line / - (vdsc_cfg->initial_scale_value - 8); - else - vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX; - - vdsc_cfg->final_offset = vdsc_cfg->rc_model_size - - (vdsc_cfg->initial_xmit_delay * - vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits; - - if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) { - DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n"); - return -ERANGE; - } - - final_scale = (vdsc_cfg->rc_model_size * 8) / - (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset); - if (vdsc_cfg->slice_height > 1) - /* - * NflBpgOffset is 16 bit value with 11 fractional bits - * hence we multiply by 2^11 for preserving the - * fractional part - */ - vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11), - (vdsc_cfg->slice_height - 1)); - else - vdsc_cfg->nfl_bpg_offset = 0; - - /* 2^16 - 1 */ - if 
(vdsc_cfg->nfl_bpg_offset > 65535) { - DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n"); - return -ERANGE; - } - - /* Number of groups used to code the entire slice */ - groups_total = groups_per_line * vdsc_cfg->slice_height; - - /* slice_bpg_offset is 16 bit value with 11 fractional bits */ - vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size - - vdsc_cfg->initial_offset + - num_extra_mux_bits) << 11), - groups_total); - - if (final_scale > 9) { - /* - * ScaleIncrementInterval = - * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125)) - * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value, - * we need divide by 2^11 from pstDscCfg values - */ - vdsc_cfg->scale_increment_interval = - (vdsc_cfg->final_offset * (1 << 11)) / - ((vdsc_cfg->nfl_bpg_offset + - vdsc_cfg->slice_bpg_offset) * - (final_scale - 9)); - } else { - /* - * If finalScaleValue is less than or equal to 9, a value of 0 should - * be used to disable the scale increment at the end of the slice - */ - vdsc_cfg->scale_increment_interval = 0; - } - - if (vdsc_cfg->scale_increment_interval > 65535) { - DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n"); - return -ERANGE; - } - - /* - * DSC spec mentions that bits_per_pixel specifies the target - * bits/pixel (bpp) rate that is used by the encoder, - * in steps of 1/16 of a bit per pixel - */ - rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset + - DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay * - vdsc_cfg->bits_per_pixel, 16) + - groups_per_line * vdsc_cfg->first_line_bpg_offset; - - hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel); - vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; - vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; - - return 0; -} - int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config) { @@ -492,7 +368,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; /* Gen 11 does not support YCbCr */ - vdsc_cfg->enable422 = false; + vdsc_cfg->simple_422 = false; /* Gen 11 does not support VBR */ vdsc_cfg->vbr_enable = false; vdsc_cfg->block_pred_enable = @@ -575,7 +451,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); - return intel_compute_rc_parameters(vdsc_cfg); + return drm_dsc_compute_rc_parameters(vdsc_cfg); } enum intel_display_power_domain @@ -619,7 +495,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_BLOCK_PREDICTION; if (vdsc_cfg->convert_rgb) pps_val |= DSC_COLOR_SPACE_CONVERSION; - if (vdsc_cfg->enable422) + if (vdsc_cfg->simple_422) pps_val |= DSC_422_ENABLE; if (vdsc_cfg->vbr_enable) pps_val |= DSC_VBR_ENABLE; @@ -1005,10 +881,10 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder, struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ - drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp); + drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header); /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */ - drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg); + drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg); intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_PPS, &dp_dsc_pps_sdp, @@ -1083,6 +959,6 @@ void intel_dsc_disable(const struct 
intel_crtc_state *old_crtc_state) I915_WRITE(dss_ctl2_reg, dss_ctl2_val); /* Disable Power wells for VDSC/joining */ - intel_display_power_put(dev_priv, - intel_dsc_power_domain(old_crtc_state)); + intel_display_power_put_unchecked(dev_priv, + intel_dsc_power_domain(old_crtc_state)); } diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 92cb82dd0c07..f82a415ea2ba 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -130,11 +130,11 @@ static inline int check_hw_restriction(struct drm_i915_private *i915, { int err = 0; - if (IS_GEN9(i915)) + if (IS_GEN(i915, 9)) err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size); if (!err && - (IS_GEN9(i915) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0))) + (IS_GEN(i915, 9) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0))) err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size); return err; @@ -163,7 +163,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) u32 guc_wopcm_rsvd; int err; - if (!USES_GUC(dev_priv)) + if (!USES_GUC(i915)) return 0; GEM_BUG_ON(!wopcm->size); diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c index 4f41e326f3f3..a04dbc58ec1c 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.c +++ b/drivers/gpu/drm/i915/intel_workarounds.c @@ -142,7 +142,8 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) } static void -__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val) +wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, + u32 val) { struct i915_wa wa = { .reg = reg, @@ -153,16 +154,32 @@ __wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val) _wa_add(wal, &wa); } -#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val)) +static void +wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +{ + wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val)); +} + +static void +wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +{ + wa_write_masked_or(wal, reg, ~0, val); +} + +static void +wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +{ + wa_write_masked_or(wal, reg, val, val); +} #define WA_SET_BIT_MASKED(addr, mask) \ - WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask)) + wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask)) #define WA_CLR_BIT_MASKED(addr, mask) \ - WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask)) + wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask)) #define WA_SET_FIELD_MASKED(addr, mask, value) \ - WA_REG(addr, (mask), _MASKED_FIELD(mask, value)) + wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value))) static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine) { @@ -366,7 +383,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine) * Only consider slices where one, and only one, subslice has 7 * EUs */ - if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i])) + if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i])) continue; /* @@ -375,7 +392,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine) * * -> 0 <= ss <= 3; */ - ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1; + ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1; vals[i] = 3 - ss; } @@ -532,6 +549,17 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine) if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC); + + /* 
WaEnableFloatBlendOptimization:icl */ + wa_write_masked_or(wal, + GEN10_CACHE_MODE_SS, + 0, /* write-only, so skip validation */ + _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE)); + + /* WaDisableGPGPUMidThreadPreemption:icl */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); } void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) @@ -541,26 +569,26 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) wa_init_start(wal, "context"); - if (INTEL_GEN(i915) < 8) - return; - else if (IS_BROADWELL(i915)) - bdw_ctx_workarounds_init(engine); - else if (IS_CHERRYVIEW(i915)) - chv_ctx_workarounds_init(engine); - else if (IS_SKYLAKE(i915)) - skl_ctx_workarounds_init(engine); - else if (IS_BROXTON(i915)) - bxt_ctx_workarounds_init(engine); - else if (IS_KABYLAKE(i915)) - kbl_ctx_workarounds_init(engine); - else if (IS_GEMINILAKE(i915)) - glk_ctx_workarounds_init(engine); - else if (IS_COFFEELAKE(i915)) - cfl_ctx_workarounds_init(engine); + if (IS_ICELAKE(i915)) + icl_ctx_workarounds_init(engine); else if (IS_CANNONLAKE(i915)) cnl_ctx_workarounds_init(engine); - else if (IS_ICELAKE(i915)) - icl_ctx_workarounds_init(engine); + else if (IS_COFFEELAKE(i915)) + cfl_ctx_workarounds_init(engine); + else if (IS_GEMINILAKE(i915)) + glk_ctx_workarounds_init(engine); + else if (IS_KABYLAKE(i915)) + kbl_ctx_workarounds_init(engine); + else if (IS_BROXTON(i915)) + bxt_ctx_workarounds_init(engine); + else if (IS_SKYLAKE(i915)) + skl_ctx_workarounds_init(engine); + else if (IS_CHERRYVIEW(i915)) + chv_ctx_workarounds_init(engine); + else if (IS_BROADWELL(i915)) + bdw_ctx_workarounds_init(engine); + else if (INTEL_GEN(i915) < 8) + return; else MISSING_CASE(INTEL_GEN(i915)); @@ -603,46 +631,8 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq) } static void -wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) -{ - struct i915_wa wa = { - .reg = reg, - .mask = val, - .val = _MASKED_BIT_ENABLE(val) - }; - - _wa_add(wal, &wa); -} - -static void -wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, - u32 val) -{ - struct i915_wa wa = { - .reg = reg, - .mask = mask, - .val = val - }; - - _wa_add(wal, &wa); -} - -static void -wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - wa_write_masked_or(wal, reg, ~0, val); -} - -static void -wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) -{ - wa_write_masked_or(wal, reg, val, val); -} - -static void gen9_gt_workarounds_init(struct drm_i915_private *i915) -{ - struct i915_wa_list *wal = &i915->gt_wa_list; - /* WaDisableKillLogic:bxt,skl,kbl */ if (!IS_COFFEELAKE(i915)) wa_write_or(wal, @@ -666,11 +656,10 @@ static void gen9_gt_workarounds_init(struct drm_i915_private *i915) BDW_DISABLE_HDC_INVALIDATION); } -static void skl_gt_workarounds_init(struct drm_i915_private *i915) +static void +skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - struct i915_wa_list *wal = &i915->gt_wa_list; - - gen9_gt_workarounds_init(i915); + gen9_gt_workarounds_init(i915, wal); /* WaDisableGafsUnitClkGating:skl */ wa_write_or(wal, @@ -684,11 +673,10 @@ static void skl_gt_workarounds_init(struct drm_i915_private *i915) GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } -static void bxt_gt_workarounds_init(struct drm_i915_private *i915) +static void +bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - struct i915_wa_list 
*wal = &i915->gt_wa_list; - - gen9_gt_workarounds_init(i915); + gen9_gt_workarounds_init(i915, wal); /* WaInPlaceDecompressionHang:bxt */ wa_write_or(wal, @@ -696,11 +684,10 @@ static void bxt_gt_workarounds_init(struct drm_i915_private *i915) GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } -static void kbl_gt_workarounds_init(struct drm_i915_private *i915) +static void +kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - struct i915_wa_list *wal = &i915->gt_wa_list; - - gen9_gt_workarounds_init(i915); + gen9_gt_workarounds_init(i915, wal); /* WaDisableDynamicCreditSharing:kbl */ if (IS_KBL_REVID(i915, 0, KBL_REVID_B0)) @@ -719,16 +706,16 @@ static void kbl_gt_workarounds_init(struct drm_i915_private *i915) GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } -static void glk_gt_workarounds_init(struct drm_i915_private *i915) +static void +glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - gen9_gt_workarounds_init(i915); + gen9_gt_workarounds_init(i915, wal); } -static void cfl_gt_workarounds_init(struct drm_i915_private *i915) +static void +cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - struct i915_wa_list *wal = &i915->gt_wa_list; - - gen9_gt_workarounds_init(i915); + gen9_gt_workarounds_init(i915, wal); /* WaDisableGafsUnitClkGating:cfl */ wa_write_or(wal, @@ -741,10 +728,10 @@ static void cfl_gt_workarounds_init(struct drm_i915_private *i915) GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } -static void wa_init_mcr(struct drm_i915_private *dev_priv) +static void +wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal) { - const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); - struct i915_wa_list *wal = &dev_priv->gt_wa_list; + const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 mcr_slice_subslice_mask; /* @@ -804,11 +791,10 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv) intel_calculate_mcr_s_ss_select(dev_priv)); } -static void cnl_gt_workarounds_init(struct drm_i915_private *i915) +static void +cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - struct i915_wa_list *wal = &i915->gt_wa_list; - - wa_init_mcr(i915); + wa_init_mcr(i915, wal); /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0)) @@ -822,11 +808,10 @@ static void cnl_gt_workarounds_init(struct drm_i915_private *i915) GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } -static void icl_gt_workarounds_init(struct drm_i915_private *i915) +static void +icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - struct i915_wa_list *wal = &i915->gt_wa_list; - - wa_init_mcr(i915); + wa_init_mcr(i915, wal); /* WaInPlaceDecompressionHang:icl */ wa_write_or(wal, @@ -879,35 +864,35 @@ static void icl_gt_workarounds_init(struct drm_i915_private *i915) GAMT_CHKN_DISABLE_L3_COH_PIPE); } -void intel_gt_init_workarounds(struct drm_i915_private *i915) +static void +gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal) { - struct i915_wa_list *wal = &i915->gt_wa_list; - - wa_init_start(wal, "GT"); - - if (INTEL_GEN(i915) < 8) - return; - else if (IS_BROADWELL(i915)) - return; - else if (IS_CHERRYVIEW(i915)) - return; - else if (IS_SKYLAKE(i915)) - skl_gt_workarounds_init(i915); - else if (IS_BROXTON(i915)) - bxt_gt_workarounds_init(i915); - else if (IS_KABYLAKE(i915)) - kbl_gt_workarounds_init(i915); - else if (IS_GEMINILAKE(i915)) - glk_gt_workarounds_init(i915); - else if 
(IS_COFFEELAKE(i915)) - cfl_gt_workarounds_init(i915); + if (IS_ICELAKE(i915)) + icl_gt_workarounds_init(i915, wal); else if (IS_CANNONLAKE(i915)) - cnl_gt_workarounds_init(i915); - else if (IS_ICELAKE(i915)) - icl_gt_workarounds_init(i915); + cnl_gt_workarounds_init(i915, wal); + else if (IS_COFFEELAKE(i915)) + cfl_gt_workarounds_init(i915, wal); + else if (IS_GEMINILAKE(i915)) + glk_gt_workarounds_init(i915, wal); + else if (IS_KABYLAKE(i915)) + kbl_gt_workarounds_init(i915, wal); + else if (IS_BROXTON(i915)) + bxt_gt_workarounds_init(i915, wal); + else if (IS_SKYLAKE(i915)) + skl_gt_workarounds_init(i915, wal); + else if (INTEL_GEN(i915) <= 8) + return; else MISSING_CASE(INTEL_GEN(i915)); +} +void intel_gt_init_workarounds(struct drm_i915_private *i915) +{ + struct i915_wa_list *wal = &i915->gt_wa_list; + + wa_init_start(wal, "GT"); + gt_init_workarounds(i915, wal); wa_init_finish(wal); } @@ -920,7 +905,7 @@ wal_get_fw_for_rmw(struct drm_i915_private *dev_priv, unsigned int i; for (i = 0, wa = wal->list; i < wal->count; i++, wa++) - fw |= intel_uncore_forcewake_for_reg(dev_priv, + fw |= intel_uncore_forcewake_for_reg(&dev_priv->uncore, wa->reg, FW_REG_READ | FW_REG_WRITE); @@ -942,7 +927,7 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal) fw = wal_get_fw_for_rmw(dev_priv, wal); spin_lock_irqsave(&dev_priv->uncore.lock, flags); - intel_uncore_forcewake_get__locked(dev_priv, fw); + intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw); for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { u32 val = I915_READ_FW(wa->reg); @@ -953,10 +938,8 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal) I915_WRITE_FW(wa->reg, val); } - intel_uncore_forcewake_put__locked(dev_priv, fw); + intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw); spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); - - DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name); } void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv) @@ -1077,30 +1060,26 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) struct drm_i915_private *i915 = engine->i915; struct i915_wa_list *w = &engine->whitelist; - GEM_BUG_ON(engine->id != RCS); + GEM_BUG_ON(engine->id != RCS0); wa_init_start(w, "whitelist"); - if (INTEL_GEN(i915) < 8) - return; - else if (IS_BROADWELL(i915)) - return; - else if (IS_CHERRYVIEW(i915)) - return; - else if (IS_SKYLAKE(i915)) - skl_whitelist_build(w); - else if (IS_BROXTON(i915)) - bxt_whitelist_build(w); - else if (IS_KABYLAKE(i915)) - kbl_whitelist_build(w); - else if (IS_GEMINILAKE(i915)) - glk_whitelist_build(w); - else if (IS_COFFEELAKE(i915)) - cfl_whitelist_build(w); + if (IS_ICELAKE(i915)) + icl_whitelist_build(w); else if (IS_CANNONLAKE(i915)) cnl_whitelist_build(w); - else if (IS_ICELAKE(i915)) - icl_whitelist_build(w); + else if (IS_COFFEELAKE(i915)) + cfl_whitelist_build(w); + else if (IS_GEMINILAKE(i915)) + glk_whitelist_build(w); + else if (IS_KABYLAKE(i915)) + kbl_whitelist_build(w); + else if (IS_BROXTON(i915)) + bxt_whitelist_build(w); + else if (IS_SKYLAKE(i915)) + skl_whitelist_build(w); + else if (INTEL_GEN(i915) <= 8) + return; else MISSING_CASE(INTEL_GEN(i915)); @@ -1126,14 +1105,12 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine) for (; i < RING_MAX_NONPRIV_SLOTS; i++) I915_WRITE(RING_FORCE_TO_NONPRIV(base, i), i915_mmio_reg_offset(RING_NOPID(base))); - - DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name); } -static void 
rcs_engine_wa_init(struct intel_engine_cs *engine) +static void +rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - struct i915_wa_list *wal = &engine->wa_list; if (IS_ICELAKE(i915)) { /* This is not an Wa. Enable for better image quality */ @@ -1190,8 +1167,8 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine) GEN7_DISABLE_SAMPLER_PREFETCH); } - if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) { - /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */ + if (IS_GEN_RANGE(i915, 9, 11)) { + /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */ wa_masked_en(wal, GEN7_FF_SLICE_CS_CHICKEN1, GEN9_FFSC_PERCTX_PREEMPT_CTRL); @@ -1211,7 +1188,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine) GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); } - if (IS_GEN9(i915)) { + if (IS_GEN(i915, 9)) { /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS, @@ -1237,10 +1214,10 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine) } } -static void xcs_engine_wa_init(struct intel_engine_cs *engine) +static void +xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - struct i915_wa_list *wal = &engine->wa_list; /* WaKBLVECSSemaphoreWaitPoll:kbl */ if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) { @@ -1250,6 +1227,18 @@ static void xcs_engine_wa_init(struct intel_engine_cs *engine) } } +static void +engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal) +{ + if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8)) + return; + + if (engine->id == RCS0) + rcs_engine_wa_init(engine, wal); + else + xcs_engine_wa_init(engine, wal); +} + void intel_engine_init_workarounds(struct intel_engine_cs *engine) { struct i915_wa_list *wal = &engine->wa_list; @@ -1258,12 +1247,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine) return; wa_init_start(wal, engine->name); - - if (engine->id == RCS) - rcs_engine_wa_init(engine); - else - xcs_engine_wa_init(engine); - + engine_init_workarounds(engine, wal); wa_init_finish(wal); } @@ -1273,11 +1257,5 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine) } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine, - const char *from) -{ - return wa_list_verify(engine->i915, &engine->wa_list, from); -} - #include "selftests/intel_workarounds.c" #endif diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h index 7c734714b05e..a1bf51c611a9 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.h +++ b/drivers/gpu/drm/i915/intel_workarounds.h @@ -9,18 +9,7 @@ #include <linux/slab.h> -struct i915_wa { - i915_reg_t reg; - u32 mask; - u32 val; -}; - -struct i915_wa_list { - const char *name; - struct i915_wa *list; - unsigned int count; - unsigned int wa_count; -}; +#include "intel_workarounds_types.h" static inline void intel_wa_list_free(struct i915_wa_list *wal) { diff --git a/drivers/gpu/drm/i915/intel_workarounds_types.h b/drivers/gpu/drm/i915/intel_workarounds_types.h new file mode 100644 index 000000000000..30918da180ff --- /dev/null +++ b/drivers/gpu/drm/i915/intel_workarounds_types.h @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#ifndef __INTEL_WORKAROUNDS_TYPES_H__ +#define __INTEL_WORKAROUNDS_TYPES_H__ + 
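A sketch of how a workaround table is now populated with the consolidated wa_* helpers, mirroring the wa_init_start()/wa_init_finish() flow of intel_gt_init_workarounds() above; the example_* function names are purely illustrative, while the registers are ones already used in this file:

static void
example_gt_workarounds_init(struct drm_i915_private *i915,
			    struct i915_wa_list *wal)
{
	/* Hardware-masked register: _MASKED_BIT_ENABLE() folds the write mask into the value. */
	wa_masked_en(wal, GEN7_FF_SLICE_CS_CHICKEN1,
		     GEN9_FFSC_PERCTX_PREEMPT_CTRL);

	/* Ordinary register: read-modify-write, setting the given bits. */
	wa_write_or(wal, GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void example_init_workarounds(struct drm_i915_private *i915)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_start(wal, "GT");
	example_gt_workarounds_init(i915, wal);
	wa_init_finish(wal);
}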
+#include <linux/types.h> + +#include "i915_reg.h" + +struct i915_wa { + i915_reg_t reg; + u32 mask; + u32 val; +}; + +struct i915_wa_list { + const char *name; + struct i915_wa *list; + unsigned int count; + unsigned int wa_count; +}; + +#endif /* __INTEL_WORKAROUNDS_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c index 391f3d9ffdf1..419fd4d6a8f0 100644 --- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c @@ -122,7 +122,7 @@ huge_gem_object(struct drm_i915_private *i915, if (overflows_type(dma_size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(i915); + obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 26c065c8d2c0..90721b54e7ae 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -171,7 +171,7 @@ huge_pages_object(struct drm_i915_private *i915, if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(i915); + obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); @@ -320,7 +320,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(i915); + obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); @@ -908,10 +908,6 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) if (IS_ERR(obj)) return ERR_CAST(obj); - err = i915_gem_object_set_to_wc_domain(obj, true); - if (err) - goto err; - cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); @@ -972,7 +968,6 @@ static int gpu_write(struct i915_vma *vma, { struct i915_request *rq; struct i915_vma *batch; - int flags = 0; int err; GEM_BUG_ON(!intel_engine_can_store_dword(engine)); @@ -981,14 +976,14 @@ static int gpu_write(struct i915_vma *vma, if (err) return err; - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) - return PTR_ERR(rq); - batch = gpu_write_dw(vma, dword * sizeof(u32), value); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto err_request; + if (IS_ERR(batch)) + return PTR_ERR(batch); + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_batch; } err = i915_vma_move_to_active(batch, rq, 0); @@ -996,21 +991,21 @@ static int gpu_write(struct i915_vma *vma, goto err_request; i915_gem_object_set_active_reference(batch->obj); - i915_vma_unpin(batch); - i915_vma_close(batch); - err = engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - flags); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); if (err) goto err_request; - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + err = engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + 0); +err_request: if (err) i915_request_skip(rq, err); - -err_request: i915_request_add(rq); +err_batch: + i915_vma_unpin(batch); + i915_vma_close(batch); return err; } @@ -1450,7 +1445,7 @@ static int igt_ppgtt_pin_update(void *arg) * huge-gtt-pages. */ - if (!HAS_FULL_48BIT_PPGTT(dev_priv)) { + if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) { pr_info("48b PPGTT not supported, skipping\n"); return 0; } @@ -1536,7 +1531,7 @@ static int igt_ppgtt_pin_update(void *arg) * land in the now stale 2M page. 
*/ - err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf); + err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf); if (err) goto out_unpin; @@ -1585,6 +1580,7 @@ static int igt_tmpfs_fallback(void *arg) } *vaddr = 0xdeadbeaf; + __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); vma = i915_vma_instance(obj, vm, NULL); @@ -1654,7 +1650,7 @@ static int igt_shrink_thp(void *arg) if (err) goto out_unpin; - err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf); + err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf); if (err) goto out_unpin; @@ -1703,7 +1699,6 @@ int i915_gem_huge_page_mock_selftests(void) }; struct drm_i915_private *dev_priv; struct i915_hw_ppgtt *ppgtt; - struct pci_dev *pdev; int err; dev_priv = mock_gem_device(); @@ -1711,19 +1706,17 @@ int i915_gem_huge_page_mock_selftests(void) return -ENOMEM; /* Pretend to be a device which supports the 48b PPGTT */ - mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL; - - pdev = dev_priv->drm.pdev; - dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); + mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; + mkwrite_device_info(dev_priv)->ppgtt_size = 48; mutex_lock(&dev_priv->drm.struct_mutex); - ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV)); + ppgtt = i915_ppgtt_create(dev_priv); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; } - if (!i915_vm_is_48bit(&ppgtt->vm)) { + if (!i915_vm_is_4lvl(&ppgtt->vm)) { pr_err("failed to create 48b PPGTT\n"); err = -EINVAL; goto out_close; @@ -1739,7 +1732,6 @@ int i915_gem_huge_page_mock_selftests(void) err = i915_subtests(tests, ppgtt); out_close: - i915_ppgtt_close(&ppgtt->vm); i915_ppgtt_put(ppgtt); out_unlock: @@ -1761,6 +1753,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) }; struct drm_file *file; struct i915_gem_context *ctx; + intel_wakeref_t wakeref; int err; if (!HAS_PPGTT(dev_priv)) { @@ -1768,7 +1761,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return 0; } - if (i915_terminally_wedged(&dev_priv->gpu_error)) + if (i915_terminally_wedged(dev_priv)) return 0; file = mock_file(dev_priv); @@ -1776,7 +1769,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); ctx = live_context(dev_priv, file); if (IS_ERR(ctx)) { @@ -1790,7 +1783,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) err = i915_subtests(tests, ctx); out_unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); mock_file_free(dev_priv, file); diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c new file mode 100644 index 000000000000..27d8f853111b --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -0,0 +1,157 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include "../i915_selftest.h" + +#include "igt_flush_test.h" +#include "lib_sw_fence.h" + +struct live_active { + struct i915_active base; + bool retired; +}; + +static void __live_active_retire(struct i915_active *base) +{ + struct live_active *active = container_of(base, typeof(*active), base); + + active->retired = true; +} + +static int __live_active_setup(struct drm_i915_private *i915, + struct live_active *active) +{ + struct intel_engine_cs 
*engine; + struct i915_sw_fence *submit; + enum intel_engine_id id; + unsigned int count = 0; + int err = 0; + + submit = heap_fence_create(GFP_KERNEL); + if (!submit) + return -ENOMEM; + + i915_active_init(i915, &active->base, __live_active_retire); + active->retired = false; + + if (!i915_active_acquire(&active->base)) { + pr_err("First i915_active_acquire should report being idle\n"); + err = -EINVAL; + goto out; + } + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + rq = i915_request_alloc(engine, i915->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, + submit, + GFP_KERNEL); + if (err >= 0) + err = i915_active_ref(&active->base, + rq->fence.context, rq); + i915_request_add(rq); + if (err) { + pr_err("Failed to track active ref!\n"); + break; + } + + count++; + } + + i915_active_release(&active->base); + if (active->retired && count) { + pr_err("i915_active retired before submission!\n"); + err = -EINVAL; + } + if (active->base.count != count) { + pr_err("i915_active not tracking all requests, found %d, expected %d\n", + active->base.count, count); + err = -EINVAL; + } + +out: + i915_sw_fence_commit(submit); + heap_fence_put(submit); + + return err; +} + +static int live_active_wait(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct live_active active; + intel_wakeref_t wakeref; + int err; + + /* Check that we get a callback when requests retire upon waiting */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + err = __live_active_setup(i915, &active); + + i915_active_wait(&active.base); + if (!active.retired) { + pr_err("i915_active not retired after waiting!\n"); + err = -EINVAL; + } + + i915_active_fini(&active.base); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; +} + +static int live_active_retire(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct live_active active; + intel_wakeref_t wakeref; + int err; + + /* Check that we get a callback when requests are indirectly retired */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + err = __live_active_setup(i915, &active); + + /* waits for & retires all requests */ + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + if (!active.retired) { + pr_err("i915_active not retired after flushing!\n"); + err = -EINVAL; + } + + i915_active_fini(&active.base); + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; +} + +int i915_active_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_active_wait), + SUBTEST(live_active_retire), + }; + + if (i915_terminally_wedged(i915)) + return 0; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index d0aa19d17653..50bb7bbd26d3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -16,9 +16,10 @@ static int switch_to_context(struct drm_i915_private *i915, { struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = 0; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for_each_engine(engine, i915, id) { struct i915_request *rq; @@ -32,7 +33,7 @@ static int switch_to_context(struct drm_i915_private *i915, 
i915_request_add(rq); } - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); return err; } @@ -65,7 +66,9 @@ static void trash_stolen(struct drm_i915_private *i915) static void simulate_hibernate(struct drm_i915_private *i915) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); /* * As a final sting in the tail, invalidate stolen. Under a real S4, @@ -76,56 +79,51 @@ static void simulate_hibernate(struct drm_i915_private *i915) */ trash_stolen(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); } static int pm_prepare(struct drm_i915_private *i915) { - int err = 0; - - if (i915_gem_suspend(i915)) { - pr_err("i915_gem_suspend failed\n"); - err = -EINVAL; - } + i915_gem_suspend(i915); - return err; + return 0; } static void pm_suspend(struct drm_i915_private *i915) { - intel_runtime_pm_get(i915); - - i915_gem_suspend_gtt_mappings(i915); - i915_gem_suspend_late(i915); + intel_wakeref_t wakeref; - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) { + i915_gem_suspend_gtt_mappings(i915); + i915_gem_suspend_late(i915); + } } static void pm_hibernate(struct drm_i915_private *i915) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; - i915_gem_suspend_gtt_mappings(i915); + with_intel_runtime_pm(i915, wakeref) { + i915_gem_suspend_gtt_mappings(i915); - i915_gem_freeze(i915); - i915_gem_freeze_late(i915); - - intel_runtime_pm_put(i915); + i915_gem_freeze(i915); + i915_gem_freeze_late(i915); + } } static void pm_resume(struct drm_i915_private *i915) { + intel_wakeref_t wakeref; + /* * Both suspend and hibernate follow the same wakeup path and assume * that runtime-pm just works. */ - intel_runtime_pm_get(i915); - - intel_engines_sanitize(i915); - i915_gem_sanitize(i915); - i915_gem_resume(i915); - - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) { + intel_engines_sanitize(i915, false); + i915_gem_sanitize(i915); + i915_gem_resume(i915); + } } static int igt_gem_suspend(void *arg) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index f7392c1ffe75..e43630b40fce 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) return PTR_ERR(vma); - rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context); + rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context); if (IS_ERR(rq)) { i915_vma_unpin(vma); return PTR_ERR(rq); @@ -248,15 +248,15 @@ static bool always_valid(struct drm_i915_private *i915) static bool needs_fence_registers(struct drm_i915_private *i915) { - return !i915_terminally_wedged(&i915->gpu_error); + return !i915_terminally_wedged(i915); } static bool needs_mi_store_dword(struct drm_i915_private *i915) { - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return false; - return intel_engine_can_store_dword(i915->engine[RCS]); + return intel_engine_can_store_dword(i915->engine[RCS0]); } static const struct igt_coherency_mode { @@ -279,6 +279,7 @@ static int igt_gem_coherency(void *arg) struct drm_i915_private *i915 = arg; const struct igt_coherency_mode *read, *write, *over; struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; unsigned long count, n; u32 *offsets, *values; int err = 0; @@ -298,7 +299,7 @@ static int igt_gem_coherency(void *arg) values = offsets + ncachelines; 
mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for (over = igt_coherency_mode; over->name; over++) { if (!over->set) continue; @@ -376,7 +377,7 @@ static int igt_gem_coherency(void *arg) } } unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); kfree(offsets); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 7d82043aff10..4e1b6efc6b22 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -24,9 +24,13 @@ #include <linux/prime_numbers.h> +#include "../i915_reset.h" #include "../i915_selftest.h" #include "i915_random.h" #include "igt_flush_test.h" +#include "igt_live_test.h" +#include "igt_reset.h" +#include "igt_spinner.h" #include "mock_drm.h" #include "mock_gem_device.h" @@ -34,84 +38,6 @@ #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32)) -struct live_test { - struct drm_i915_private *i915; - const char *func; - const char *name; - - unsigned int reset_global; - unsigned int reset_engine[I915_NUM_ENGINES]; -}; - -static int begin_live_test(struct live_test *t, - struct drm_i915_private *i915, - const char *func, - const char *name) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err; - - t->i915 = i915; - t->func = func; - t->name = name; - - err = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (err) { - pr_err("%s(%s): failed to idle before, with err=%d!", - func, name, err); - return err; - } - - i915->gpu_error.missed_irq_rings = 0; - t->reset_global = i915_reset_count(&i915->gpu_error); - - for_each_engine(engine, i915, id) - t->reset_engine[id] = - i915_reset_engine_count(&i915->gpu_error, engine); - - return 0; -} - -static int end_live_test(struct live_test *t) -{ - struct drm_i915_private *i915 = t->i915; - struct intel_engine_cs *engine; - enum intel_engine_id id; - - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - return -EIO; - - if (t->reset_global != i915_reset_count(&i915->gpu_error)) { - pr_err("%s(%s): GPU was reset %d times!\n", - t->func, t->name, - i915_reset_count(&i915->gpu_error) - t->reset_global); - return -EIO; - } - - for_each_engine(engine, i915, id) { - if (t->reset_engine[id] == - i915_reset_engine_count(&i915->gpu_error, engine)) - continue; - - pr_err("%s(%s): engine '%s' was reset %d times!\n", - t->func, t->name, engine->name, - i915_reset_engine_count(&i915->gpu_error, engine) - - t->reset_engine[id]); - return -EIO; - } - - if (i915->gpu_error.missed_irq_rings) { - pr_err("%s(%s): Missed interrupts on engines %lx\n", - t->func, t->name, i915->gpu_error.missed_irq_rings); - return -EIO; - } - - return 0; -} - static int live_nop_switch(void *arg) { const unsigned int nctx = 1024; @@ -119,8 +45,9 @@ static int live_nop_switch(void *arg) struct intel_engine_cs *engine; struct i915_gem_context **ctx; enum intel_engine_id id; + intel_wakeref_t wakeref; + struct igt_live_test t; struct drm_file *file; - struct live_test t; unsigned long n; int err = -ENODEV; @@ -140,7 +67,7 @@ static int live_nop_switch(void *arg) return PTR_ERR(file); mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -149,7 +76,7 @@ static int live_nop_switch(void *arg) } for (n = 0; n < nctx; n++) { - ctx[n] = i915_gem_create_context(i915, file->driver_priv); 
+ ctx[n] = live_context(i915, file); if (IS_ERR(ctx[n])) { err = PTR_ERR(ctx[n]); goto out_unlock; @@ -184,7 +111,7 @@ static int live_nop_switch(void *arg) pr_info("Populated %d contexts on %s in %lluns\n", nctx, engine->name, ktime_to_ns(times[1] - times[0])); - err = begin_live_test(&t, i915, __func__, engine->name); + err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_unlock; @@ -232,7 +159,7 @@ static int live_nop_switch(void *arg) break; } - err = end_live_test(&t); + err = igt_live_test_end(&t); if (err) goto out_unlock; @@ -243,7 +170,7 @@ static int live_nop_switch(void *arg) } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); return err; @@ -293,6 +220,7 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) offset += PAGE_SIZE; } *cmd = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); err = i915_gem_object_set_to_gtt_domain(obj, false); @@ -445,7 +373,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value) return 0; } -static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max) +static noinline int cpu_check(struct drm_i915_gem_object *obj, + unsigned int idx, unsigned int max) { unsigned int n, m, needs_flush; int err; @@ -463,8 +392,10 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max) for (m = 0; m < max; m++) { if (map[m] != m) { - pr_err("Invalid value at page %d, offset %d: found %x expected %x\n", - n, m, map[m], m); + pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n", + __builtin_return_address(0), idx, + n, real_page_count(obj), m, max, + map[m], m); err = -EINVAL; goto out_unmap; } @@ -472,8 +403,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max) for (; m < DW_PER_PAGE; m++) { if (map[m] != STACK_MAGIC) { - pr_err("Invalid value at page %d, offset %d: found %x expected %x\n", - n, m, map[m], STACK_MAGIC); + pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n", + __builtin_return_address(0), idx, n, m, + map[m], STACK_MAGIC); err = -EINVAL; goto out_unmap; } @@ -551,12 +483,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj) static int igt_ctx_exec(void *arg) { struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj = NULL; - unsigned long ncontexts, ndwords, dw; - struct drm_file *file; - IGT_TIMEOUT(end_time); - LIST_HEAD(objects); - struct live_test t; + struct intel_engine_cs *engine; + enum intel_engine_id id; int err = -ENODEV; /* @@ -568,86 +496,675 @@ static int igt_ctx_exec(void *arg) if (!DRIVER_CAPS(i915)->has_logical_contexts) return 0; + for_each_engine(engine, i915, id) { + struct drm_i915_gem_object *obj = NULL; + unsigned long ncontexts, ndwords, dw; + struct igt_live_test t; + struct drm_file *file; + IGT_TIMEOUT(end_time); + LIST_HEAD(objects); + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (!engine->context_size) + continue; /* No logical context support in HW */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + + err = igt_live_test_begin(&t, i915, __func__, engine->name); + if (err) + goto out_unlock; + + ncontexts = 0; + ndwords = 0; + dw = 0; + while (!time_after(jiffies, end_time)) { + struct i915_gem_context *ctx; + intel_wakeref_t wakeref; + + ctx = live_context(i915, file); + if 
(IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_unlock; + } + + if (!obj) { + obj = create_test_object(ctx, file, &objects); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_unlock; + } + } + + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + ndwords, dw, max_dwords(obj), + engine->name, ctx->hw_id, + yesno(!!ctx->ppgtt), err); + goto out_unlock; + } + + if (++dw == max_dwords(obj)) { + obj = NULL; + dw = 0; + } + + ndwords++; + ncontexts++; + } + + pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", + ncontexts, engine->name, ndwords); + + ncontexts = dw = 0; + list_for_each_entry(obj, &objects, st_link) { + unsigned int rem = + min_t(unsigned int, ndwords - dw, max_dwords(obj)); + + err = cpu_check(obj, ncontexts++, rem); + if (err) + break; + + dw += rem; + } + +out_unlock: + if (igt_live_test_end(&t)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + mock_file_free(i915, file); + if (err) + return err; + } + + return 0; +} + +static int igt_shared_ctx_exec(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *parent; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_live_test t; + struct drm_file *file; + int err = 0; + + /* + * Create a few different contexts with the same mm and write + * through each ctx using the GPU making sure those writes end + * up in the expected pages of our obj. + */ + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return 0; + file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); mutex_lock(&i915->drm.struct_mutex); - err = begin_live_test(&t, i915, __func__, ""); + parent = live_context(i915, file); + if (IS_ERR(parent)) { + err = PTR_ERR(parent); + goto out_unlock; + } + + if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */ + err = 0; + goto out_unlock; + } + + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; - ncontexts = 0; - ndwords = 0; - dw = 0; - while (!time_after(jiffies, end_time)) { - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - unsigned int id; + for_each_engine(engine, i915, id) { + unsigned long ncontexts, ndwords, dw; + struct drm_i915_gem_object *obj = NULL; + IGT_TIMEOUT(end_time); + LIST_HEAD(objects); - ctx = i915_gem_create_context(i915, file->driver_priv); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_unlock; - } + if (!intel_engine_can_store_dword(engine)) + continue; - for_each_engine(engine, i915, id) { - if (!engine->context_size) - continue; /* No logical context support in HW */ + dw = 0; + ndwords = 0; + ncontexts = 0; + while (!time_after(jiffies, end_time)) { + struct i915_gem_context *ctx; + intel_wakeref_t wakeref; + + ctx = kernel_context(i915); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_test; + } - if (!intel_engine_can_store_dword(engine)) - continue; + __assign_ppgtt(ctx, parent->ppgtt); if (!obj) { - obj = create_test_object(ctx, file, &objects); + obj = create_test_object(parent, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); - goto out_unlock; + kernel_context_close(ctx); + goto out_test; } } - intel_runtime_pm_get(i915); - err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put(i915); + err = 0; + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, ctx->hw_id, yesno(!!ctx->ppgtt), err); - goto out_unlock; + kernel_context_close(ctx); + goto out_test; } if (++dw == max_dwords(obj)) { obj = NULL; dw = 0; } + ndwords++; + ncontexts++; + + kernel_context_close(ctx); + } + pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", + ncontexts, engine->name, ndwords); + + ncontexts = dw = 0; + list_for_each_entry(obj, &objects, st_link) { + unsigned int rem = + min_t(unsigned int, ndwords - dw, max_dwords(obj)); + + err = cpu_check(obj, ncontexts++, rem); + if (err) + goto out_test; + + dw += rem; } - ncontexts++; } - pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n", - ncontexts, INTEL_INFO(i915)->num_rings, ndwords); +out_test: + if (igt_live_test_end(&t)) + err = -EIO; +out_unlock: + mutex_unlock(&i915->drm.struct_mutex); - dw = 0; - list_for_each_entry(obj, &objects, st_link) { - unsigned int rem = - min_t(unsigned int, ndwords - dw, max_dwords(obj)); + mock_file_free(i915, file); + return err; +} - err = cpu_check(obj, rem); - if (err) - break; +static struct i915_vma *rpcs_query_batch(struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj; + u32 *cmd; + int err; - dw += rem; + if (INTEL_GEN(vma->vm->i915) < 8) + return ERR_PTR(-EINVAL); + + obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + *cmd++ = MI_STORE_REGISTER_MEM_GEN8; + *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE); + *cmd++ = lower_32_bits(vma->node.start); + *cmd++ = upper_32_bits(vma->node.start); + *cmd = MI_BATCH_BUFFER_END; + + __i915_gem_object_flush_map(obj, 0, 64); + i915_gem_object_unpin_map(obj); + + vma = i915_vma_instance(obj, vma->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static int +emit_rpcs_query(struct drm_i915_gem_object *obj, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct i915_request **rq_out) +{ + struct i915_request *rq; + struct i915_vma *batch; + struct i915_vma *vma; + int err; + + GEM_BUG_ON(!intel_engine_can_store_dword(engine)); + + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_gem_object_set_to_gtt_domain(obj, false); + if (err) + return err; + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + return err; + + batch = rpcs_query_batch(vma); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto err_vma; + } + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_batch; + } + + err = engine->emit_bb_start(rq, batch->node.start, batch->node.size, 0); + if (err) + goto err_request; + + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; + + i915_gem_object_set_active_reference(batch->obj); + i915_vma_unpin(batch); + i915_vma_close(batch); + + i915_vma_unpin(vma); + + *rq_out = i915_request_get(rq); + + i915_request_add(rq); + + return 0; + +skip_request: + i915_request_skip(rq, err); +err_request: + i915_request_add(rq); +err_batch: + i915_vma_unpin(batch); +err_vma: + i915_vma_unpin(vma); + + return err; +} + +#define TEST_IDLE BIT(0) +#define TEST_BUSY 
BIT(1) +#define TEST_RESET BIT(2) + +static int +__sseu_prepare(struct drm_i915_private *i915, + const char *name, + unsigned int flags, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct igt_spinner **spin) +{ + struct i915_request *rq; + int ret; + + *spin = NULL; + if (!(flags & (TEST_BUSY | TEST_RESET))) + return 0; + + *spin = kzalloc(sizeof(**spin), GFP_KERNEL); + if (!*spin) + return -ENOMEM; + + ret = igt_spinner_init(*spin, i915); + if (ret) + goto err_free; + + rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + goto err_fini; + } + + i915_request_add(rq); + + if (!igt_wait_for_spinner(*spin, rq)) { + pr_err("%s: Spinner failed to start!\n", name); + ret = -ETIMEDOUT; + goto err_end; } + return 0; + +err_end: + igt_spinner_end(*spin); +err_fini: + igt_spinner_fini(*spin); +err_free: + kfree(fetch_and_zero(spin)); + return ret; +} + +static int +__read_slice_count(struct drm_i915_private *i915, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct drm_i915_gem_object *obj, + struct igt_spinner *spin, + u32 *rpcs) +{ + struct i915_request *rq = NULL; + u32 s_mask, s_shift; + unsigned int cnt; + u32 *buf, val; + long ret; + + ret = emit_rpcs_query(obj, ctx, engine, &rq); + if (ret) + return ret; + + if (spin) + igt_spinner_end(spin); + + ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + if (ret < 0) + return ret; + + buf = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + return ret; + } + + if (INTEL_GEN(i915) >= 11) { + s_mask = GEN11_RPCS_S_CNT_MASK; + s_shift = GEN11_RPCS_S_CNT_SHIFT; + } else { + s_mask = GEN8_RPCS_S_CNT_MASK; + s_shift = GEN8_RPCS_S_CNT_SHIFT; + } + + val = *buf; + cnt = (val & s_mask) >> s_shift; + *rpcs = val; + + i915_gem_object_unpin_map(obj); + + return cnt; +} + +static int +__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected, + const char *prefix, const char *suffix) +{ + if (slices == expected) + return 0; + + if (slices < 0) { + pr_err("%s: %s read slice count failed with %d%s\n", + name, prefix, slices, suffix); + return slices; + } + + pr_err("%s: %s slice count %d is not %u%s\n", + name, prefix, slices, expected, suffix); + + pr_info("RPCS=0x%x; %u%sx%u%s\n", + rpcs, slices, + (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "", + (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT, + (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : ""); + + return -EINVAL; +} + +static int +__sseu_finish(struct drm_i915_private *i915, + const char *name, + unsigned int flags, + struct i915_gem_context *ctx, + struct i915_gem_context *kctx, + struct intel_engine_cs *engine, + struct drm_i915_gem_object *obj, + unsigned int expected, + struct igt_spinner *spin) +{ + unsigned int slices = + hweight32(intel_device_default_sseu(i915).slice_mask); + u32 rpcs = 0; + int ret = 0; + + if (flags & TEST_RESET) { + ret = i915_reset_engine(engine, "sseu"); + if (ret) + goto out; + } + + ret = __read_slice_count(i915, ctx, engine, obj, + flags & TEST_RESET ? 
NULL : spin, &rpcs); + ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!"); + if (ret) + goto out; + + ret = __read_slice_count(i915, kctx, engine, obj, NULL, &rpcs); + ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!"); + +out: + if (spin) + igt_spinner_end(spin); + + if ((flags & TEST_IDLE) && ret == 0) { + ret = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (ret) + return ret; + + ret = __read_slice_count(i915, ctx, engine, obj, NULL, &rpcs); + ret = __check_rpcs(name, rpcs, ret, expected, + "Context", " after idle!"); + } + + return ret; +} + +static int +__sseu_test(struct drm_i915_private *i915, + const char *name, + unsigned int flags, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct drm_i915_gem_object *obj, + struct intel_sseu sseu) +{ + struct igt_spinner *spin = NULL; + struct i915_gem_context *kctx; + int ret; + + kctx = kernel_context(i915); + if (IS_ERR(kctx)) + return PTR_ERR(kctx); + + ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin); + if (ret) + goto out_context; + + ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu); + if (ret) + goto out_spin; + + ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj, + hweight32(sseu.slice_mask), spin); + +out_spin: + if (spin) { + igt_spinner_end(spin); + igt_spinner_fini(spin); + kfree(spin); + } + +out_context: + kernel_context_close(kctx); + + return ret; +} + +static int +__igt_ctx_sseu(struct drm_i915_private *i915, + const char *name, + unsigned int flags) +{ + struct intel_sseu default_sseu = intel_device_default_sseu(i915); + struct intel_engine_cs *engine = i915->engine[RCS0]; + struct drm_i915_gem_object *obj; + struct i915_gem_context *ctx; + struct intel_sseu pg_sseu; + intel_wakeref_t wakeref; + struct drm_file *file; + int ret; + + if (INTEL_GEN(i915) < 9) + return 0; + + if (!RUNTIME_INFO(i915)->sseu.has_slice_pg) + return 0; + + if (hweight32(default_sseu.slice_mask) < 2) + return 0; + + /* + * Gen11 VME friendly power-gated configuration with half enabled + * sub-slices. + */ + pg_sseu = default_sseu; + pg_sseu.slice_mask = 1; + pg_sseu.subslice_mask = + ~(~0 << (hweight32(default_sseu.subslice_mask) / 2)); + + pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", + name, flags, hweight32(default_sseu.slice_mask), + hweight32(pg_sseu.slice_mask)); + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + if (flags & TEST_RESET) + igt_global_reset_lock(i915); + + mutex_lock(&i915->drm.struct_mutex); + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto out_unlock; + } + i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */ + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); + goto out_unlock; + } + + wakeref = intel_runtime_pm_get(i915); + + /* First set the default mask. */ + ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu); + if (ret) + goto out_fail; + + /* Then set a power-gated configuration. */ + ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu); + if (ret) + goto out_fail; + + /* Back to defaults. */ + ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu); + if (ret) + goto out_fail; + + /* One last power-gated configuration for the road. 
*/ + ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu); + if (ret) + goto out_fail; + +out_fail: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + ret = -EIO; + + i915_gem_object_put(obj); + + intel_runtime_pm_put(i915, wakeref); + out_unlock: - if (end_live_test(&t)) - err = -EIO; mutex_unlock(&i915->drm.struct_mutex); + if (flags & TEST_RESET) + igt_global_reset_unlock(i915); + mock_file_free(i915, file); - return err; + + if (ret) + pr_err("%s: Failed with %d!\n", name, ret); + + return ret; +} + +static int igt_ctx_sseu(void *arg) +{ + struct { + const char *name; + unsigned int flags; + } *phase, phases[] = { + { .name = "basic", .flags = 0 }, + { .name = "idle", .flags = TEST_IDLE }, + { .name = "busy", .flags = TEST_BUSY }, + { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET }, + { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE }, + { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE }, + }; + unsigned int i; + int ret = 0; + + for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases); + i++, phase++) + ret = __igt_ctx_sseu(arg, phase->name, phase->flags); + + return ret; } static int igt_ctx_readonly(void *arg) @@ -656,12 +1173,12 @@ static int igt_ctx_readonly(void *arg) struct drm_i915_gem_object *obj = NULL; struct i915_gem_context *ctx; struct i915_hw_ppgtt *ppgtt; - unsigned long ndwords, dw; + unsigned long idx, ndwords, dw; + struct igt_live_test t; struct drm_file *file; I915_RND_STATE(prng); IGT_TIMEOUT(end_time); LIST_HEAD(objects); - struct live_test t; int err = -ENODEV; /* @@ -676,11 +1193,11 @@ static int igt_ctx_readonly(void *arg) mutex_lock(&i915->drm.struct_mutex); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; - ctx = i915_gem_create_context(i915, file->driver_priv); + ctx = live_context(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_unlock; @@ -699,6 +1216,8 @@ static int igt_ctx_readonly(void *arg) unsigned int id; for_each_engine(engine, i915, id) { + intel_wakeref_t wakeref; + if (!intel_engine_can_store_dword(engine)) continue; @@ -713,9 +1232,9 @@ static int igt_ctx_readonly(void *arg) i915_gem_object_set_readonly(obj); } - intel_runtime_pm_get(i915); - err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put(i915); + err = 0; + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -732,9 +1251,10 @@ static int igt_ctx_readonly(void *arg) } } pr_info("Submitted %lu dwords (across %u engines)\n", - ndwords, INTEL_INFO(i915)->num_rings); + ndwords, RUNTIME_INFO(i915)->num_engines); dw = 0; + idx = 0; list_for_each_entry(obj, &objects, st_link) { unsigned int rem = min_t(unsigned int, ndwords - dw, max_dwords(obj)); @@ -744,7 +1264,7 @@ static int igt_ctx_readonly(void *arg) if (i915_gem_object_is_readonly(obj)) num_writes = 0; - err = cpu_check(obj, num_writes); + err = cpu_check(obj, idx++, num_writes); if (err) break; @@ -752,7 +1272,7 @@ static int igt_ctx_readonly(void *arg) } out_unlock: - if (end_live_test(&t)) + if (igt_live_test_end(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); @@ -808,12 +1328,9 @@ static int write_to_scratch(struct i915_gem_context *ctx, } *cmd++ = value; *cmd = MI_BATCH_BUFFER_END; + __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - goto err; - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -905,11 +1422,9 @@ static int read_from_scratch(struct i915_gem_context *ctx, *cmd++ = result; } *cmd = MI_BATCH_BUFFER_END; - i915_gem_object_unpin_map(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - goto err; + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); if (IS_ERR(vma)) { @@ -976,10 +1491,11 @@ static int igt_vm_isolation(void *arg) struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_a, *ctx_b; struct intel_engine_cs *engine; + intel_wakeref_t wakeref; + struct igt_live_test t; struct drm_file *file; I915_RND_STATE(prng); unsigned long count; - struct live_test t; unsigned int id; u64 vm_total; int err; @@ -998,17 +1514,17 @@ static int igt_vm_isolation(void *arg) mutex_lock(&i915->drm.struct_mutex); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; - ctx_a = i915_gem_create_context(i915, file->driver_priv); + ctx_a = live_context(i915, file); if (IS_ERR(ctx_a)) { err = PTR_ERR(ctx_a); goto out_unlock; } - ctx_b = i915_gem_create_context(i915, file->driver_priv); + ctx_b = live_context(i915, file); if (IS_ERR(ctx_b)) { err = PTR_ERR(ctx_b); goto out_unlock; @@ -1022,7 +1538,7 @@ static int igt_vm_isolation(void *arg) GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total); vm_total -= I915_GTT_PAGE_SIZE; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); count = 0; for_each_engine(engine, i915, id) { @@ -1038,7 +1554,7 @@ static int igt_vm_isolation(void *arg) div64_u64_rem(i915_prandom_u64_state(&prng), vm_total, &offset); - offset &= ~sizeof(u32); + offset &= -sizeof(u32); offset += I915_GTT_PAGE_SIZE; err = write_to_scratch(ctx_a, engine, @@ -1064,12 +1580,12 @@ static int igt_vm_isolation(void *arg) count += this; } pr_info("Checked %lu scratch offsets across %d engines\n", - count, INTEL_INFO(i915)->num_rings); + count, RUNTIME_INFO(i915)->num_engines); out_rpm: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); out_unlock: - if (end_live_test(&t)) + if (igt_live_test_end(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); @@ -1078,10 +1594,10 @@ out_unlock: } static __maybe_unused const char * -__engine_name(struct drm_i915_private *i915, unsigned int engines) +__engine_name(struct drm_i915_private *i915, intel_engine_mask_t 
engines) { struct intel_engine_cs *engine; - unsigned int tmp; + intel_engine_mask_t tmp; if (engines == ALL_ENGINES) return "all"; @@ -1094,67 +1610,60 @@ __engine_name(struct drm_i915_private *i915, unsigned int engines) static int __igt_switch_to_kernel_context(struct drm_i915_private *i915, struct i915_gem_context *ctx, - unsigned int engines) + intel_engine_mask_t engines) { struct intel_engine_cs *engine; - unsigned int tmp; - int err; + intel_engine_mask_t tmp; + int pass; GEM_TRACE("Testing %s\n", __engine_name(i915, engines)); - for_each_engine_masked(engine, i915, engines, tmp) { - struct i915_request *rq; + for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */ + bool from_idle = pass & 1; + int err; - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) - return PTR_ERR(rq); + if (!from_idle) { + for_each_engine_masked(engine, i915, engines, tmp) { + struct i915_request *rq; - i915_request_add(rq); - } - - err = i915_gem_switch_to_kernel_context(i915); - if (err) - return err; + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) + return PTR_ERR(rq); - for_each_engine_masked(engine, i915, engines, tmp) { - if (!engine_has_kernel_context_barrier(engine)) { - pr_err("kernel context not last on engine %s!\n", - engine->name); - return -EINVAL; + i915_request_add(rq); + } } - } - err = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (err) - return err; + err = i915_gem_switch_to_kernel_context(i915, + i915->gt.active_engines); + if (err) + return err; + + if (!from_idle) { + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (err) + return err; + } - GEM_BUG_ON(i915->gt.active_requests); - for_each_engine_masked(engine, i915, engines, tmp) { - if (engine->last_retired_context->gem_context != i915->kernel_context) { - pr_err("engine %s not idling in kernel context!\n", - engine->name); + if (i915->gt.active_requests) { + pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n", + i915->gt.active_requests, + pass, from_idle ? "idle" : "busy", + __engine_name(i915, engines), + is_power_of_2(engines) ? "" : "s"); return -EINVAL; } - } - err = i915_gem_switch_to_kernel_context(i915); - if (err) - return err; + /* XXX Bonus points for proving we are the kernel context! 
*/ - if (i915->gt.active_requests) { - pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n", - i915->gt.active_requests); - return -EINVAL; + mutex_unlock(&i915->drm.struct_mutex); + drain_delayed_work(&i915->gt.idle_work); + mutex_lock(&i915->drm.struct_mutex); } - for_each_engine_masked(engine, i915, engines, tmp) { - if (!intel_engine_has_kernel_context(engine)) { - pr_err("kernel context not last on engine %s!\n", - engine->name); - return -EINVAL; - } - } + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + return -EIO; return 0; } @@ -1165,6 +1674,7 @@ static int igt_switch_to_kernel_context(void *arg) struct intel_engine_cs *engine; struct i915_gem_context *ctx; enum intel_engine_id id; + intel_wakeref_t wakeref; int err; /* @@ -1175,7 +1685,7 @@ static int igt_switch_to_kernel_context(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); ctx = kernel_context(i915); if (IS_ERR(ctx)) { @@ -1197,20 +1707,125 @@ static int igt_switch_to_kernel_context(void *arg) out_unlock: GEM_TRACE_DUMP_ON(err); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); kernel_context_close(ctx); return err; } +static void mock_barrier_task(void *data) +{ + unsigned int *counter = data; + + ++*counter; +} + +static int mock_context_barrier(void *arg) +{ +#undef pr_fmt +#define pr_fmt(x) "context_barrier_task():" # x + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx; + struct i915_request *rq; + intel_wakeref_t wakeref; + unsigned int counter; + int err; + + /* + * The context barrier provides us with a callback after it emits + * a request; useful for retiring old state after loading new. 
+ */ + + mutex_lock(&i915->drm.struct_mutex); + + ctx = mock_context(i915, "mock"); + if (!ctx) { + err = -ENOMEM; + goto unlock; + } + + counter = 0; + err = context_barrier_task(ctx, 0, + NULL, mock_barrier_task, &counter); + if (err) { + pr_err("Failed at line %d, err=%d\n", __LINE__, err); + goto out; + } + if (counter == 0) { + pr_err("Did not retire immediately with 0 engines\n"); + err = -EINVAL; + goto out; + } + + counter = 0; + err = context_barrier_task(ctx, ALL_ENGINES, + NULL, mock_barrier_task, &counter); + if (err) { + pr_err("Failed at line %d, err=%d\n", __LINE__, err); + goto out; + } + if (counter == 0) { + pr_err("Did not retire immediately for all inactive engines\n"); + err = -EINVAL; + goto out; + } + + rq = ERR_PTR(-ENODEV); + with_intel_runtime_pm(i915, wakeref) + rq = i915_request_alloc(i915->engine[RCS0], ctx); + if (IS_ERR(rq)) { + pr_err("Request allocation failed!\n"); + goto out; + } + i915_request_add(rq); + GEM_BUG_ON(list_empty(&ctx->active_engines)); + + counter = 0; + context_barrier_inject_fault = BIT(RCS0); + err = context_barrier_task(ctx, ALL_ENGINES, + NULL, mock_barrier_task, &counter); + context_barrier_inject_fault = 0; + if (err == -ENXIO) + err = 0; + else + pr_err("Did not hit fault injection!\n"); + if (counter != 0) { + pr_err("Invoked callback on error!\n"); + err = -EIO; + } + if (err) + goto out; + + counter = 0; + err = context_barrier_task(ctx, ALL_ENGINES, + NULL, mock_barrier_task, &counter); + if (err) { + pr_err("Failed at line %d, err=%d\n", __LINE__, err); + goto out; + } + mock_device_flush(i915); + if (counter == 0) { + pr_err("Did not retire on each active engines\n"); + err = -EINVAL; + goto out; + } + +out: + mock_context_close(ctx); +unlock: + mutex_unlock(&i915->drm.struct_mutex); + return err; +#undef pr_fmt +#define pr_fmt(x) x +} + int i915_gem_context_mock_selftests(void) { static const struct i915_subtest tests[] = { SUBTEST(igt_switch_to_kernel_context), + SUBTEST(mock_context_barrier), }; struct drm_i915_private *i915; int err; @@ -1232,10 +1847,12 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) SUBTEST(live_nop_switch), SUBTEST(igt_ctx_exec), SUBTEST(igt_ctx_readonly), + SUBTEST(igt_ctx_sseu), + SUBTEST(igt_shared_ctx_exec), SUBTEST(igt_vm_isolation), }; - if (i915_terminally_wedged(&dev_priv->gpu_error)) + if (i915_terminally_wedged(dev_priv)) return 0; return i915_subtests(tests, dev_priv); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c index a7055b12e53c..2b943ee246c9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c @@ -315,6 +315,7 @@ static int igt_dmabuf_export_kmap(void *arg) goto err; } memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE); + i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); ptr = dma_buf_kmap(dmabuf, 1); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 4365979d8222..89766688e420 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -29,11 +29,23 @@ #include "mock_drm.h" #include "mock_gem_device.h" -static int populate_ggtt(struct drm_i915_private *i915) +static void quirk_add(struct drm_i915_gem_object *obj, + struct list_head *objects) { + /* quirk is only for live tiled objects, use it to declare ownership */ + GEM_BUG_ON(obj->mm.quirked); + obj->mm.quirked = true; + list_add(&obj->st_link, objects); 
+} + +static int populate_ggtt(struct drm_i915_private *i915, + struct list_head *objects) +{ + unsigned long unbound, bound, count; struct drm_i915_gem_object *obj; u64 size; + count = 0; for (size = 0; size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; size += I915_GTT_PAGE_SIZE) { @@ -43,21 +55,36 @@ static int populate_ggtt(struct drm_i915_private *i915) if (IS_ERR(obj)) return PTR_ERR(obj); + quirk_add(obj, objects); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); if (IS_ERR(vma)) return PTR_ERR(vma); + + count++; } - if (!list_empty(&i915->mm.unbound_list)) { - size = 0; - list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) - size++; + unbound = 0; + list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) + if (obj->mm.quirked) + unbound++; + if (unbound) { + pr_err("%s: Found %lu objects unbound, expected %u!\n", + __func__, unbound, 0); + return -EINVAL; + } - pr_err("Found %lld objects unbound!\n", size); + bound = 0; + list_for_each_entry(obj, &i915->mm.bound_list, mm.link) + if (obj->mm.quirked) + bound++; + if (bound != count) { + pr_err("%s: Found %lu objects bound, expected %lu!\n", + __func__, bound, count); return -EINVAL; } - if (list_empty(&i915->ggtt.vm.inactive_list)) { + if (list_empty(&i915->ggtt.vm.bound_list)) { pr_err("No objects on the GGTT inactive list!\n"); return -EINVAL; } @@ -67,21 +94,26 @@ static int populate_ggtt(struct drm_i915_private *i915) static void unpin_ggtt(struct drm_i915_private *i915) { + struct i915_ggtt *ggtt = &i915->ggtt; struct i915_vma *vma; - list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link) - i915_vma_unpin(vma); + mutex_lock(&ggtt->vm.mutex); + list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link) + if (vma->obj->mm.quirked) + i915_vma_unpin(vma); + mutex_unlock(&ggtt->vm.mutex); } -static void cleanup_objects(struct drm_i915_private *i915) +static void cleanup_objects(struct drm_i915_private *i915, + struct list_head *list) { struct drm_i915_gem_object *obj, *on; - list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link) - i915_gem_object_put(obj); - - list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link) + list_for_each_entry_safe(obj, on, list, st_link) { + GEM_BUG_ON(!obj->mm.quirked); + obj->mm.quirked = false; i915_gem_object_put(obj); + } mutex_unlock(&i915->drm.struct_mutex); @@ -94,11 +126,12 @@ static int igt_evict_something(void *arg) { struct drm_i915_private *i915 = arg; struct i915_ggtt *ggtt = &i915->ggtt; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict one. */ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -127,7 +160,7 @@ static int igt_evict_something(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -136,13 +169,14 @@ static int igt_overcommit(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct i915_vma *vma; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and then try to pin one more. * We expect it to fail. 
*/ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -152,6 +186,8 @@ static int igt_overcommit(void *arg) goto cleanup; } + quirk_add(obj, &objects); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) { pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma)); @@ -160,7 +196,7 @@ static int igt_overcommit(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -172,11 +208,12 @@ static int igt_evict_for_vma(void *arg) .start = 0, .size = 4096, }; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict a range. */ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -199,7 +236,7 @@ static int igt_evict_for_vma(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -222,6 +259,7 @@ static int igt_evict_for_cache_color(void *arg) }; struct drm_i915_gem_object *obj; struct i915_vma *vma; + LIST_HEAD(objects); int err; /* Currently the use of color_adjust is limited to cache domains within @@ -236,7 +274,8 @@ static int igt_evict_for_cache_color(void *arg) err = PTR_ERR(obj); goto cleanup; } - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + quirk_add(obj, &objects); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, I915_GTT_PAGE_SIZE | flags); @@ -251,7 +290,8 @@ static int igt_evict_for_cache_color(void *arg) err = PTR_ERR(obj); goto cleanup; } - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + quirk_add(obj, &objects); /* Neighbouring; same colour - should fit */ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, @@ -287,7 +327,7 @@ static int igt_evict_for_cache_color(void *arg) cleanup: unpin_ggtt(i915); - cleanup_objects(i915); + cleanup_objects(i915, &objects); ggtt->vm.mm.color_adjust = NULL; return err; } @@ -296,11 +336,12 @@ static int igt_evict_vm(void *arg) { struct drm_i915_private *i915 = arg; struct i915_ggtt *ggtt = &i915->ggtt; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict everything. 
*/ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -322,7 +363,7 @@ static int igt_evict_vm(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -336,6 +377,7 @@ static int igt_evict_contexts(void *arg) struct drm_mm_node node; struct reserved *next; } *reserved = NULL; + intel_wakeref_t wakeref; struct drm_mm_node hole; unsigned long count; int err; @@ -355,7 +397,7 @@ static int igt_evict_contexts(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); /* Reserve a block so that we know we have enough to fit a few rq */ memset(&hole, 0, sizeof(hole)); @@ -400,8 +442,10 @@ static int igt_evict_contexts(void *arg) struct drm_file *file; file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); + if (IS_ERR(file)) { + err = PTR_ERR(file); + break; + } count = 0; mutex_lock(&i915->drm.struct_mutex); @@ -411,7 +455,7 @@ static int igt_evict_contexts(void *arg) struct i915_gem_context *ctx; ctx = live_context(i915, file); - if (!ctx) + if (IS_ERR(ctx)) break; /* We will need some GGTT space for the rq's context */ @@ -464,7 +508,7 @@ out_locked: } if (drm_mm_node_allocated(&hole)) drm_mm_remove_node(&hole); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -480,14 +524,17 @@ int i915_gem_evict_mock_selftests(void) SUBTEST(igt_overcommit), }; struct drm_i915_private *i915; - int err; + intel_wakeref_t wakeref; + int err = 0; i915 = mock_gem_device(); if (!i915) return -ENOMEM; mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); + with_intel_runtime_pm(i915, wakeref) + err = i915_subtests(tests, i915); + mutex_unlock(&i915->drm.struct_mutex); drm_dev_put(&i915->drm); @@ -500,7 +547,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_evict_contexts), }; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 69fe86b30fbb..9cca66e4420a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -120,7 +120,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size) if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(i915); + obj = i915_gem_object_alloc(); if (!obj) goto err; @@ -170,7 +170,7 @@ static int igt_ppgtt_alloc(void *arg) * This should ensure that we do not run into the oomkiller during * the test and take down the machine wilfully. 
*/ - limit = totalram_pages << PAGE_SHIFT; + limit = totalram_pages() << PAGE_SHIFT; limit = min(ppgtt->vm.total, limit); /* Check we can allocate the entire range */ @@ -275,6 +275,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, for (n = 0; n < count; n++) { u64 addr = hole_start + order[n] * BIT_ULL(size); + intel_wakeref_t wakeref; GEM_BUG_ON(addr + BIT_ULL(size) > vm->total); @@ -293,9 +294,9 @@ static int lowlevel_hole(struct drm_i915_private *i915, mock_vma.node.size = BIT_ULL(size); mock_vma.node.start = addr; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); } count = n; @@ -1009,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); - ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv); + ppgtt = i915_ppgtt_create(dev_priv); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; @@ -1019,7 +1020,6 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time); - i915_ppgtt_close(&ppgtt->vm); i915_ppgtt_put(ppgtt); out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); @@ -1144,6 +1144,7 @@ static int igt_ggtt_page(void *arg) struct drm_i915_private *i915 = arg; struct i915_ggtt *ggtt = &i915->ggtt; struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; struct drm_mm_node tmp; unsigned int *order, n; int err; @@ -1169,7 +1170,7 @@ static int igt_ggtt_page(void *arg) if (err) goto out_unpin; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for (n = 0; n < count; n++) { u64 offset = tmp.start + n * PAGE_SIZE; @@ -1216,7 +1217,7 @@ static int igt_ggtt_page(void *arg) kfree(order); out_remove: ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); drm_mm_remove_node(&tmp); out_unpin: i915_gem_object_unpin_pages(obj); @@ -1235,7 +1236,10 @@ static void track_vma_bind(struct i915_vma *vma) __i915_gem_object_pin_pages(obj); vma->pages = obj->mm.pages; - list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + + mutex_lock(&vma->vm->mutex); + list_move_tail(&vma->vm_link, &vma->vm->bound_list); + mutex_unlock(&vma->vm->mutex); } static int exercise_mock(struct drm_i915_private *i915, @@ -1244,7 +1248,7 @@ static int exercise_mock(struct drm_i915_private *i915, u64 hole_start, u64 hole_end, unsigned long end_time)) { - const u64 limit = totalram_pages << PAGE_SHIFT; + const u64 limit = totalram_pages() << PAGE_SHIFT; struct i915_gem_context *ctx; struct i915_hw_ppgtt *ppgtt; IGT_TIMEOUT(end_time); @@ -1265,27 +1269,35 @@ static int exercise_mock(struct drm_i915_private *i915, static int igt_mock_fill(void *arg) { - return exercise_mock(arg, fill_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, fill_hole); } static int igt_mock_walk(void *arg) { - return exercise_mock(arg, walk_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, walk_hole); } static int igt_mock_pot(void *arg) { - return exercise_mock(arg, pot_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, pot_hole); } static int igt_mock_drunk(void *arg) { - return exercise_mock(arg, drunk_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, drunk_hole); } static int igt_gtt_reserve(void *arg) { - struct drm_i915_private *i915 = 
arg; + struct i915_ggtt *ggtt = arg; struct drm_i915_gem_object *obj, *on; LIST_HEAD(objects); u64 total; @@ -1298,11 +1310,12 @@ static int igt_gtt_reserve(void *arg) /* Start by filling the GGTT */ for (total = 0; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; - total += 2*I915_GTT_PAGE_SIZE) { + total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; + total += 2 * I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + 2 * PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1316,20 +1329,20 @@ static int igt_gtt_reserve(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, total, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1347,11 +1360,12 @@ static int igt_gtt_reserve(void *arg) /* Now we start forcing evictions */ for (total = I915_GTT_PAGE_SIZE; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; - total += 2*I915_GTT_PAGE_SIZE) { + total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; + total += 2 * I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + 2 * PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1365,20 +1379,20 @@ static int igt_gtt_reserve(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, total, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1399,7 +1413,7 @@ static int igt_gtt_reserve(void *arg) struct i915_vma *vma; u64 offset; - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1411,18 +1425,18 @@ static int igt_gtt_reserve(void *arg) goto out; } - offset = random_offset(0, i915->ggtt.vm.total, + offset = random_offset(0, ggtt->vm.total, 2*I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT); - err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, offset, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1448,7 +1462,7 @@ out: static int igt_gtt_insert(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; struct drm_i915_gem_object *obj, *on; struct drm_mm_node tmp = {}; const struct invalid_insert { @@ -1457,8 +1471,8 @@ static int igt_gtt_insert(void *arg) u64 start, end; } invalid_insert[] = { { - i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0, - 0, i915->ggtt.vm.total, + ggtt->vm.total + I915_GTT_PAGE_SIZE, 0, + 0, 
ggtt->vm.total, }, { 2*I915_GTT_PAGE_SIZE, 0, @@ -1488,7 +1502,7 @@ static int igt_gtt_insert(void *arg) /* Check a couple of obviously invalid requests */ for (ii = invalid_insert; ii->size; ii++) { - err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp, + err = i915_gem_gtt_insert(&ggtt->vm, &tmp, ii->size, ii->alignment, I915_COLOR_UNEVICTABLE, ii->start, ii->end, @@ -1503,11 +1517,12 @@ static int igt_gtt_insert(void *arg) /* Start by filling the GGTT */ for (total = 0; - total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; + total + I915_GTT_PAGE_SIZE <= ggtt->vm.total; total += I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1521,15 +1536,15 @@ static int igt_gtt_insert(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, 0); if (err == -ENOSPC) { /* maxed out the GGTT space */ @@ -1538,7 +1553,7 @@ static int igt_gtt_insert(void *arg) } if (err) { pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1550,7 +1565,7 @@ static int igt_gtt_insert(void *arg) list_for_each_entry(obj, &objects, st_link) { struct i915_vma *vma; - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1570,7 +1585,7 @@ static int igt_gtt_insert(void *arg) struct i915_vma *vma; u64 offset; - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1585,13 +1600,13 @@ static int igt_gtt_insert(void *arg) goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, 0); if (err) { pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1607,11 +1622,12 @@ static int igt_gtt_insert(void *arg) /* And then force evictions */ for (total = 0; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; - total += 2*I915_GTT_PAGE_SIZE) { + total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; + total += 2 * I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + 2 * I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1625,19 +1641,19 @@ static int igt_gtt_insert(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, 0); if (err) { pr_err("i915_gem_gtt_insert (pass 3) failed at 
%llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1664,16 +1680,30 @@ int i915_gem_gtt_mock_selftests(void) SUBTEST(igt_gtt_insert), }; struct drm_i915_private *i915; + struct i915_ggtt *ggtt; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; + ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL); + if (!ggtt) { + err = -ENOMEM; + goto out_put; + } + mock_init_ggtt(i915, ggtt); + mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); + err = i915_subtests(tests, ggtt); + mock_device_flush(i915); mutex_unlock(&i915->drm.struct_mutex); + i915_gem_drain_freed_objects(i915); + + mock_fini_ggtt(ggtt); + kfree(ggtt); +out_put: drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index c3999dd2021e..971148fbe6f5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -238,6 +238,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, u32 *cpu; GEM_BUG_ON(view.partial.size > nreal); + cond_resched(); err = i915_gem_object_set_to_gtt_domain(obj, true); if (err) { @@ -307,6 +308,7 @@ static int igt_partial_tiling(void *arg) const unsigned int nreal = 1 << 12; /* largest tile row x2 */ struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; int tiling; int err; @@ -332,7 +334,7 @@ static int igt_partial_tiling(void *arg) } mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (1) { IGT_TIMEOUT(end); @@ -443,7 +445,7 @@ next_tiling: ; } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_unpin_pages(obj); out: @@ -466,7 +468,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) if (err) return err; - rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context); + rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context); if (IS_ERR(rq)) { i915_vma_unpin(vma); return PTR_ERR(rq); @@ -505,11 +507,13 @@ static void disable_retire_worker(struct drm_i915_private *i915) mutex_lock(&i915->drm.struct_mutex); if (!i915->gt.active_requests++) { - intel_runtime_pm_get(i915); - i915_gem_unpark(i915); - intel_runtime_pm_put(i915); + intel_wakeref_t wakeref; + + with_intel_runtime_pm(i915, wakeref) + i915_gem_unpark(i915); } mutex_unlock(&i915->drm.struct_mutex); + cancel_delayed_work_sync(&i915->gt.retire_work); cancel_delayed_work_sync(&i915->gt.idle_work); } @@ -577,7 +581,9 @@ static int igt_mmap_offset_exhaustion(void *arg) /* Now fill with busy dead objects that we expect to reap */ for (loop = 0; loop < 3; loop++) { - if (i915_terminally_wedged(&i915->gpu_error)) + intel_wakeref_t wakeref; + + if (i915_terminally_wedged(i915)) break; obj = i915_gem_object_create_internal(i915, PAGE_SIZE); @@ -586,10 +592,10 @@ static int igt_mmap_offset_exhaustion(void *arg) goto out; } + err = 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); - err = make_obj_busy(obj); - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) + err = make_obj_busy(obj); mutex_unlock(&i915->drm.struct_mutex); if (err) { pr_err("[loop %d] Failed to busy the object\n", loop); diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index a15713cae3b3..6d766925ad04 100644 --- 
a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -12,7 +12,9 @@ selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */ selftest(uncore, intel_uncore_live_selftests) selftest(workarounds, intel_workarounds_live_selftests) +selftest(timelines, i915_timeline_live_selftests) selftest(requests, i915_request_live_selftests) +selftest(active, i915_active_live_selftests) selftest(objects, i915_gem_object_live_selftests) selftest(dmabuf, i915_gem_dmabuf_live_selftests) selftest(coherency, i915_gem_coherency_live_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 1b70208eeea7..88e5ab586337 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -15,8 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests) selftest(syncmap, i915_syncmap_mock_selftests) selftest(uncore, intel_uncore_mock_selftests) selftest(engine, intel_engine_cs_mock_selftests) -selftest(breadcrumbs, intel_breadcrumbs_mock_selftests) -selftest(timelines, i915_gem_timeline_mock_selftests) +selftest(timelines, i915_timeline_mock_selftests) selftest(requests, i915_request_mock_selftests) selftest(objects, i915_gem_object_mock_selftests) selftest(dmabuf, i915_gem_dmabuf_mock_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c index 1f415ce47018..716a3f19f030 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.c +++ b/drivers/gpu/drm/i915/selftests/i915_random.c @@ -41,18 +41,37 @@ u64 i915_prandom_u64_state(struct rnd_state *rnd) return x; } -void i915_random_reorder(unsigned int *order, unsigned int count, - struct rnd_state *state) +void i915_prandom_shuffle(void *arr, size_t elsz, size_t count, + struct rnd_state *state) { - unsigned int i, j; + char stack[128]; + + if (WARN_ON(elsz > sizeof(stack) || count > U32_MAX)) + return; + + if (!elsz || !count) + return; + + /* Fisher-Yates shuffle courtesy of Knuth */ + while (--count) { + size_t swp; + + swp = i915_prandom_u32_max_state(count + 1, state); + if (swp == count) + continue; - for (i = 0; i < count; i++) { - BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32)); - j = i915_prandom_u32_max_state(count, state); - swap(order[i], order[j]); + memcpy(stack, arr + count * elsz, elsz); + memcpy(arr + count * elsz, arr + swp * elsz, elsz); + memcpy(arr + swp * elsz, stack, elsz); } } +void i915_random_reorder(unsigned int *order, unsigned int count, + struct rnd_state *state) +{ + i915_prandom_shuffle(order, sizeof(*order), count, state); +} + unsigned int *i915_random_order(unsigned int count, struct rnd_state *state) { unsigned int *order, i; diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 7dffedc501ca..8e1ff9c105b6 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -54,4 +54,7 @@ void i915_random_reorder(unsigned int *order, unsigned int count, struct rnd_state *state); +void i915_prandom_shuffle(void *arr, size_t elsz, size_t count, + struct rnd_state *state); + #endif /* !__I915_SELFTESTS_RANDOM_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 07e557815308..e6ffe2240126 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c 
@@ -25,8 +25,12 @@ #include <linux/prime_numbers.h> #include "../i915_selftest.h" +#include "i915_random.h" +#include "igt_live_test.h" +#include "lib_sw_fence.h" #include "mock_context.h" +#include "mock_drm.h" #include "mock_gem_device.h" static int igt_add_request(void *arg) @@ -38,7 +42,7 @@ static int igt_add_request(void *arg) /* Basic preliminary test to create a request and let it loose! */ mutex_lock(&i915->drm.struct_mutex); - request = mock_request(i915->engine[RCS], + request = mock_request(i915->engine[RCS0], i915->kernel_context, HZ / 10); if (!request) @@ -62,7 +66,7 @@ static int igt_wait_request(void *arg) /* Submit a request, then wait upon it */ mutex_lock(&i915->drm.struct_mutex); - request = mock_request(i915->engine[RCS], i915->kernel_context, T); + request = mock_request(i915->engine[RCS0], i915->kernel_context, T); if (!request) { err = -ENOMEM; goto out_unlock; @@ -132,19 +136,17 @@ static int igt_fence_wait(void *arg) /* Submit a request, treat it as a fence and wait upon it */ mutex_lock(&i915->drm.struct_mutex); - request = mock_request(i915->engine[RCS], i915->kernel_context, T); + request = mock_request(i915->engine[RCS0], i915->kernel_context, T); if (!request) { err = -ENOMEM; goto out_locked; } - mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */ if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) { pr_err("fence wait success before submit (expected timeout)!\n"); - goto out_device; + goto out_locked; } - mutex_lock(&i915->drm.struct_mutex); i915_request_add(request); mutex_unlock(&i915->drm.struct_mutex); @@ -191,7 +193,7 @@ static int igt_request_rewind(void *arg) mutex_lock(&i915->drm.struct_mutex); ctx[0] = mock_context(i915, "A"); - request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ); + request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ); if (!request) { err = -ENOMEM; goto err_context_0; @@ -201,7 +203,7 @@ static int igt_request_rewind(void *arg) i915_request_add(request); ctx[1] = mock_context(i915, "B"); - vip = mock_request(i915->engine[RCS], ctx[1], 0); + vip = mock_request(i915->engine[RCS0], ctx[1], 0); if (!vip) { err = -ENOMEM; goto err_context_1; @@ -222,8 +224,7 @@ static int igt_request_rewind(void *arg) mutex_unlock(&i915->drm.struct_mutex); if (i915_request_wait(vip, 0, HZ) == -ETIME) { - pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n", - vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS])); + pr_err("timed out waiting for high priority request\n"); goto err; } @@ -246,93 +247,285 @@ err_context_0: return err; } -int i915_request_mock_selftests(void) +struct smoketest { + struct intel_engine_cs *engine; + struct i915_gem_context **contexts; + atomic_long_t num_waits, num_fences; + int ncontexts, max_batch; + struct i915_request *(*request_alloc)(struct i915_gem_context *, + struct intel_engine_cs *); +}; + +static struct i915_request * +__mock_request_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - static const struct i915_subtest tests[] = { - SUBTEST(igt_add_request), - SUBTEST(igt_wait_request), - SUBTEST(igt_fence_wait), - SUBTEST(igt_request_rewind), - }; - struct drm_i915_private *i915; - int err; + return mock_request(engine, ctx, 0); +} - i915 = mock_gem_device(); - if (!i915) +static struct i915_request * +__live_request_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + return i915_request_alloc(engine, ctx); +} + +static int __igt_breadcrumbs_smoketest(void *arg) +{ + struct smoketest 
*t = arg; + struct mutex * const BKL = &t->engine->i915->drm.struct_mutex; + const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1; + const unsigned int total = 4 * t->ncontexts + 1; + unsigned int num_waits = 0, num_fences = 0; + struct i915_request **requests; + I915_RND_STATE(prng); + unsigned int *order; + int err = 0; + + /* + * A very simple test to catch the most egregious of list handling bugs. + * + * At its heart, we simply create oodles of requests running across + * multiple kthreads and enable signaling on them, for the sole purpose + * of stressing our breadcrumb handling. The only inspection we do is + * that the fences were marked as signaled. + */ + + requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL); + if (!requests) return -ENOMEM; - err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + order = i915_random_order(total, &prng); + if (!order) { + err = -ENOMEM; + goto out_requests; + } - return err; -} + while (!kthread_should_stop()) { + struct i915_sw_fence *submit, *wait; + unsigned int n, count; -struct live_test { - struct drm_i915_private *i915; - const char *func; - const char *name; + submit = heap_fence_create(GFP_KERNEL); + if (!submit) { + err = -ENOMEM; + break; + } - unsigned int reset_count; -}; + wait = heap_fence_create(GFP_KERNEL); + if (!wait) { + i915_sw_fence_commit(submit); + heap_fence_put(submit); + err = -ENOMEM; + break; + } -static int begin_live_test(struct live_test *t, - struct drm_i915_private *i915, - const char *func, - const char *name) -{ - int err; + i915_random_reorder(order, total, &prng); + count = 1 + i915_prandom_u32_max_state(max_batch, &prng); - t->i915 = i915; - t->func = func; - t->name = name; + for (n = 0; n < count; n++) { + struct i915_gem_context *ctx = + t->contexts[order[n] % t->ncontexts]; + struct i915_request *rq; - err = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (err) { - pr_err("%s(%s): failed to idle before, with err=%d!", - func, name, err); - return err; + mutex_lock(BKL); + + rq = t->request_alloc(ctx, t->engine); + if (IS_ERR(rq)) { + mutex_unlock(BKL); + err = PTR_ERR(rq); + count = n; + break; + } + + err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, + submit, + GFP_KERNEL); + + requests[n] = i915_request_get(rq); + i915_request_add(rq); + + mutex_unlock(BKL); + + if (err >= 0) + err = i915_sw_fence_await_dma_fence(wait, + &rq->fence, + 0, + GFP_KERNEL); + + if (err < 0) { + i915_request_put(rq); + count = n; + break; + } + } + + i915_sw_fence_commit(submit); + i915_sw_fence_commit(wait); + + if (!wait_event_timeout(wait->wait, + i915_sw_fence_done(wait), + HZ / 2)) { + struct i915_request *rq = requests[count - 1]; + + pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n", + count, + rq->fence.context, rq->fence.seqno, + t->engine->name); + i915_gem_set_wedged(t->engine->i915); + GEM_BUG_ON(!i915_request_completed(rq)); + i915_sw_fence_wait(wait); + err = -EIO; + } + + for (n = 0; n < count; n++) { + struct i915_request *rq = requests[n]; + + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &rq->fence.flags)) { + pr_err("%llu:%llu was not signaled!\n", + rq->fence.context, rq->fence.seqno); + err = -EINVAL; + } + + i915_request_put(rq); + } + + heap_fence_put(wait); + heap_fence_put(submit); + + if (err < 0) + break; + + num_fences += count; + num_waits++; + + cond_resched(); } - i915->gpu_error.missed_irq_rings = 0; - t->reset_count = i915_reset_count(&i915->gpu_error); + atomic_long_add(num_fences, &t->num_fences); + 
atomic_long_add(num_waits, &t->num_waits); - return 0; + kfree(order); +out_requests: + kfree(requests); + return err; } -static int end_live_test(struct live_test *t) +static int mock_breadcrumbs_smoketest(void *arg) { - struct drm_i915_private *i915 = t->i915; + struct drm_i915_private *i915 = arg; + struct smoketest t = { + .engine = i915->engine[RCS0], + .ncontexts = 1024, + .max_batch = 1024, + .request_alloc = __mock_request_alloc + }; + unsigned int ncpus = num_online_cpus(); + struct task_struct **threads; + unsigned int n; + int ret = 0; + + /* + * Smoketest our breadcrumb/signal handling for requests across multiple + * threads. A very simple test to only catch the most egregious of bugs. + * See __igt_breadcrumbs_smoketest(); + */ - i915_retire_requests(i915); + threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL); + if (!threads) + return -ENOMEM; - if (wait_for(intel_engines_are_idle(i915), 10)) { - pr_err("%s(%s): GPU not idle\n", t->func, t->name); - return -EIO; + t.contexts = + kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL); + if (!t.contexts) { + ret = -ENOMEM; + goto out_threads; } - if (t->reset_count != i915_reset_count(&i915->gpu_error)) { - pr_err("%s(%s): GPU was reset %d times!\n", - t->func, t->name, - i915_reset_count(&i915->gpu_error) - t->reset_count); - return -EIO; + mutex_lock(&t.engine->i915->drm.struct_mutex); + for (n = 0; n < t.ncontexts; n++) { + t.contexts[n] = mock_context(t.engine->i915, "mock"); + if (!t.contexts[n]) { + ret = -ENOMEM; + goto out_contexts; + } } + mutex_unlock(&t.engine->i915->drm.struct_mutex); + + for (n = 0; n < ncpus; n++) { + threads[n] = kthread_run(__igt_breadcrumbs_smoketest, + &t, "igt/%d", n); + if (IS_ERR(threads[n])) { + ret = PTR_ERR(threads[n]); + ncpus = n; + break; + } - if (i915->gpu_error.missed_irq_rings) { - pr_err("%s(%s): Missed interrupts on engines %lx\n", - t->func, t->name, i915->gpu_error.missed_irq_rings); - return -EIO; + get_task_struct(threads[n]); } - return 0; + msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); + + for (n = 0; n < ncpus; n++) { + int err; + + err = kthread_stop(threads[n]); + if (err < 0 && !ret) + ret = err; + + put_task_struct(threads[n]); + } + pr_info("Completed %lu waits for %lu fence across %d cpus\n", + atomic_long_read(&t.num_waits), + atomic_long_read(&t.num_fences), + ncpus); + + mutex_lock(&t.engine->i915->drm.struct_mutex); +out_contexts: + for (n = 0; n < t.ncontexts; n++) { + if (!t.contexts[n]) + break; + mock_context_close(t.contexts[n]); + } + mutex_unlock(&t.engine->i915->drm.struct_mutex); + kfree(t.contexts); +out_threads: + kfree(threads); + + return ret; +} + +int i915_request_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_add_request), + SUBTEST(igt_wait_request), + SUBTEST(igt_fence_wait), + SUBTEST(igt_request_rewind), + SUBTEST(mock_breadcrumbs_smoketest), + }; + struct drm_i915_private *i915; + intel_wakeref_t wakeref; + int err = 0; + + i915 = mock_gem_device(); + if (!i915) + return -ENOMEM; + + with_intel_runtime_pm(i915, wakeref) + err = i915_subtests(tests, i915); + + drm_dev_put(&i915->drm); + + return err; } static int live_nop_request(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; - struct live_test t; + intel_wakeref_t wakeref; + struct igt_live_test t; unsigned int id; int err = -ENODEV; @@ -342,7 +535,7 @@ static int live_nop_request(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = 
intel_runtime_pm_get(i915); for_each_engine(engine, i915, id) { struct i915_request *request = NULL; @@ -350,7 +543,7 @@ static int live_nop_request(void *arg) IGT_TIMEOUT(end_time); ktime_t times[2] = {}; - err = begin_live_test(&t, i915, __func__, engine->name); + err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_unlock; @@ -392,7 +585,7 @@ static int live_nop_request(void *arg) break; } - err = end_live_test(&t); + err = igt_live_test_end(&t); if (err) goto out_unlock; @@ -403,7 +596,7 @@ static int live_nop_request(void *arg) } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -426,13 +619,11 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) } *cmd = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); + __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - goto err; + i915_gem_chipset_flush(i915); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { @@ -478,7 +669,8 @@ static int live_empty_request(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; - struct live_test t; + intel_wakeref_t wakeref; + struct igt_live_test t; struct i915_vma *batch; unsigned int id; int err = 0; @@ -489,7 +681,7 @@ static int live_empty_request(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); batch = empty_batch(i915); if (IS_ERR(batch)) { @@ -503,7 +695,7 @@ static int live_empty_request(void *arg) unsigned long n, prime; ktime_t times[2] = {}; - err = begin_live_test(&t, i915, __func__, engine->name); + err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_batch; @@ -539,7 +731,7 @@ static int live_empty_request(void *arg) break; } - err = end_live_test(&t); + err = igt_live_test_end(&t); if (err) goto out_batch; @@ -553,7 +745,7 @@ out_batch: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -583,10 +775,6 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) if (err) goto err; - err = i915_gem_object_set_to_wc_domain(obj, true); - if (err) - goto err; - cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); @@ -605,10 +793,12 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) *cmd++ = lower_32_bits(vma->node.start); } *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */ - i915_gem_chipset_flush(i915); + __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); + i915_gem_chipset_flush(i915); + return vma; err: @@ -637,8 +827,9 @@ static int live_all_engines(void *arg) struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; struct i915_request *request[I915_NUM_ENGINES]; + intel_wakeref_t wakeref; + struct igt_live_test t; struct i915_vma *batch; - struct live_test t; unsigned int id; int err; @@ -648,9 +839,9 @@ static int live_all_engines(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -722,7 +913,7 @@ static int live_all_engines(void *arg) request[id] = NULL; } - err = end_live_test(&t); + 
err = igt_live_test_end(&t); out_request: for_each_engine(engine, i915, id) @@ -731,7 +922,7 @@ out_request: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -742,7 +933,8 @@ static int live_sequential_engines(void *arg) struct i915_request *request[I915_NUM_ENGINES] = {}; struct i915_request *prev = NULL; struct intel_engine_cs *engine; - struct live_test t; + intel_wakeref_t wakeref; + struct igt_live_test t; unsigned int id; int err; @@ -753,9 +945,9 @@ static int live_sequential_engines(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -838,7 +1030,7 @@ static int live_sequential_engines(void *arg) GEM_BUG_ON(!i915_request_completed(request[id])); } - err = end_live_test(&t); + err = igt_live_test_end(&t); out_request: for_each_engine(engine, i915, id) { @@ -860,11 +1052,183 @@ out_request: i915_request_put(request[id]); } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } +static int +max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine) +{ + struct i915_request *rq; + int ret; + + /* + * Before execlists, all contexts share the same ringbuffer. With + * execlists, each context/engine has a separate ringbuffer and + * for the purposes of this test, inexhaustible. + * + * For the global ringbuffer though, we have to be very careful + * that we do not wrap while preventing the execution of requests + * with a unsignaled fence. + */ + if (HAS_EXECLISTS(ctx->i915)) + return INT_MAX; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + } else { + int sz; + + ret = rq->ring->size - rq->reserved_space; + i915_request_add(rq); + + sz = rq->ring->emit - rq->head; + if (sz < 0) + sz += rq->ring->size; + ret /= sz; + ret /= 2; /* leave half spare, in case of emergency! */ + } + + return ret; +} + +static int live_breadcrumbs_smoketest(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct smoketest t[I915_NUM_ENGINES]; + unsigned int ncpus = num_online_cpus(); + unsigned long num_waits, num_fences; + struct intel_engine_cs *engine; + struct task_struct **threads; + struct igt_live_test live; + enum intel_engine_id id; + intel_wakeref_t wakeref; + struct drm_file *file; + unsigned int n; + int ret = 0; + + /* + * Smoketest our breadcrumb/signal handling for requests across multiple + * threads. A very simple test to only catch the most egregious of bugs. + * See __igt_breadcrumbs_smoketest(); + * + * On real hardware this time. 
+ */ + + wakeref = intel_runtime_pm_get(i915); + + file = mock_file(i915); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto out_rpm; + } + + threads = kcalloc(ncpus * I915_NUM_ENGINES, + sizeof(*threads), + GFP_KERNEL); + if (!threads) { + ret = -ENOMEM; + goto out_file; + } + + memset(&t[0], 0, sizeof(t[0])); + t[0].request_alloc = __live_request_alloc; + t[0].ncontexts = 64; + t[0].contexts = kmalloc_array(t[0].ncontexts, + sizeof(*t[0].contexts), + GFP_KERNEL); + if (!t[0].contexts) { + ret = -ENOMEM; + goto out_threads; + } + + mutex_lock(&i915->drm.struct_mutex); + for (n = 0; n < t[0].ncontexts; n++) { + t[0].contexts[n] = live_context(i915, file); + if (!t[0].contexts[n]) { + ret = -ENOMEM; + goto out_contexts; + } + } + + ret = igt_live_test_begin(&live, i915, __func__, ""); + if (ret) + goto out_contexts; + + for_each_engine(engine, i915, id) { + t[id] = t[0]; + t[id].engine = engine; + t[id].max_batch = max_batches(t[0].contexts[0], engine); + if (t[id].max_batch < 0) { + ret = t[id].max_batch; + mutex_unlock(&i915->drm.struct_mutex); + goto out_flush; + } + /* One ring interleaved between requests from all cpus */ + t[id].max_batch /= num_online_cpus() + 1; + pr_debug("Limiting batches to %d requests on %s\n", + t[id].max_batch, engine->name); + + for (n = 0; n < ncpus; n++) { + struct task_struct *tsk; + + tsk = kthread_run(__igt_breadcrumbs_smoketest, + &t[id], "igt/%d.%d", id, n); + if (IS_ERR(tsk)) { + ret = PTR_ERR(tsk); + mutex_unlock(&i915->drm.struct_mutex); + goto out_flush; + } + + get_task_struct(tsk); + threads[id * ncpus + n] = tsk; + } + } + mutex_unlock(&i915->drm.struct_mutex); + + msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); + +out_flush: + num_waits = 0; + num_fences = 0; + for_each_engine(engine, i915, id) { + for (n = 0; n < ncpus; n++) { + struct task_struct *tsk = threads[id * ncpus + n]; + int err; + + if (!tsk) + continue; + + err = kthread_stop(tsk); + if (err < 0 && !ret) + ret = err; + + put_task_struct(tsk); + } + + num_waits += atomic_long_read(&t[id].num_waits); + num_fences += atomic_long_read(&t[id].num_fences); + } + pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n", + num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus); + + mutex_lock(&i915->drm.struct_mutex); + ret = igt_live_test_end(&live) ?: ret; +out_contexts: + mutex_unlock(&i915->drm.struct_mutex); + kfree(t[0].contexts); +out_threads: + kfree(threads); +out_file: + mock_file_free(i915, file); +out_rpm: + intel_runtime_pm_put(i915, wakeref); + + return ret; +} + int i915_request_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { @@ -872,9 +1236,10 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_all_engines), SUBTEST(live_sequential_engines), SUBTEST(live_empty_request), + SUBTEST(live_breadcrumbs_smoketest), }; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index 86c54ea37f48..b18eaefef798 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -133,7 +133,7 @@ static int __run_selftests(const char *name, if (signal_pending(current)) return -EINTR; - pr_debug(DRIVER_NAME ": Running %s\n", st->name); + pr_info(DRIVER_NAME ": Running %s\n", st->name); if (data) err = st->live(data); else @@ -197,6 +197,49 @@ int 
i915_live_selftests(struct pci_dev *pdev) return 0; } +static bool apply_subtest_filter(const char *caller, const char *name) +{ + char *filter, *sep, *tok; + bool result = true; + + filter = kstrdup(i915_selftest.filter, GFP_KERNEL); + for (sep = filter; (tok = strsep(&sep, ","));) { + bool allow = true; + char *sl; + + if (*tok == '!') { + allow = false; + tok++; + } + + if (*tok == '\0') + continue; + + sl = strchr(tok, '/'); + if (sl) { + *sl++ = '\0'; + if (strcmp(tok, caller)) { + if (allow) + result = false; + continue; + } + tok = sl; + } + + if (strcmp(tok, name)) { + if (allow) + result = false; + continue; + } + + result = allow; + break; + } + kfree(filter); + + return result; +} + int __i915_subtests(const char *caller, const struct i915_subtest *st, unsigned int count, @@ -209,7 +252,10 @@ int __i915_subtests(const char *caller, if (signal_pending(current)) return -EINTR; - pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name); + if (!apply_subtest_filter(caller, st->name)) + continue; + + pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name); GEM_TRACE("Running %s/%s\n", caller, st->name); err = st->func(data); @@ -244,6 +290,7 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...) module_param_named(st_random_seed, i915_selftest.random_seed, uint, 0400); module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400); +module_param_named(st_filter, i915_selftest.filter, charp, 0400); module_param_named_unsafe(mock_selftests, i915_selftest.mock, int, 0400); MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardware (0:disabled [default], 1:run tests then load driver, -1:run tests then exit module)"); diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c index cdbc8f134e5e..cbf45d85cbff 100644 --- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c @@ -571,21 +571,27 @@ static int test_timer(void *arg) unsigned long target, delay; struct timed_fence tf; + preempt_disable(); timed_fence_init(&tf, target = jiffies); if (!i915_sw_fence_done(&tf.fence)) { pr_err("Fence with immediate expiration not signaled\n"); goto err; } + preempt_enable(); timed_fence_fini(&tf); for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) { + preempt_disable(); timed_fence_init(&tf, target = jiffies + delay); if (i915_sw_fence_done(&tf.fence)) { pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay); goto err; } + preempt_enable(); i915_sw_fence_wait(&tf.fence); + + preempt_disable(); if (!i915_sw_fence_done(&tf.fence)) { pr_err("Fence not signaled after wait\n"); goto err; @@ -595,13 +601,14 @@ static int test_timer(void *arg) target, jiffies); goto err; } - + preempt_enable(); timed_fence_fini(&tf); } return 0; err: + preempt_enable(); timed_fence_fini(&tf); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c index 19f1c6a5c8fb..8e7bcaa1eb66 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -4,12 +4,155 @@ * Copyright © 2017-2018 Intel Corporation */ +#include <linux/prime_numbers.h> + #include "../i915_selftest.h" #include "i915_random.h" +#include "igt_flush_test.h" #include "mock_gem_device.h" #include "mock_timeline.h" +static struct page *hwsp_page(struct i915_timeline *tl) +{ + struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj; + + 
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + return sg_page(obj->mm.pages->sgl); +} + +static unsigned long hwsp_cacheline(struct i915_timeline *tl) +{ + unsigned long address = (unsigned long)page_address(hwsp_page(tl)); + + return (address + tl->hwsp_offset) / CACHELINE_BYTES; +} + +#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES) + +struct mock_hwsp_freelist { + struct drm_i915_private *i915; + struct radix_tree_root cachelines; + struct i915_timeline **history; + unsigned long count, max; + struct rnd_state prng; +}; + +enum { + SHUFFLE = BIT(0), +}; + +static void __mock_hwsp_record(struct mock_hwsp_freelist *state, + unsigned int idx, + struct i915_timeline *tl) +{ + tl = xchg(&state->history[idx], tl); + if (tl) { + radix_tree_delete(&state->cachelines, hwsp_cacheline(tl)); + i915_timeline_put(tl); + } +} + +static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, + unsigned int count, + unsigned int flags) +{ + struct i915_timeline *tl; + unsigned int idx; + + while (count--) { + unsigned long cacheline; + int err; + + tl = i915_timeline_create(state->i915, NULL); + if (IS_ERR(tl)) + return PTR_ERR(tl); + + cacheline = hwsp_cacheline(tl); + err = radix_tree_insert(&state->cachelines, cacheline, tl); + if (err) { + if (err == -EEXIST) { + pr_err("HWSP cacheline %lu already used; duplicate allocation!\n", + cacheline); + } + i915_timeline_put(tl); + return err; + } + + idx = state->count++ % state->max; + __mock_hwsp_record(state, idx, tl); + } + + if (flags & SHUFFLE) + i915_prandom_shuffle(state->history, + sizeof(*state->history), + min(state->count, state->max), + &state->prng); + + count = i915_prandom_u32_max_state(min(state->count, state->max), + &state->prng); + while (count--) { + idx = --state->count % state->max; + __mock_hwsp_record(state, idx, NULL); + } + + return 0; +} + +static int mock_hwsp_freelist(void *arg) +{ + struct mock_hwsp_freelist state; + const struct { + const char *name; + unsigned int flags; + } phases[] = { + { "linear", 0 }, + { "shuffled", SHUFFLE }, + { }, + }, *p; + unsigned int na; + int err = 0; + + INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL); + state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed); + + state.i915 = mock_gem_device(); + if (!state.i915) + return -ENOMEM; + + /* + * Create a bunch of timelines and check that their HWSP do not overlap. + * Free some, and try again. 
+ */ + + state.max = PAGE_SIZE / sizeof(*state.history); + state.count = 0; + state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL); + if (!state.history) { + err = -ENOMEM; + goto err_put; + } + + mutex_lock(&state.i915->drm.struct_mutex); + for (p = phases; p->name; p++) { + pr_debug("%s(%s)\n", __func__, p->name); + for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) { + err = __mock_hwsp_timeline(&state, na, p->flags); + if (err) + goto out; + } + } + +out: + for (na = 0; na < state.max; na++) + __mock_hwsp_record(&state, na, NULL); + mutex_unlock(&state.i915->drm.struct_mutex); + kfree(state.history); +err_put: + drm_dev_put(&state.i915->drm); + return err; +} + struct __igt_sync { const char *name; u32 seqno; @@ -256,12 +399,444 @@ static int bench_sync(void *arg) return 0; } -int i915_gem_timeline_mock_selftests(void) +int i915_timeline_mock_selftests(void) { static const struct i915_subtest tests[] = { + SUBTEST(mock_hwsp_freelist), SUBTEST(igt_sync), SUBTEST(bench_sync), }; return i915_subtests(tests, NULL); } + +static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + if (INTEL_GEN(rq->i915) >= 8) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = addr; + *cs++ = 0; + *cs++ = value; + } else if (INTEL_GEN(rq->i915) >= 4) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = 0; + *cs++ = addr; + *cs++ = value; + } else { + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cs++ = addr; + *cs++ = value; + *cs++ = MI_NOOP; + } + + intel_ring_advance(rq, cs); + + return 0; +} + +static struct i915_request * +tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value) +{ + struct i915_request *rq; + int err; + + lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */ + + err = i915_timeline_pin(tl); + if (err) { + rq = ERR_PTR(err); + goto out; + } + + rq = i915_request_alloc(engine, engine->i915->kernel_context); + if (IS_ERR(rq)) + goto out_unpin; + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value); + i915_request_add(rq); + if (err) + rq = ERR_PTR(err); + +out_unpin: + i915_timeline_unpin(tl); +out: + if (IS_ERR(rq)) + pr_err("Failed to write to timeline!\n"); + return rq; +} + +static struct i915_timeline * +checked_i915_timeline_create(struct drm_i915_private *i915) +{ + struct i915_timeline *tl; + + tl = i915_timeline_create(i915, NULL); + if (IS_ERR(tl)) + return tl; + + if (*tl->hwsp_seqno != tl->seqno) { + pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n", + *tl->hwsp_seqno, tl->seqno); + i915_timeline_put(tl); + return ERR_PTR(-EINVAL); + } + + return tl; +} + +static int live_hwsp_engine(void *arg) +{ +#define NUM_TIMELINES 4096 + struct drm_i915_private *i915 = arg; + struct i915_timeline **timelines; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count, n; + int err = 0; + + /* + * Create a bunch of timelines and check we can write + * independently to each of their breadcrumb slots. 
+ */ + + timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, + sizeof(*timelines), + GFP_KERNEL); + if (!timelines) + return -ENOMEM; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + count = 0; + for_each_engine(engine, i915, id) { + if (!intel_engine_can_store_dword(engine)) + continue; + + for (n = 0; n < NUM_TIMELINES; n++) { + struct i915_timeline *tl; + struct i915_request *rq; + + tl = checked_i915_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + i915_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + timelines[count++] = tl; + } + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + for (n = 0; n < count; n++) { + struct i915_timeline *tl = timelines[n]; + + if (!err && *tl->hwsp_seqno != n) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + n, *tl->hwsp_seqno); + err = -EINVAL; + } + i915_timeline_put(tl); + } + + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + kvfree(timelines); + + return err; +#undef NUM_TIMELINES +} + +static int live_hwsp_alternate(void *arg) +{ +#define NUM_TIMELINES 4096 + struct drm_i915_private *i915 = arg; + struct i915_timeline **timelines; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count, n; + int err = 0; + + /* + * Create a bunch of timelines and check we can write + * independently to each of their breadcrumb slots with adjacent + * engines. + */ + + timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, + sizeof(*timelines), + GFP_KERNEL); + if (!timelines) + return -ENOMEM; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + count = 0; + for (n = 0; n < NUM_TIMELINES; n++) { + for_each_engine(engine, i915, id) { + struct i915_timeline *tl; + struct i915_request *rq; + + if (!intel_engine_can_store_dword(engine)) + continue; + + tl = checked_i915_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + i915_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + timelines[count++] = tl; + } + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + for (n = 0; n < count; n++) { + struct i915_timeline *tl = timelines[n]; + + if (!err && *tl->hwsp_seqno != n) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + n, *tl->hwsp_seqno); + err = -EINVAL; + } + i915_timeline_put(tl); + } + + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + kvfree(timelines); + + return err; +#undef NUM_TIMELINES +} + +static int live_hwsp_wrap(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_timeline *tl; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = 0; + + /* + * Across a seqno wrap, we need to keep the old cacheline alive for + * foreign GPU references. 
+ */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + tl = i915_timeline_create(i915, NULL); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out_rpm; + } + if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) + goto out_free; + + err = i915_timeline_pin(tl); + if (err) + goto out_free; + + for_each_engine(engine, i915, id) { + const u32 *hwsp_seqno[2]; + struct i915_request *rq; + u32 seqno[2]; + + if (!intel_engine_can_store_dword(engine)) + continue; + + rq = i915_request_alloc(engine, i915->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + tl->seqno = -4u; + + err = i915_timeline_get_seqno(tl, rq, &seqno[0]); + if (err) { + i915_request_add(rq); + goto out; + } + pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n", + seqno[0], tl->hwsp_offset); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]); + if (err) { + i915_request_add(rq); + goto out; + } + hwsp_seqno[0] = tl->hwsp_seqno; + + err = i915_timeline_get_seqno(tl, rq, &seqno[1]); + if (err) { + i915_request_add(rq); + goto out; + } + pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n", + seqno[1], tl->hwsp_offset); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]); + if (err) { + i915_request_add(rq); + goto out; + } + hwsp_seqno[1] = tl->hwsp_seqno; + + /* With wrap should come a new hwsp */ + GEM_BUG_ON(seqno[1] >= seqno[0]); + GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]); + + i915_request_add(rq); + + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { + pr_err("Wait for timeline writes timed out!\n"); + err = -EIO; + goto out; + } + + if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) { + pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n", + *hwsp_seqno[0], *hwsp_seqno[1], + seqno[0], seqno[1]); + err = -EINVAL; + goto out; + } + + i915_retire_requests(i915); /* recycle HWSP */ + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + i915_timeline_unpin(tl); +out_free: + i915_timeline_put(tl); +out_rpm: + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + +static int live_hwsp_recycle(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count; + int err = 0; + + /* + * Check seqno writes into one timeline at a time. We expect to + * recycle the breadcrumb slot between iterations and neither + * want to confuse ourselves or the GPU. + */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + count = 0; + for_each_engine(engine, i915, id) { + IGT_TIMEOUT(end_time); + + if (!intel_engine_can_store_dword(engine)) + continue; + + do { + struct i915_timeline *tl; + struct i915_request *rq; + + tl = checked_i915_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + i915_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + if (i915_request_wait(rq, + I915_WAIT_LOCKED, + HZ / 5) < 0) { + pr_err("Wait for timeline writes timed out!\n"); + i915_timeline_put(tl); + err = -EIO; + goto out; + } + + if (*tl->hwsp_seqno != count) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + count, *tl->hwsp_seqno); + err = -EINVAL; + } + + i915_timeline_put(tl); + count++; + + if (err) + goto out; + + i915_timelines_park(i915); /* Encourage recycling! 
*/ + } while (!__igt_timeout(end_time, NULL)); + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + +int i915_timeline_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_hwsp_recycle), + SUBTEST(live_hwsp_engine), + SUBTEST(live_hwsp_alternate), + SUBTEST(live_hwsp_wrap), + }; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index ffa74290e054..fc594b030f5a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -28,6 +28,7 @@ #include "mock_gem_device.h" #include "mock_context.h" +#include "mock_gtt.h" static bool assert_vma(struct i915_vma *vma, struct drm_i915_gem_object *obj, @@ -141,7 +142,8 @@ static int create_vmas(struct drm_i915_private *i915, static int igt_vma_create(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; + struct drm_i915_private *i915 = ggtt->vm.i915; struct drm_i915_gem_object *obj, *on; struct i915_gem_context *ctx, *cn; unsigned long num_obj, num_ctx; @@ -245,7 +247,7 @@ static bool assert_pin_einval(const struct i915_vma *vma, static int igt_vma_pin1(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; const struct pin_mode modes[] = { #define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " } #define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" } @@ -256,30 +258,30 @@ static int igt_vma_pin1(void *arg) VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096), VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192), - VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)), - - VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), - INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end), - VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)), - INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total), + VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), + VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), + VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)), + + VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)), + INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | ggtt->mappable_end), + VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)), + INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | ggtt->vm.total), INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)), VALID(4096, PIN_GLOBAL), VALID(8192, PIN_GLOBAL), - VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE), - VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE), - NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE), - VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL), - VALID(i915->ggtt.vm.total, PIN_GLOBAL), - NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL), + VALID(ggtt->mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE), + VALID(ggtt->mappable_end, PIN_GLOBAL | PIN_MAPPABLE), 
+ NOSPACE(ggtt->mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE), + VALID(ggtt->vm.total - 4096, PIN_GLOBAL), + VALID(ggtt->vm.total, PIN_GLOBAL), + NOSPACE(ggtt->vm.total + 4096, PIN_GLOBAL), NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL), - INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), - INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)), + INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)), + INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)), INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)), - VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), + VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), #if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) /* Misusing BIAS is a programming error (it is not controllable @@ -287,10 +289,10 @@ static int igt_vma_pin1(void *arg) * However, the tests are still quite interesting for checking * variable start, end and size. */ - NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end), - NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total), - NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)), + NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | ggtt->mappable_end), + NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | ggtt->vm.total), + NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), + NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)), #endif { }, #undef NOSPACE @@ -306,13 +308,13 @@ static int igt_vma_pin1(void *arg) * focusing on error handling of boundary conditions. */ - GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm)); + GEM_BUG_ON(!drm_mm_clean(&ggtt->vm.mm)); - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); - vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = checked_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) goto out; @@ -403,8 +405,8 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a, static int igt_vma_rotate(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_address_space *vm = &i915->ggtt.vm; + struct i915_ggtt *ggtt = arg; + struct i915_address_space *vm = &ggtt->vm; struct drm_i915_gem_object *obj; const struct intel_rotation_plane_info planes[] = { { .width = 1, .height = 1, .stride = 1 }, @@ -431,7 +433,7 @@ static int igt_vma_rotate(void *arg) * that the page layout within the rotated VMA match our expectations. */ - obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE); if (IS_ERR(obj)) goto out; @@ -602,8 +604,8 @@ static bool assert_pin(struct i915_vma *vma, static int igt_vma_partial(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_address_space *vm = &i915->ggtt.vm; + struct i915_ggtt *ggtt = arg; + struct i915_address_space *vm = &ggtt->vm; const unsigned int npages = 1021; /* prime! */ struct drm_i915_gem_object *obj; const struct phase { @@ -621,7 +623,7 @@ static int igt_vma_partial(void *arg) * we are returned the same VMA when we later request the same range. 
*/ - obj = i915_gem_object_create_internal(i915, npages*PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, npages * PAGE_SIZE); if (IS_ERR(obj)) goto out; @@ -670,7 +672,7 @@ static int igt_vma_partial(void *arg) } count = 0; - list_for_each_entry(vma, &obj->vma_list, obj_link) + list_for_each_entry(vma, &obj->vma.list, obj_link) count++; if (count != nvma) { pr_err("(%s) All partial vma were not recorded on the obj->vma_list: found %u, expected %u\n", @@ -699,7 +701,7 @@ static int igt_vma_partial(void *arg) i915_vma_unpin(vma); count = 0; - list_for_each_entry(vma, &obj->vma_list, obj_link) + list_for_each_entry(vma, &obj->vma.list, obj_link) count++; if (count != nvma) { pr_err("(%s) allocated an extra full vma!\n", p->name); @@ -723,17 +725,30 @@ int i915_vma_mock_selftests(void) SUBTEST(igt_vma_partial), }; struct drm_i915_private *i915; + struct i915_ggtt *ggtt; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; + ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL); + if (!ggtt) { + err = -ENOMEM; + goto out_put; + } + mock_init_ggtt(i915, ggtt); + mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); + err = i915_subtests(tests, ggtt); + mock_device_flush(i915); mutex_unlock(&i915->drm.struct_mutex); + i915_gem_drain_freed_objects(i915); + + mock_fini_ggtt(ggtt); + kfree(ggtt); +out_put: drm_dev_put(&i915->drm); return err; } - diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index af66e3d4e23a..94aee4071a66 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) cond_resched(); if (flags & I915_WAIT_LOCKED && - i915_gem_switch_to_kernel_context(i915)) { + i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) { pr_err("Failed to switch back to kernel context; declaring wedged\n"); i915_gem_set_wedged(i915); } @@ -29,5 +29,5 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) i915_gem_set_wedged(i915); } - return i915_terminally_wedged(&i915->gpu_error) ? 
-EIO : 0; + return i915_terminally_wedged(i915); } diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c new file mode 100644 index 000000000000..3e902761cd16 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include "../i915_drv.h" + +#include "../i915_selftest.h" +#include "igt_flush_test.h" +#include "igt_live_test.h" + +int igt_live_test_begin(struct igt_live_test *t, + struct drm_i915_private *i915, + const char *func, + const char *name) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + lockdep_assert_held(&i915->drm.struct_mutex); + + t->i915 = i915; + t->func = func; + t->name = name; + + err = i915_gem_wait_for_idle(i915, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (err) { + pr_err("%s(%s): failed to idle before, with err=%d!", + func, name, err); + return err; + } + + t->reset_global = i915_reset_count(&i915->gpu_error); + + for_each_engine(engine, i915, id) + t->reset_engine[id] = + i915_reset_engine_count(&i915->gpu_error, engine); + + return 0; +} + +int igt_live_test_end(struct igt_live_test *t) +{ + struct drm_i915_private *i915 = t->i915; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + lockdep_assert_held(&i915->drm.struct_mutex); + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + return -EIO; + + if (t->reset_global != i915_reset_count(&i915->gpu_error)) { + pr_err("%s(%s): GPU was reset %d times!\n", + t->func, t->name, + i915_reset_count(&i915->gpu_error) - t->reset_global); + return -EIO; + } + + for_each_engine(engine, i915, id) { + if (t->reset_engine[id] == + i915_reset_engine_count(&i915->gpu_error, engine)) + continue; + + pr_err("%s(%s): engine '%s' was reset %d times!\n", + t->func, t->name, engine->name, + i915_reset_engine_count(&i915->gpu_error, engine) - + t->reset_engine[id]); + return -EIO; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h new file mode 100644 index 000000000000..c0e9f99d50de --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef IGT_LIVE_TEST_H +#define IGT_LIVE_TEST_H + +#include "../i915_gem.h" + +struct drm_i915_private; + +struct igt_live_test { + struct drm_i915_private *i915; + const char *func; + const char *name; + + unsigned int reset_global; + unsigned int reset_engine[I915_NUM_ENGINES]; +}; + +/* + * Flush the GPU state before and after the test to ensure that no residual + * code is running on the GPU that may affect this test. Also compare the + * state before and after the test and alert if it unexpectedly changes, + * e.g. if the GPU was reset. 
+ */ +int igt_live_test_begin(struct igt_live_test *t, + struct drm_i915_private *i915, + const char *func, + const char *name); +int igt_live_test_end(struct igt_live_test *t); + +#endif /* IGT_LIVE_TEST_H */ diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 8cd34f6e6859..16890dfe74c0 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -29,7 +29,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) goto err_hws; } - i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC); + i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC); vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); @@ -68,48 +68,65 @@ static u64 hws_address(const struct i915_vma *hws, return hws->node.start + seqno_offset(rq->fence.context); } -static int emit_recurse_batch(struct igt_spinner *spin, - struct i915_request *rq, - u32 arbitration_command) +static int move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) { - struct i915_address_space *vm = &rq->gem_context->ppgtt->vm; + int err; + + err = i915_vma_move_to_active(vma, rq, flags); + if (err) + return err; + + if (!i915_gem_object_has_active_reference(vma->obj)) { + i915_gem_object_get(vma->obj); + i915_gem_object_set_active_reference(vma->obj); + } + + return 0; +} + +struct i915_request * +igt_spinner_create_request(struct igt_spinner *spin, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u32 arbitration_command) +{ + struct i915_address_space *vm = &ctx->ppgtt->vm; + struct i915_request *rq = NULL; struct i915_vma *hws, *vma; u32 *batch; int err; vma = i915_vma_instance(spin->obj, vm, NULL); if (IS_ERR(vma)) - return PTR_ERR(vma); + return ERR_CAST(vma); hws = i915_vma_instance(spin->hws, vm, NULL); if (IS_ERR(hws)) - return PTR_ERR(hws); + return ERR_CAST(hws); err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) - return err; + return ERR_PTR(err); err = i915_vma_pin(hws, 0, 0, PIN_USER); if (err) goto unpin_vma; - err = i915_vma_move_to_active(vma, rq, 0); - if (err) + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); goto unpin_hws; - - if (!i915_gem_object_has_active_reference(vma->obj)) { - i915_gem_object_get(vma->obj); - i915_gem_object_set_active_reference(vma->obj); } - err = i915_vma_move_to_active(hws, rq, 0); + err = move_to_active(vma, rq, 0); if (err) - goto unpin_hws; + goto cancel_rq; - if (!i915_gem_object_has_active_reference(hws->obj)) { - i915_gem_object_get(hws->obj); - i915_gem_object_set_active_reference(hws->obj); - } + err = move_to_active(hws, rq, 0); + if (err) + goto cancel_rq; batch = spin->batch; @@ -127,35 +144,25 @@ static int emit_recurse_batch(struct igt_spinner *spin, i915_gem_chipset_flush(spin->i915); - err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); - -unpin_hws: - i915_vma_unpin(hws); -unpin_vma: - i915_vma_unpin(vma); - return err; -} - -struct i915_request * -igt_spinner_create_request(struct igt_spinner *spin, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - u32 arbitration_command) -{ - struct i915_request *rq; - int err; + if (engine->emit_init_breadcrumb && + rq->timeline->has_initial_breadcrumb) { + err = engine->emit_init_breadcrumb(rq); + if (err) + goto cancel_rq; + } - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) - return rq; + err = engine->emit_bb_start(rq, vma->node.start, 
PAGE_SIZE, 0); - err = emit_recurse_batch(spin, rq, arbitration_command); +cancel_rq: if (err) { + i915_request_skip(rq, err); i915_request_add(rq); - return ERR_PTR(err); } - - return rq; +unpin_hws: + i915_vma_unpin(hws); +unpin_vma: + i915_vma_unpin(vma); + return err ? ERR_PTR(err) : rq; } static u32 @@ -185,11 +192,6 @@ void igt_spinner_fini(struct igt_spinner *spin) bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq) { - if (!wait_event_timeout(rq->execute, - READ_ONCE(rq->global_seqno), - msecs_to_jiffies(10))) - return false; - return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq), rq->fence.seqno), 10) && diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c deleted file mode 100644 index f03b407fdbe2..000000000000 --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c +++ /dev/null @@ -1,470 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "../i915_selftest.h" -#include "i915_random.h" - -#include "mock_gem_device.h" -#include "mock_engine.h" - -static int check_rbtree(struct intel_engine_cs *engine, - const unsigned long *bitmap, - const struct intel_wait *waiters, - const int count) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - struct rb_node *rb; - int n; - - if (&b->irq_wait->node != rb_first(&b->waiters)) { - pr_err("First waiter does not match first element of wait-tree\n"); - return -EINVAL; - } - - n = find_first_bit(bitmap, count); - for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { - struct intel_wait *w = container_of(rb, typeof(*w), node); - int idx = w - waiters; - - if (!test_bit(idx, bitmap)) { - pr_err("waiter[%d, seqno=%d] removed but still in wait-tree\n", - idx, w->seqno); - return -EINVAL; - } - - if (n != idx) { - pr_err("waiter[%d, seqno=%d] does not match expected next element in tree [%d]\n", - idx, w->seqno, n); - return -EINVAL; - } - - n = find_next_bit(bitmap, count, n + 1); - } - - return 0; -} - -static int check_completion(struct intel_engine_cs *engine, - const unsigned long *bitmap, - const struct intel_wait *waiters, - const int count) -{ - int n; - - for (n = 0; n < count; n++) { - if (intel_wait_complete(&waiters[n]) != !!test_bit(n, bitmap)) - continue; - - pr_err("waiter[%d, seqno=%d] is %s, but expected %s\n", - n, waiters[n].seqno, - intel_wait_complete(&waiters[n]) ? 
"complete" : "active", - test_bit(n, bitmap) ? "active" : "complete"); - return -EINVAL; - } - - return 0; -} - -static int check_rbtree_empty(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; - - if (b->irq_wait) { - pr_err("Empty breadcrumbs still has a waiter\n"); - return -EINVAL; - } - - if (!RB_EMPTY_ROOT(&b->waiters)) { - pr_err("Empty breadcrumbs, but wait-tree not empty\n"); - return -EINVAL; - } - - return 0; -} - -static int igt_random_insert_remove(void *arg) -{ - const u32 seqno_bias = 0x1000; - I915_RND_STATE(prng); - struct intel_engine_cs *engine = arg; - struct intel_wait *waiters; - const int count = 4096; - unsigned int *order; - unsigned long *bitmap; - int err = -ENOMEM; - int n; - - mock_engine_reset(engine); - - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); - if (!waiters) - goto out_engines; - - bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_KERNEL); - if (!bitmap) - goto out_waiters; - - order = i915_random_order(count, &prng); - if (!order) - goto out_bitmap; - - for (n = 0; n < count; n++) - intel_wait_init_for_seqno(&waiters[n], seqno_bias + n); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_order; - - /* Add and remove waiters into the rbtree in random order. At each - * step, we verify that the rbtree is correctly ordered. - */ - for (n = 0; n < count; n++) { - int i = order[n]; - - intel_engine_add_wait(engine, &waiters[i]); - __set_bit(i, bitmap); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_order; - } - - i915_random_reorder(order, count, &prng); - for (n = 0; n < count; n++) { - int i = order[n]; - - intel_engine_remove_wait(engine, &waiters[i]); - __clear_bit(i, bitmap); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_order; - } - - err = check_rbtree_empty(engine); -out_order: - kfree(order); -out_bitmap: - kfree(bitmap); -out_waiters: - kvfree(waiters); -out_engines: - mock_engine_flush(engine); - return err; -} - -static int igt_insert_complete(void *arg) -{ - const u32 seqno_bias = 0x1000; - struct intel_engine_cs *engine = arg; - struct intel_wait *waiters; - const int count = 4096; - unsigned long *bitmap; - int err = -ENOMEM; - int n, m; - - mock_engine_reset(engine); - - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); - if (!waiters) - goto out_engines; - - bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_KERNEL); - if (!bitmap) - goto out_waiters; - - for (n = 0; n < count; n++) { - intel_wait_init_for_seqno(&waiters[n], n + seqno_bias); - intel_engine_add_wait(engine, &waiters[n]); - __set_bit(n, bitmap); - } - err = check_rbtree(engine, bitmap, waiters, count); - if (err) - goto out_bitmap; - - /* On each step, we advance the seqno so that several waiters are then - * complete (we increase the seqno by increasingly larger values to - * retire more and more waiters at once). All retired waiters should - * be woken and removed from the rbtree, and so that we check. 
- */ - for (n = 0; n < count; n = m) { - int seqno = 2 * n; - - GEM_BUG_ON(find_first_bit(bitmap, count) != n); - - if (intel_wait_complete(&waiters[n])) { - pr_err("waiter[%d, seqno=%d] completed too early\n", - n, waiters[n].seqno); - err = -EINVAL; - goto out_bitmap; - } - - /* complete the following waiters */ - mock_seqno_advance(engine, seqno + seqno_bias); - for (m = n; m <= seqno; m++) { - if (m == count) - break; - - GEM_BUG_ON(!test_bit(m, bitmap)); - __clear_bit(m, bitmap); - } - - intel_engine_remove_wait(engine, &waiters[n]); - RB_CLEAR_NODE(&waiters[n].node); - - err = check_rbtree(engine, bitmap, waiters, count); - if (err) { - pr_err("rbtree corrupt after seqno advance to %d\n", - seqno + seqno_bias); - goto out_bitmap; - } - - err = check_completion(engine, bitmap, waiters, count); - if (err) { - pr_err("completions after seqno advance to %d failed\n", - seqno + seqno_bias); - goto out_bitmap; - } - } - - err = check_rbtree_empty(engine); -out_bitmap: - kfree(bitmap); -out_waiters: - kvfree(waiters); -out_engines: - mock_engine_flush(engine); - return err; -} - -struct igt_wakeup { - struct task_struct *tsk; - atomic_t *ready, *set, *done; - struct intel_engine_cs *engine; - unsigned long flags; -#define STOP 0 -#define IDLE 1 - wait_queue_head_t *wq; - u32 seqno; -}; - -static bool wait_for_ready(struct igt_wakeup *w) -{ - DEFINE_WAIT(ready); - - set_bit(IDLE, &w->flags); - if (atomic_dec_and_test(w->done)) - wake_up_var(w->done); - - if (test_bit(STOP, &w->flags)) - goto out; - - for (;;) { - prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE); - if (atomic_read(w->ready) == 0) - break; - - schedule(); - } - finish_wait(w->wq, &ready); - -out: - clear_bit(IDLE, &w->flags); - if (atomic_dec_and_test(w->set)) - wake_up_var(w->set); - - return !test_bit(STOP, &w->flags); -} - -static int igt_wakeup_thread(void *arg) -{ - struct igt_wakeup *w = arg; - struct intel_wait wait; - - while (wait_for_ready(w)) { - GEM_BUG_ON(kthread_should_stop()); - - intel_wait_init_for_seqno(&wait, w->seqno); - intel_engine_add_wait(w->engine, &wait); - for (;;) { - set_current_state(TASK_UNINTERRUPTIBLE); - if (i915_seqno_passed(intel_engine_get_seqno(w->engine), - w->seqno)) - break; - - if (test_bit(STOP, &w->flags)) /* emergency escape */ - break; - - schedule(); - } - intel_engine_remove_wait(w->engine, &wait); - __set_current_state(TASK_RUNNING); - } - - return 0; -} - -static void igt_wake_all_sync(atomic_t *ready, - atomic_t *set, - atomic_t *done, - wait_queue_head_t *wq, - int count) -{ - atomic_set(set, count); - atomic_set(ready, 0); - wake_up_all(wq); - - wait_var_event(set, !atomic_read(set)); - atomic_set(ready, count); - atomic_set(done, count); -} - -static int igt_wakeup(void *arg) -{ - I915_RND_STATE(prng); - struct intel_engine_cs *engine = arg; - struct igt_wakeup *waiters; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); - const int count = 4096; - const u32 max_seqno = count / 4; - atomic_t ready, set, done; - int err = -ENOMEM; - int n, step; - - mock_engine_reset(engine); - - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); - if (!waiters) - goto out_engines; - - /* Create a large number of threads, each waiting on a random seqno. - * Multiple waiters will be waiting for the same seqno. 
- */ - atomic_set(&ready, count); - for (n = 0; n < count; n++) { - waiters[n].wq = &wq; - waiters[n].ready = &ready; - waiters[n].set = &set; - waiters[n].done = &done; - waiters[n].engine = engine; - waiters[n].flags = BIT(IDLE); - - waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n], - "i915/igt:%d", n); - if (IS_ERR(waiters[n].tsk)) - goto out_waiters; - - get_task_struct(waiters[n].tsk); - } - - for (step = 1; step <= max_seqno; step <<= 1) { - u32 seqno; - - /* The waiter threads start paused as we assign them a random - * seqno and reset the engine. Once the engine is reset, - * we signal that the threads may begin their wait upon their - * seqno. - */ - for (n = 0; n < count; n++) { - GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags)); - waiters[n].seqno = - 1 + prandom_u32_state(&prng) % max_seqno; - } - mock_seqno_advance(engine, 0); - igt_wake_all_sync(&ready, &set, &done, &wq, count); - - /* Simulate the GPU doing chunks of work, with one or more - * seqno appearing to finish at the same time. A random number - * of threads will be waiting upon the update and hopefully be - * woken. - */ - for (seqno = 1; seqno <= max_seqno + step; seqno += step) { - usleep_range(50, 500); - mock_seqno_advance(engine, seqno); - } - GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno); - - /* With the seqno now beyond any of the waiting threads, they - * should all be woken, see that they are complete and signal - * that they are ready for the next test. We wait until all - * threads are complete and waiting for us (i.e. not a seqno). - */ - if (!wait_var_event_timeout(&done, - !atomic_read(&done), 10 * HZ)) { - pr_err("Timed out waiting for %d remaining waiters\n", - atomic_read(&done)); - err = -ETIMEDOUT; - break; - } - - err = check_rbtree_empty(engine); - if (err) - break; - } - -out_waiters: - for (n = 0; n < count; n++) { - if (IS_ERR(waiters[n].tsk)) - break; - - set_bit(STOP, &waiters[n].flags); - } - mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */ - igt_wake_all_sync(&ready, &set, &done, &wq, n); - - for (n = 0; n < count; n++) { - if (IS_ERR(waiters[n].tsk)) - break; - - kthread_stop(waiters[n].tsk); - put_task_struct(waiters[n].tsk); - } - - kvfree(waiters); -out_engines: - mock_engine_flush(engine); - return err; -} - -int intel_breadcrumbs_mock_selftests(void) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_random_insert_remove), - SUBTEST(igt_insert_complete), - SUBTEST(igt_wakeup), - }; - struct drm_i915_private *i915; - int err; - - i915 = mock_gem_device(); - if (!i915) - return -ENOMEM; - - err = i915_subtests(tests, i915->engine[RCS]); - drm_dev_put(&i915->drm); - - return err; -} diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 32cba4cae31a..b05a21eaa8f4 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -111,7 +111,7 @@ static int validate_client(struct intel_guc_client *client, dev_priv->preempt_context : dev_priv->kernel_context; if (client->owner != ctx_owner || - client->engines != INTEL_INFO(dev_priv)->ring_mask || + client->engines != INTEL_INFO(dev_priv)->engine_mask || client->priority != client_priority || client->doorbell_id == GUC_DOORBELL_INVALID) return -EINVAL; @@ -137,12 +137,13 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client) static int igt_guc_clients(void *args) { struct drm_i915_private *dev_priv = args; + intel_wakeref_t wakeref; struct intel_guc *guc; int err = 0; 
GEM_BUG_ON(!HAS_GUC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); guc = &dev_priv->guc; if (!guc) { @@ -225,7 +226,7 @@ out: guc_clients_create(guc); guc_clients_enable(guc); unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } @@ -238,13 +239,14 @@ unlock: static int igt_guc_doorbells(void *arg) { struct drm_i915_private *dev_priv = arg; + intel_wakeref_t wakeref; struct intel_guc *guc; int i, err = 0; u16 db_id; GEM_BUG_ON(!HAS_GUC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); guc = &dev_priv->guc; if (!guc) { @@ -259,7 +261,7 @@ static int igt_guc_doorbells(void *arg) for (i = 0; i < ATTEMPTS; i++) { clients[i] = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->ring_mask, + INTEL_INFO(dev_priv)->engine_mask, i % GUC_CLIENT_PRIORITY_NUM, dev_priv->kernel_context); @@ -337,7 +339,7 @@ out: guc_client_free(clients[i]); } unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 40efbed611de..050bd1e19e02 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -56,6 +56,8 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915) if (IS_ERR(h->ctx)) return PTR_ERR(h->ctx); + GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx)); + h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(h->hws)) { err = PTR_ERR(h->hws); @@ -68,7 +70,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915) goto err_hws; } - i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC); + i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC); vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); @@ -103,52 +105,87 @@ static u64 hws_address(const struct i915_vma *hws, return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context); } -static int emit_recurse_batch(struct hang *h, - struct i915_request *rq) +static int move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) +{ + int err; + + err = i915_vma_move_to_active(vma, rq, flags); + if (err) + return err; + + if (!i915_gem_object_has_active_reference(vma->obj)) { + i915_gem_object_get(vma->obj); + i915_gem_object_set_active_reference(vma->obj); + } + + return 0; +} + +static struct i915_request * +hang_create_request(struct hang *h, struct intel_engine_cs *engine) { struct drm_i915_private *i915 = h->i915; struct i915_address_space *vm = - rq->gem_context->ppgtt ? - &rq->gem_context->ppgtt->vm : - &i915->ggtt.vm; + h->ctx->ppgtt ? 
&h->ctx->ppgtt->vm : &i915->ggtt.vm; + struct i915_request *rq = NULL; struct i915_vma *hws, *vma; unsigned int flags; u32 *batch; int err; + if (i915_gem_object_is_active(h->obj)) { + struct drm_i915_gem_object *obj; + void *vaddr; + + obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vaddr = i915_gem_object_pin_map(obj, + i915_coherent_map_type(h->i915)); + if (IS_ERR(vaddr)) { + i915_gem_object_put(obj); + return ERR_CAST(vaddr); + } + + i915_gem_object_unpin_map(h->obj); + i915_gem_object_put(h->obj); + + h->obj = obj; + h->batch = vaddr; + } + vma = i915_vma_instance(h->obj, vm, NULL); if (IS_ERR(vma)) - return PTR_ERR(vma); + return ERR_CAST(vma); hws = i915_vma_instance(h->hws, vm, NULL); if (IS_ERR(hws)) - return PTR_ERR(hws); + return ERR_CAST(hws); err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) - return err; + return ERR_PTR(err); err = i915_vma_pin(hws, 0, 0, PIN_USER); if (err) goto unpin_vma; - err = i915_vma_move_to_active(vma, rq, 0); - if (err) + rq = i915_request_alloc(engine, h->ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); goto unpin_hws; - - if (!i915_gem_object_has_active_reference(vma->obj)) { - i915_gem_object_get(vma->obj); - i915_gem_object_set_active_reference(vma->obj); } - err = i915_vma_move_to_active(hws, rq, 0); + err = move_to_active(vma, rq, 0); if (err) - goto unpin_hws; + goto cancel_rq; - if (!i915_gem_object_has_active_reference(hws->obj)) { - i915_gem_object_get(hws->obj); - i915_gem_object_set_active_reference(hws->obj); - } + err = move_to_active(hws, rq, 0); + if (err) + goto cancel_rq; batch = h->batch; if (INTEL_GEN(i915) >= 8) { @@ -207,58 +244,28 @@ static int emit_recurse_batch(struct hang *h, *batch++ = MI_BATCH_BUFFER_END; /* not reached */ i915_gem_chipset_flush(h->i915); + if (rq->engine->emit_init_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (err) + goto cancel_rq; + } + flags = 0; if (INTEL_GEN(vm->i915) <= 5) flags |= I915_DISPATCH_SECURE; err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); +cancel_rq: + if (err) { + i915_request_skip(rq, err); + i915_request_add(rq); + } unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); - return err; -} - -static struct i915_request * -hang_create_request(struct hang *h, struct intel_engine_cs *engine) -{ - struct i915_request *rq; - int err; - - if (i915_gem_object_is_active(h->obj)) { - struct drm_i915_gem_object *obj; - void *vaddr; - - obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - vaddr = i915_gem_object_pin_map(obj, - i915_coherent_map_type(h->i915)); - if (IS_ERR(vaddr)) { - i915_gem_object_put(obj); - return ERR_CAST(vaddr); - } - - i915_gem_object_unpin_map(h->obj); - i915_gem_object_put(h->obj); - - h->obj = obj; - h->batch = vaddr; - } - - rq = i915_request_alloc(engine, h->ctx); - if (IS_ERR(rq)) - return rq; - - err = emit_recurse_batch(h, rq); - if (err) { - i915_request_add(rq); - return ERR_PTR(err); - } - - return rq; + return err ? 
ERR_PTR(err) : rq; } static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) @@ -335,7 +342,7 @@ static int igt_hang_sanitycheck(void *arg) timeout = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) timeout = -EIO; i915_request_put(rq); @@ -364,9 +371,7 @@ static int igt_global_reset(void *arg) /* Check that we can issue a global GPU reset */ igt_global_reset_lock(i915); - set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags); - mutex_lock(&i915->drm.struct_mutex); reset_count = i915_reset_count(&i915->gpu_error); i915_reset(i915, ALL_ENGINES, NULL); @@ -375,22 +380,257 @@ static int igt_global_reset(void *arg) pr_err("No GPU reset recorded!\n"); err = -EINVAL; } - mutex_unlock(&i915->drm.struct_mutex); - GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); igt_global_reset_unlock(i915); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) err = -EIO; return err; } +static int igt_wedged_reset(void *arg) +{ + struct drm_i915_private *i915 = arg; + intel_wakeref_t wakeref; + + /* Check that we can recover a wedged device with a GPU reset */ + + igt_global_reset_lock(i915); + wakeref = intel_runtime_pm_get(i915); + + i915_gem_set_wedged(i915); + + GEM_BUG_ON(!i915_reset_failed(i915)); + i915_reset(i915, ALL_ENGINES, NULL); + + intel_runtime_pm_put(i915, wakeref); + igt_global_reset_unlock(i915); + + return i915_reset_failed(i915) ? -EIO : 0; +} + static bool wait_for_idle(struct intel_engine_cs *engine) { return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0; } +static int igt_reset_nop(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + unsigned int reset_count, count; + enum intel_engine_id id; + intel_wakeref_t wakeref; + struct drm_file *file; + IGT_TIMEOUT(end_time); + int err = 0; + + /* Check that we can reset during non-user portions of requests */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + ctx = live_context(i915, file); + mutex_unlock(&i915->drm.struct_mutex); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + + i915_gem_context_clear_bannable(ctx); + wakeref = intel_runtime_pm_get(i915); + reset_count = i915_reset_count(&i915->gpu_error); + count = 0; + do { + mutex_lock(&i915->drm.struct_mutex); + for_each_engine(engine, i915, id) { + int i; + + for (i = 0; i < 16; i++) { + struct i915_request *rq; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + } + } + mutex_unlock(&i915->drm.struct_mutex); + + igt_global_reset_lock(i915); + i915_reset(i915, ALL_ENGINES, NULL); + igt_global_reset_unlock(i915); + if (i915_reset_failed(i915)) { + err = -EIO; + break; + } + + if (i915_reset_count(&i915->gpu_error) != + reset_count + ++count) { + pr_err("Full GPU reset not recorded!\n"); + err = -EINVAL; + break; + } + + if (!i915_reset_flush(i915)) { + struct drm_printer p = + drm_info_printer(i915->drm.dev); + + pr_err("%s failed to idle after reset\n", + engine->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + err = -EIO; + break; + } + + err = igt_flush_test(i915, 0); + if (err) + break; + } while (time_before(jiffies, end_time)); + pr_info("%s: %d resets\n", __func__, count); + + mutex_lock(&i915->drm.struct_mutex); + err = igt_flush_test(i915, I915_WAIT_LOCKED); + 
mutex_unlock(&i915->drm.struct_mutex); + + intel_runtime_pm_put(i915, wakeref); + +out: + mock_file_free(i915, file); + if (i915_reset_failed(i915)) + err = -EIO; + return err; +} + +static int igt_reset_nop_engine(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + intel_wakeref_t wakeref; + struct drm_file *file; + int err = 0; + + /* Check that we can engine-reset during non-user portions */ + + if (!intel_has_reset_engine(i915)) + return 0; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + ctx = live_context(i915, file); + mutex_unlock(&i915->drm.struct_mutex); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + + i915_gem_context_clear_bannable(ctx); + wakeref = intel_runtime_pm_get(i915); + for_each_engine(engine, i915, id) { + unsigned int reset_count, reset_engine_count; + unsigned int count; + IGT_TIMEOUT(end_time); + + reset_count = i915_reset_count(&i915->gpu_error); + reset_engine_count = i915_reset_engine_count(&i915->gpu_error, + engine); + count = 0; + + set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + do { + int i; + + if (!wait_for_idle(engine)) { + pr_err("%s failed to idle before reset\n", + engine->name); + err = -EIO; + break; + } + + mutex_lock(&i915->drm.struct_mutex); + for (i = 0; i < 16; i++) { + struct i915_request *rq; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + } + mutex_unlock(&i915->drm.struct_mutex); + + err = i915_reset_engine(engine, NULL); + if (err) { + pr_err("i915_reset_engine failed\n"); + break; + } + + if (i915_reset_count(&i915->gpu_error) != reset_count) { + pr_err("Full GPU reset recorded! 
(engine reset expected)\n"); + err = -EINVAL; + break; + } + + if (i915_reset_engine_count(&i915->gpu_error, engine) != + reset_engine_count + ++count) { + pr_err("%s engine reset not recorded!\n", + engine->name); + err = -EINVAL; + break; + } + + if (!i915_reset_flush(i915)) { + struct drm_printer p = + drm_info_printer(i915->drm.dev); + + pr_err("%s failed to idle after reset\n", + engine->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + err = -EIO; + break; + } + } while (time_before(jiffies, end_time)); + clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + pr_info("%s(%s): %d resets\n", __func__, engine->name, count); + + if (err) + break; + + err = igt_flush_test(i915, 0); + if (err) + break; + } + + mutex_lock(&i915->drm.struct_mutex); + err = igt_flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + + intel_runtime_pm_put(i915, wakeref); +out: + mock_file_free(i915, file); + if (i915_reset_failed(i915)) + err = -EIO; + return err; +} + static int __igt_reset_engine(struct drm_i915_private *i915, bool active) { struct intel_engine_cs *engine; @@ -431,8 +671,6 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); do { - u32 seqno = intel_engine_get_seqno(engine); - if (active) { struct i915_request *rq; @@ -451,7 +689,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s: Failed to start request %x, at %x\n", + pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(engine, &p, "%s\n", engine->name); @@ -461,8 +699,6 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } - GEM_BUG_ON(!rq->global_seqno); - seqno = rq->global_seqno - 1; i915_request_put(rq); } @@ -478,16 +714,15 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } - reset_engine_count += active; if (i915_reset_engine_count(&i915->gpu_error, engine) != - reset_engine_count) { - pr_err("%s engine reset %srecorded!\n", - engine->name, active ? 
"not " : ""); + ++reset_engine_count) { + pr_err("%s engine reset not recorded!\n", + engine->name); err = -EINVAL; break; } - if (!wait_for_idle(engine)) { + if (!i915_reset_flush(i915)) { struct drm_printer p = drm_info_printer(i915->drm.dev); @@ -510,7 +745,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) err = -EIO; if (active) { @@ -552,11 +787,10 @@ static int active_request_put(struct i915_request *rq) return 0; if (i915_request_wait(rq, 0, 5 * HZ) < 0) { - GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n", + GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n", rq->engine->name, rq->fence.context, - rq->fence.seqno, - i915_request_global_seqno(rq)); + rq->fence.seqno); GEM_TRACE_DUMP(); i915_gem_set_wedged(rq->i915); @@ -710,7 +944,6 @@ static int __igt_reset_engines(struct drm_i915_private *i915, set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); do { - u32 seqno = intel_engine_get_seqno(engine); struct i915_request *rq = NULL; if (flags & TEST_ACTIVE) { @@ -729,7 +962,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s: Failed to start request %x, at %x\n", + pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(engine, &p, "%s\n", engine->name); @@ -738,9 +971,6 @@ static int __igt_reset_engines(struct drm_i915_private *i915, err = -EIO; break; } - - GEM_BUG_ON(!rq->global_seqno); - seqno = rq->global_seqno - 1; } err = i915_reset_engine(engine, NULL); @@ -753,7 +983,23 @@ static int __igt_reset_engines(struct drm_i915_private *i915, count++; if (rq) { - i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(i915->drm.dev); + + pr_err("i915_reset_engine(%s:%s):" + " failed to complete request after reset\n", + engine->name, test_name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + i915_request_put(rq); + + GEM_TRACE_DUMP(); + i915_gem_set_wedged(i915); + err = -EIO; + break; + } + i915_request_put(rq); } @@ -777,10 +1023,9 @@ static int __igt_reset_engines(struct drm_i915_private *i915, reported = i915_reset_engine_count(&i915->gpu_error, engine); reported -= threads[engine->id].resets; - if (reported != (flags & TEST_ACTIVE ? count : 0)) { - pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu, expected %lu reported\n", - engine->name, test_name, count, reported, - (flags & TEST_ACTIVE ? 
count : 0)); + if (reported != count) { + pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n", + engine->name, test_name, count, reported); if (!err) err = -EINVAL; } @@ -829,7 +1074,7 @@ unwind: break; } - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) err = -EIO; if (flags & TEST_ACTIVE) { @@ -879,20 +1124,14 @@ static int igt_reset_engines(void *arg) return 0; } -static u32 fake_hangcheck(struct i915_request *rq, u32 mask) +static u32 fake_hangcheck(struct drm_i915_private *i915, + intel_engine_mask_t mask) { - struct i915_gpu_error *error = &rq->i915->gpu_error; - u32 reset_count = i915_reset_count(error); - - error->stalled_mask = mask; + u32 count = i915_reset_count(&i915->gpu_error); - /* set_bit() must be after we have setup the backchannel (mask) */ - smp_mb__before_atomic(); - set_bit(I915_RESET_HANDOFF, &error->flags); + i915_reset(i915, mask, NULL); - wake_up_all(&error->wait_queue); - - return reset_count; + return count; } static int igt_reset_wait(void *arg) @@ -904,7 +1143,7 @@ static int igt_reset_wait(void *arg) long timeout; int err; - if (!intel_engine_can_store_dword(i915->engine[RCS])) + if (!intel_engine_can_store_dword(i915->engine[RCS0])) return 0; /* Check that we detect a stuck waiter and issue a reset */ @@ -916,7 +1155,7 @@ static int igt_reset_wait(void *arg) if (err) goto unlock; - rq = hang_create_request(&h, i915->engine[RCS]); + rq = hang_create_request(&h, i915->engine[RCS0]); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto fini; @@ -928,7 +1167,7 @@ static int igt_reset_wait(void *arg) if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s: Failed to start request %x, at %x\n", + pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); @@ -938,7 +1177,7 @@ static int igt_reset_wait(void *arg) goto out_rq; } - reset_count = fake_hangcheck(rq, ALL_ENGINES); + reset_count = fake_hangcheck(i915, ALL_ENGINES); timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10); if (timeout < 0) { @@ -948,7 +1187,6 @@ static int igt_reset_wait(void *arg) goto out_rq; } - GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); if (i915_reset_count(&i915->gpu_error) == reset_count) { pr_err("No GPU reset recorded!\n"); err = -EINVAL; @@ -963,7 +1201,7 @@ unlock: mutex_unlock(&i915->drm.struct_mutex); igt_global_reset_unlock(i915); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) return -EIO; return err; @@ -1034,13 +1272,11 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, struct hang h; int err; - if (!intel_engine_can_store_dword(i915->engine[RCS])) + if (!intel_engine_can_store_dword(i915->engine[RCS0])) return 0; /* Check that we can recover an unbind stuck on a hanging request */ - igt_global_reset_lock(i915); - mutex_lock(&i915->drm.struct_mutex); err = hang_init(&h, i915); if (err) @@ -1066,7 +1302,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, goto out_obj; } - rq = hang_create_request(&h, i915->engine[RCS]); + rq = hang_create_request(&h, i915->engine[RCS0]); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_obj; @@ -1107,7 +1343,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s: Failed to start request %x, at %x\n", + pr_err("%s: Failed to start request %llx, at %x\n", __func__, 
rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); @@ -1127,7 +1363,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, wait_for_completion(&arg.completion); - if (wait_for(waitqueue_active(&rq->execute), 10)) { + if (wait_for(!list_empty(&rq->fence.cb_list), 10)) { struct drm_printer p = drm_info_printer(i915->drm.dev); pr_err("igt/evict_vma kthread did not wait\n"); @@ -1138,7 +1374,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, } out_reset: - fake_hangcheck(rq, intel_engine_flag(rq->engine)); + igt_global_reset_lock(i915); + fake_hangcheck(rq->i915, rq->engine->mask); + igt_global_reset_unlock(i915); if (tsk) { struct igt_wedge_me w; @@ -1159,9 +1397,8 @@ fini: hang_fini(&h); unlock: mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) return -EIO; return err; @@ -1302,7 +1539,7 @@ static int igt_reset_queue(void *arg) if (!wait_until_running(&h, prev)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s(%s): Failed to start request %x, at %x\n", + pr_err("%s(%s): Failed to start request %llx, at %x\n", __func__, engine->name, prev->fence.seqno, hws_seqno(&h, prev)); intel_engine_dump(engine, &p, @@ -1317,12 +1554,7 @@ static int igt_reset_queue(void *arg) goto fini; } - reset_count = fake_hangcheck(prev, ENGINE_MASK(id)); - - i915_reset(i915, ENGINE_MASK(id), NULL); - - GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, - &i915->gpu_error.flags)); + reset_count = fake_hangcheck(i915, BIT(id)); if (prev->fence.error != -EIO) { pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", @@ -1372,7 +1604,7 @@ unlock: mutex_unlock(&i915->drm.struct_mutex); igt_global_reset_unlock(i915); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) return -EIO; return err; @@ -1381,7 +1613,7 @@ unlock: static int igt_handle_error(void *arg) { struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS]; + struct intel_engine_cs *engine = i915->engine[RCS0]; struct hang h; struct i915_request *rq; struct i915_gpu_state *error; @@ -1413,7 +1645,7 @@ static int igt_handle_error(void *arg) if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); - pr_err("%s: Failed to start request %x, at %x\n", + pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); @@ -1428,7 +1660,7 @@ static int igt_handle_error(void *arg) /* Temporarily disable error capture */ error = xchg(&i915->gpu_error.first_error, (void *)-1); - i915_handle_error(i915, ENGINE_MASK(engine->id), 0, NULL); + i915_handle_error(i915, engine->mask, 0, NULL); xchg(&i915->gpu_error.first_error, error); @@ -1449,11 +1681,206 @@ err_unlock: return err; } +static void __preempt_begin(void) +{ + preempt_disable(); +} + +static void __preempt_end(void) +{ + preempt_enable(); +} + +static void __softirq_begin(void) +{ + local_bh_disable(); +} + +static void __softirq_end(void) +{ + local_bh_enable(); +} + +static void __hardirq_begin(void) +{ + local_irq_disable(); +} + +static void __hardirq_end(void) +{ + local_irq_enable(); +} + +struct atomic_section { + const char *name; + void (*critical_section_begin)(void); + void (*critical_section_end)(void); +}; + +static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, + const struct atomic_section *p, + 
const char *mode) +{ + struct tasklet_struct * const t = &engine->execlists.tasklet; + int err; + + GEM_TRACE("i915_reset_engine(%s:%s) under %s\n", + engine->name, mode, p->name); + + tasklet_disable_nosync(t); + p->critical_section_begin(); + + err = i915_reset_engine(engine, NULL); + + p->critical_section_end(); + tasklet_enable(t); + + if (err) + pr_err("i915_reset_engine(%s:%s) failed under %s\n", + engine->name, mode, p->name); + + return err; +} + +static int igt_atomic_reset_engine(struct intel_engine_cs *engine, + const struct atomic_section *p) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_request *rq; + struct hang h; + int err; + + err = __igt_atomic_reset_engine(engine, p, "idle"); + if (err) + return err; + + err = hang_init(&h, i915); + if (err) + return err; + + rq = hang_create_request(&h, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (wait_until_running(&h, rq)) { + err = __igt_atomic_reset_engine(engine, p, "active"); + } else { + pr_err("%s(%s): Failed to start request %llx, at %x\n", + __func__, engine->name, + rq->fence.seqno, hws_seqno(&h, rq)); + i915_gem_set_wedged(i915); + err = -EIO; + } + + if (err == 0) { + struct igt_wedge_me w; + + igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/) + i915_request_wait(rq, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (i915_reset_failed(i915)) + err = -EIO; + } + + i915_request_put(rq); +out: + hang_fini(&h); + return err; +} + +static void force_reset(struct drm_i915_private *i915) +{ + i915_gem_set_wedged(i915); + i915_reset(i915, 0, NULL); +} + +static int igt_atomic_reset(void *arg) +{ + static const struct atomic_section phases[] = { + { "preempt", __preempt_begin, __preempt_end }, + { "softirq", __softirq_begin, __softirq_end }, + { "hardirq", __hardirq_begin, __hardirq_end }, + { } + }; + struct drm_i915_private *i915 = arg; + intel_wakeref_t wakeref; + int err = 0; + + /* Check that the resets are usable from atomic context */ + + if (USES_GUC_SUBMISSION(i915)) + return 0; /* guc is dead; long live the guc */ + + igt_global_reset_lock(i915); + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + /* Flush any requests before we get started and check basics */ + force_reset(i915); + if (i915_reset_failed(i915)) + goto unlock; + + if (intel_has_gpu_reset(i915)) { + const typeof(*phases) *p; + + for (p = phases; p->name; p++) { + GEM_TRACE("intel_gpu_reset under %s\n", p->name); + + p->critical_section_begin(); + err = intel_gpu_reset(i915, ALL_ENGINES); + p->critical_section_end(); + + if (err) { + pr_err("intel_gpu_reset failed under %s\n", + p->name); + goto out; + } + } + + force_reset(i915); + } + + if (intel_has_reset_engine(i915)) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) { + const typeof(*phases) *p; + + for (p = phases; p->name; p++) { + err = igt_atomic_reset_engine(engine, p); + if (err) + goto out; + } + } + } + +out: + /* As we poke around the guts, do a full reset before continuing. 
*/ + force_reset(i915); + +unlock: + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + igt_global_reset_unlock(i915); + + return err; +} + int intel_hangcheck_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_global_reset), /* attempt to recover GPU first */ + SUBTEST(igt_wedged_reset), SUBTEST(igt_hang_sanitycheck), + SUBTEST(igt_reset_nop), + SUBTEST(igt_reset_nop_engine), SUBTEST(igt_reset_idle_engine), SUBTEST(igt_reset_active_engine), SUBTEST(igt_reset_engines), @@ -1463,18 +1890,21 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_reset_evict_ppgtt), SUBTEST(igt_reset_evict_fence), SUBTEST(igt_handle_error), + SUBTEST(igt_atomic_reset), }; + intel_wakeref_t wakeref; bool saved_hangcheck; int err; if (!intel_has_gpu_reset(i915)) return 0; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return -EIO; /* we're long past hope of a successful reset */ - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); + drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */ err = i915_subtests(tests, i915); @@ -1483,7 +1913,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) mutex_unlock(&i915->drm.struct_mutex); i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index ca461e3a5f27..0d3cae564db8 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -4,8 +4,13 @@ * Copyright © 2018 Intel Corporation */ +#include <linux/prime_numbers.h> + +#include "../i915_reset.h" + #include "../i915_selftest.h" #include "igt_flush_test.h" +#include "igt_live_test.h" #include "igt_spinner.h" #include "i915_random.h" @@ -18,13 +23,14 @@ static int live_sanitycheck(void *arg) struct i915_gem_context *ctx; enum intel_engine_id id; struct igt_spinner spin; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_CONTEXTS(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin, i915)) goto err_unlock; @@ -65,7 +71,7 @@ err_spin: igt_spinner_fini(&spin); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -77,13 +83,17 @@ static int live_preempt(void *arg) struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_PREEMPTION(i915)) return 0; + if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) + pr_err("Logical preemption supported, but not exposed\n"); + mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -104,8 +114,17 @@ static int live_preempt(void *arg) I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); for_each_engine(engine, i915, id) { + struct igt_live_test t; struct i915_request *rq; + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_live_test_begin(&t, i915, __func__, engine->name)) { + err = -EIO; + goto err_ctx_lo; + } + rq = 
igt_spinner_create_request(&spin_lo, ctx_lo, engine, MI_ARB_CHECK); if (IS_ERR(rq)) { @@ -141,7 +160,8 @@ static int live_preempt(void *arg) igt_spinner_end(&spin_hi); igt_spinner_end(&spin_lo); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + + if (igt_live_test_end(&t)) { err = -EIO; goto err_ctx_lo; } @@ -158,7 +178,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -171,13 +191,14 @@ static int live_late_preempt(void *arg) struct intel_engine_cs *engine; struct i915_sched_attr attr = {}; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_PREEMPTION(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -194,8 +215,17 @@ static int live_late_preempt(void *arg) goto err_ctx_hi; for_each_engine(engine, i915, id) { + struct igt_live_test t; struct i915_request *rq; + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_live_test_begin(&t, i915, __func__, engine->name)) { + err = -EIO; + goto err_ctx_lo; + } + rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, MI_ARB_CHECK); if (IS_ERR(rq)) { @@ -234,7 +264,8 @@ static int live_late_preempt(void *arg) igt_spinner_end(&spin_hi); igt_spinner_end(&spin_lo); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + + if (igt_live_test_end(&t)) { err = -EIO; goto err_ctx_lo; } @@ -251,7 +282,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -263,6 +294,459 @@ err_wedged: goto err_ctx_lo; } +struct preempt_client { + struct igt_spinner spin; + struct i915_gem_context *ctx; +}; + +static int preempt_client_init(struct drm_i915_private *i915, + struct preempt_client *c) +{ + c->ctx = kernel_context(i915); + if (!c->ctx) + return -ENOMEM; + + if (igt_spinner_init(&c->spin, i915)) + goto err_ctx; + + return 0; + +err_ctx: + kernel_context_close(c->ctx); + return -ENOMEM; +} + +static void preempt_client_fini(struct preempt_client *c) +{ + igt_spinner_fini(&c->spin); + kernel_context_close(c->ctx); +} + +static int live_suppress_self_preempt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX) + }; + struct preempt_client a, b; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = -ENOMEM; + + /* + * Verify that if a preemption request does not cause a change in + * the current execution order, the preempt-to-idle injection is + * skipped and that we do not accidentally apply it after the CS + * completion event. 
+ */ + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + if (USES_GUC_SUBMISSION(i915)) + return 0; /* presume black blox */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + if (preempt_client_init(i915, &a)) + goto err_unlock; + if (preempt_client_init(i915, &b)) + goto err_client_a; + + for_each_engine(engine, i915, id) { + struct i915_request *rq_a, *rq_b; + int depth; + + if (!intel_engine_has_preemption(engine)) + continue; + + engine->execlists.preempt_hang.count = 0; + + rq_a = igt_spinner_create_request(&a.spin, + a.ctx, engine, + MI_NOOP); + if (IS_ERR(rq_a)) { + err = PTR_ERR(rq_a); + goto err_client_b; + } + + i915_request_add(rq_a); + if (!igt_wait_for_spinner(&a.spin, rq_a)) { + pr_err("First client failed to start\n"); + goto err_wedged; + } + + for (depth = 0; depth < 8; depth++) { + rq_b = igt_spinner_create_request(&b.spin, + b.ctx, engine, + MI_NOOP); + if (IS_ERR(rq_b)) { + err = PTR_ERR(rq_b); + goto err_client_b; + } + i915_request_add(rq_b); + + GEM_BUG_ON(i915_request_completed(rq_a)); + engine->schedule(rq_a, &attr); + igt_spinner_end(&a.spin); + + if (!igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client failed to start\n"); + goto err_wedged; + } + + swap(a, b); + rq_a = rq_b; + } + igt_spinner_end(&a.spin); + + if (engine->execlists.preempt_hang.count) { + pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n", + engine->execlists.preempt_hang.count, + depth); + err = -EINVAL; + goto err_client_b; + } + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&b); +err_client_a: + preempt_client_fini(&a); +err_unlock: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; + +err_wedged: + igt_spinner_end(&b.spin); + igt_spinner_end(&a.spin); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_client_b; +} + +static int __i915_sw_fence_call +dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +{ + return NOTIFY_DONE; +} + +static struct i915_request *dummy_request(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = kzalloc(sizeof(*rq), GFP_KERNEL); + if (!rq) + return NULL; + + INIT_LIST_HEAD(&rq->active_list); + rq->engine = engine; + + i915_sched_node_init(&rq->sched); + + /* mark this request as permanently incomplete */ + rq->fence.seqno = 1; + BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */ + rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1; + GEM_BUG_ON(i915_request_completed(rq)); + + i915_sw_fence_init(&rq->submit, dummy_notify); + i915_sw_fence_commit(&rq->submit); + + return rq; +} + +static void dummy_request_free(struct i915_request *dummy) +{ + i915_request_mark_complete(dummy); + i915_sched_node_fini(&dummy->sched); + i915_sw_fence_fini(&dummy->submit); + + dma_fence_free(&dummy->fence); +} + +static int live_suppress_wait_preempt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct preempt_client client[4]; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = -ENOMEM; + int i; + + /* + * Waiters are given a little priority nudge, but not enough + * to actually cause any preemption. Double check that we do + * not needlessly generate preempt-to-idle cycles. 
+ */ + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + if (preempt_client_init(i915, &client[0])) /* ELSP[0] */ + goto err_unlock; + if (preempt_client_init(i915, &client[1])) /* ELSP[1] */ + goto err_client_0; + if (preempt_client_init(i915, &client[2])) /* head of queue */ + goto err_client_1; + if (preempt_client_init(i915, &client[3])) /* bystander */ + goto err_client_2; + + for_each_engine(engine, i915, id) { + int depth; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (!engine->emit_init_breadcrumb) + continue; + + for (depth = 0; depth < ARRAY_SIZE(client); depth++) { + struct i915_request *rq[ARRAY_SIZE(client)]; + struct i915_request *dummy; + + engine->execlists.preempt_hang.count = 0; + + dummy = dummy_request(engine); + if (!dummy) + goto err_client_3; + + for (i = 0; i < ARRAY_SIZE(client); i++) { + rq[i] = igt_spinner_create_request(&client[i].spin, + client[i].ctx, engine, + MI_NOOP); + if (IS_ERR(rq[i])) { + err = PTR_ERR(rq[i]); + goto err_wedged; + } + + /* Disable NEWCLIENT promotion */ + __i915_active_request_set(&rq[i]->timeline->last_request, + dummy); + i915_request_add(rq[i]); + } + + dummy_request_free(dummy); + + GEM_BUG_ON(i915_request_completed(rq[0])); + if (!igt_wait_for_spinner(&client[0].spin, rq[0])) { + pr_err("%s: First client failed to start\n", + engine->name); + goto err_wedged; + } + GEM_BUG_ON(!i915_request_started(rq[0])); + + if (i915_request_wait(rq[depth], + I915_WAIT_LOCKED | + I915_WAIT_PRIORITY, + 1) != -ETIME) { + pr_err("%s: Waiter depth:%d completed!\n", + engine->name, depth); + goto err_wedged; + } + + for (i = 0; i < ARRAY_SIZE(client); i++) + igt_spinner_end(&client[i].spin); + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + goto err_wedged; + + if (engine->execlists.preempt_hang.count) { + pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n", + engine->name, + engine->execlists.preempt_hang.count, + depth); + err = -EINVAL; + goto err_client_3; + } + } + } + + err = 0; +err_client_3: + preempt_client_fini(&client[3]); +err_client_2: + preempt_client_fini(&client[2]); +err_client_1: + preempt_client_fini(&client[1]); +err_client_0: + preempt_client_fini(&client[0]); +err_unlock: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; + +err_wedged: + for (i = 0; i < ARRAY_SIZE(client); i++) + igt_spinner_end(&client[i].spin); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_client_3; +} + +static int live_chain_preempt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct preempt_client hi, lo; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = -ENOMEM; + + /* + * Build a chain AB...BA between two contexts (A, B) and request + * preemption of the last request. It should then complete before + * the previously submitted spinner in B. 
+ */ + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + if (preempt_client_init(i915, &hi)) + goto err_unlock; + + if (preempt_client_init(i915, &lo)) + goto err_client_hi; + + for_each_engine(engine, i915, id) { + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), + }; + struct igt_live_test t; + struct i915_request *rq; + int ring_size, count, i; + + if (!intel_engine_has_preemption(engine)) + continue; + + rq = igt_spinner_create_request(&lo.spin, + lo.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + + ring_size = rq->wa_tail - rq->head; + if (ring_size < 0) + ring_size += rq->ring->size; + ring_size = rq->ring->size / ring_size; + pr_debug("%s(%s): Using maximum of %d requests\n", + __func__, engine->name, ring_size); + + igt_spinner_end(&lo.spin); + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) { + pr_err("Timed out waiting to flush %s\n", engine->name); + goto err_wedged; + } + + if (igt_live_test_begin(&t, i915, __func__, engine->name)) { + err = -EIO; + goto err_wedged; + } + + for_each_prime_number_from(count, 1, ring_size) { + rq = igt_spinner_create_request(&hi.spin, + hi.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + if (!igt_wait_for_spinner(&hi.spin, rq)) + goto err_wedged; + + rq = igt_spinner_create_request(&lo.spin, + lo.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + + for (i = 0; i < count; i++) { + rq = i915_request_alloc(engine, lo.ctx); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + } + + rq = i915_request_alloc(engine, hi.ctx); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + engine->schedule(rq, &attr); + + igt_spinner_end(&hi.spin); + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(i915->drm.dev); + + pr_err("Failed to preempt over chain of %d\n", + count); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + goto err_wedged; + } + igt_spinner_end(&lo.spin); + + rq = i915_request_alloc(engine, lo.ctx); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(i915->drm.dev); + + pr_err("Failed to flush low priority chain of %d requests\n", + count); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + goto err_wedged; + } + } + + if (igt_live_test_end(&t)) { + err = -EIO; + goto err_wedged; + } + } + + err = 0; +err_client_lo: + preempt_client_fini(&lo); +err_client_hi: + preempt_client_fini(&hi); +err_unlock: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; + +err_wedged: + igt_spinner_end(&hi.spin); + igt_spinner_end(&lo.spin); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_client_lo; +} + static int live_preempt_hang(void *arg) { struct drm_i915_private *i915 = arg; @@ -270,6 +754,7 @@ static int live_preempt_hang(void *arg) struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_PREEMPTION(i915)) @@ -279,7 +764,7 @@ static int live_preempt_hang(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if 
(igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -374,7 +859,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -522,7 +1007,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", count, flags, - INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext); + RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); return 0; } @@ -550,7 +1035,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", count, flags, - INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext); + RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); return 0; } @@ -562,6 +1047,8 @@ static int live_preempt_smoke(void *arg) .ncontext = 1024, }; const unsigned int phase[] = { 0, BATCH }; + intel_wakeref_t wakeref; + struct igt_live_test t; int err = -ENOMEM; u32 *cs; int n; @@ -576,7 +1063,7 @@ static int live_preempt_smoke(void *arg) return -ENOMEM; mutex_lock(&smoke.i915->drm.struct_mutex); - intel_runtime_pm_get(smoke.i915); + wakeref = intel_runtime_pm_get(smoke.i915); smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); if (IS_ERR(smoke.batch)) { @@ -592,11 +1079,13 @@ static int live_preempt_smoke(void *arg) for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) cs[n] = MI_ARB_CHECK; cs[n] = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(smoke.batch); i915_gem_object_unpin_map(smoke.batch); - err = i915_gem_object_set_to_gtt_domain(smoke.batch, false); - if (err) + if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) { + err = -EIO; goto err_batch; + } for (n = 0; n < smoke.ncontext; n++) { smoke.contexts[n] = kernel_context(smoke.i915); @@ -615,7 +1104,7 @@ static int live_preempt_smoke(void *arg) } err_ctx: - if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED)) + if (igt_live_test_end(&t)) err = -EIO; for (n = 0; n < smoke.ncontext; n++) { @@ -627,7 +1116,7 @@ err_ctx: err_batch: i915_gem_object_put(smoke.batch); err_unlock: - intel_runtime_pm_put(smoke.i915); + intel_runtime_pm_put(smoke.i915, wakeref); mutex_unlock(&smoke.i915->drm.struct_mutex); kfree(smoke.contexts); @@ -640,6 +1129,9 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_sanitycheck), SUBTEST(live_preempt), SUBTEST(live_late_preempt), + SUBTEST(live_suppress_self_preempt), + SUBTEST(live_suppress_wait_preempt), + SUBTEST(live_chain_preempt), SUBTEST(live_preempt_hang), SUBTEST(live_preempt_smoke), }; @@ -647,7 +1139,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) if (!HAS_EXECLISTS(i915)) return 0; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 81d9d31042a9..ee0bc91f7664 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -119,9 +119,132 @@ int intel_uncore_mock_selftests(void) return 0; } -static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv) +static int live_forcewake_ops(void *arg) +{ + static const struct reg { + const char *name; + unsigned long platforms; + unsigned int offset; + } registers[] = { + { + 
"RING_START", + INTEL_GEN_MASK(6, 7), + 0x38, + }, + { + "RING_MI_MODE", + INTEL_GEN_MASK(8, BITS_PER_LONG), + 0x9c, + } + }; + const struct reg *r; + struct drm_i915_private *i915 = arg; + struct intel_uncore_forcewake_domain *domain; + struct intel_uncore *uncore = &i915->uncore; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned int tmp; + int err = 0; + + GEM_BUG_ON(i915->gt.awake); + + /* vlv/chv with their pcu behave differently wrt reads */ + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + pr_debug("PCU fakes forcewake badly; skipping\n"); + return 0; + } + + /* We have to pick carefully to get the exact behaviour we need */ + for (r = registers; r->name; r++) + if (r->platforms & INTEL_INFO(i915)->gen_mask) + break; + if (!r->name) { + pr_debug("Forcewaked register not known for %s; skipping\n", + intel_platform_name(INTEL_INFO(i915)->platform)); + return 0; + } + + wakeref = intel_runtime_pm_get(i915); + + for_each_fw_domain(domain, uncore, tmp) { + smp_store_mb(domain->active, false); + if (!hrtimer_cancel(&domain->timer)) + continue; + + intel_uncore_fw_release_timer(&domain->timer); + } + + for_each_engine(engine, i915, id) { + i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset); + u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset; + enum forcewake_domains fw_domains; + u32 val; + + if (!engine->default_state) + continue; + + fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio, + FW_REG_READ); + if (!fw_domains) + continue; + + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { + if (!domain->wake_count) + continue; + + pr_err("fw_domain %s still active, aborting test!\n", + intel_uncore_forcewake_domain_to_str(domain->id)); + err = -EINVAL; + goto out_rpm; + } + + intel_uncore_forcewake_get(uncore, fw_domains); + val = readl(reg); + intel_uncore_forcewake_put(uncore, fw_domains); + + /* Flush the forcewake release (delayed onto a timer) */ + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { + smp_store_mb(domain->active, false); + if (hrtimer_cancel(&domain->timer)) + intel_uncore_fw_release_timer(&domain->timer); + + preempt_disable(); + err = wait_ack_clear(domain, FORCEWAKE_KERNEL); + preempt_enable(); + if (err) { + pr_err("Failed to clear fw_domain %s\n", + intel_uncore_forcewake_domain_to_str(domain->id)); + goto out_rpm; + } + } + + if (!val) { + pr_err("%s:%s was zero while fw was held!\n", + engine->name, r->name); + err = -EINVAL; + goto out_rpm; + } + + /* We then expect the read to return 0 outside of the fw */ + if (wait_for(readl(reg) == 0, 100)) { + pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n", + engine->name, r->name, readl(reg), fw_domains); + err = -ETIMEDOUT; + goto out_rpm; + } + } + +out_rpm: + intel_runtime_pm_put(i915, wakeref); + return err; +} + +static int live_forcewake_domains(void *arg) { #define FW_RANGE 0x40000 + struct drm_i915_private *dev_priv = arg; + struct intel_uncore *uncore = &dev_priv->uncore; unsigned long *valid; u32 offset; int err; @@ -137,48 +260,52 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN)) return 0; - valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid), - GFP_KERNEL); + valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL); if (!valid) return -ENOMEM; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - check_for_unclaimed_mmio(dev_priv); + check_for_unclaimed_mmio(uncore); for 
(offset = 0; offset < FW_RANGE; offset += 4) { i915_reg_t reg = { offset }; (void)I915_READ_FW(reg); - if (!check_for_unclaimed_mmio(dev_priv)) + if (!check_for_unclaimed_mmio(uncore)) set_bit(offset, valid); } - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); err = 0; for_each_set_bit(offset, valid, FW_RANGE) { i915_reg_t reg = { offset }; iosf_mbi_punit_acquire(); - intel_uncore_forcewake_reset(dev_priv); + intel_uncore_forcewake_reset(uncore); iosf_mbi_punit_release(); - check_for_unclaimed_mmio(dev_priv); + check_for_unclaimed_mmio(uncore); (void)I915_READ(reg); - if (check_for_unclaimed_mmio(dev_priv)) { + if (check_for_unclaimed_mmio(uncore)) { pr_err("Unclaimed mmio read to register 0x%04x\n", offset); err = -EINVAL; } } - kfree(valid); + bitmap_free(valid); return err; } int intel_uncore_live_selftests(struct drm_i915_private *i915) { + static const struct i915_subtest tests[] = { + SUBTEST(live_forcewake_ops), + SUBTEST(live_forcewake_domains), + }; + int err; /* Confirm the table we load is still valid */ @@ -188,9 +315,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915) if (err) return err; - err = intel_uncore_check_forcewake_domains(i915); - if (err) - return err; - - return 0; + return i915_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index 67017d5175b8..3baed59008d7 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -5,20 +5,75 @@ */ #include "../i915_selftest.h" +#include "../i915_reset.h" #include "igt_flush_test.h" #include "igt_reset.h" #include "igt_spinner.h" #include "igt_wedge_me.h" #include "mock_context.h" +#include "mock_drm.h" + +static const struct wo_register { + enum intel_platform platform; + u32 reg; +} wo_registers[] = { + { INTEL_GEMINILAKE, 0x731c } +}; + +#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4) +struct wa_lists { + struct i915_wa_list gt_wa_list; + struct { + char name[REF_NAME_MAX]; + struct i915_wa_list wa_list; + } engine[I915_NUM_ENGINES]; +}; + +static void +reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + memset(lists, 0, sizeof(*lists)); + + wa_init_start(&lists->gt_wa_list, "GT_REF"); + gt_init_workarounds(i915, &lists->gt_wa_list); + wa_init_finish(&lists->gt_wa_list); + + for_each_engine(engine, i915, id) { + struct i915_wa_list *wal = &lists->engine[id].wa_list; + char *name = lists->engine[id].name; + + snprintf(name, REF_NAME_MAX, "%s_REF", engine->name); + + wa_init_start(wal, name); + engine_init_workarounds(engine, wal); + wa_init_finish(wal); + } +} + +static void +reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) + intel_wa_list_free(&lists->engine[id].wa_list); + + intel_wa_list_free(&lists->gt_wa_list); +} static struct drm_i915_gem_object * read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { + const u32 base = engine->mmio_base; struct drm_i915_gem_object *result; + intel_wakeref_t wakeref; struct i915_request *rq; struct i915_vma *vma; - const u32 base = engine->mmio_base; u32 srm, *cs; int err; int i; @@ -27,7 +82,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) if (IS_ERR(result)) return result; - 
i915_gem_object_set_cache_level(result, I915_CACHE_LLC); + i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC); cs = i915_gem_object_pin_map(result, I915_MAP_WB); if (IS_ERR(cs)) { @@ -35,6 +90,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) goto err_obj; } memset(cs, 0xc5, PAGE_SIZE); + i915_gem_object_flush_map(result); i915_gem_object_unpin_map(result); vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL); @@ -47,9 +103,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) if (err) goto err_obj; - intel_runtime_pm_get(engine->i915); - rq = i915_request_alloc(engine, ctx); - intel_runtime_pm_put(engine->i915); + rq = ERR_PTR(-ENODEV); + with_intel_runtime_pm(engine->i915, wakeref) + rq = i915_request_alloc(engine, ctx); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_pin; @@ -134,7 +190,7 @@ static int check_whitelist(struct i915_gem_context *ctx, err = 0; igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */ err = i915_gem_object_set_to_cpu_domain(results, false); - if (i915_terminally_wedged(&ctx->i915->gpu_error)) + if (i915_terminally_wedged(ctx->i915)) err = -EIO; if (err) goto out_put; @@ -167,8 +223,7 @@ out_put: static int do_device_reset(struct intel_engine_cs *engine) { - set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags); - i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds"); + i915_reset(engine->i915, engine->mask, "live_workarounds"); return 0; } @@ -183,20 +238,18 @@ switch_to_scratch_context(struct intel_engine_cs *engine, { struct i915_gem_context *ctx; struct i915_request *rq; + intel_wakeref_t wakeref; int err = 0; ctx = kernel_context(engine->i915); if (IS_ERR(ctx)) return PTR_ERR(ctx); - intel_runtime_pm_get(engine->i915); + GEM_BUG_ON(i915_gem_context_is_bannable(ctx)); - if (spin) + rq = ERR_PTR(-ENODEV); + with_intel_runtime_pm(engine->i915, wakeref) rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); - else - rq = i915_request_alloc(engine, ctx); - - intel_runtime_pm_put(engine->i915); kernel_context_close(ctx); @@ -225,19 +278,17 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, const char *name) { struct drm_i915_private *i915 = engine->i915; - bool want_spin = reset == do_engine_reset; struct i915_gem_context *ctx; struct igt_spinner spin; + intel_wakeref_t wakeref; int err; pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", engine->whitelist.count, name); - if (want_spin) { - err = igt_spinner_init(&spin, i915); - if (err) - return err; - } + err = igt_spinner_init(&spin, i915); + if (err) + return err; ctx = kernel_context(i915); if (IS_ERR(ctx)) @@ -249,18 +300,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, goto out; } - err = switch_to_scratch_context(engine, want_spin ? 
&spin : NULL); + err = switch_to_scratch_context(engine, &spin); if (err) goto out; - intel_runtime_pm_get(i915); - err = reset(engine); - intel_runtime_pm_put(i915); + with_intel_runtime_pm(i915, wakeref) + err = reset(engine); - if (want_spin) { - igt_spinner_end(&spin); - igt_spinner_fini(&spin); - } + igt_spinner_end(&spin); + igt_spinner_fini(&spin); if (err) { pr_err("%s reset failed\n", name); @@ -292,10 +340,379 @@ out: return err; } +static struct i915_vma *create_scratch(struct i915_gem_context *ctx) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + void *ptr; + int err; + + obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + + ptr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(ptr)) { + err = PTR_ERR(ptr); + goto err_obj; + } + memset(ptr, 0xc5, PAGE_SIZE); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err_obj; + + err = i915_gem_object_set_to_cpu_domain(obj, false); + if (err) + goto err_obj; + + return vma; + +err_obj: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static struct i915_vma *create_batch(struct i915_gem_context *ctx) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err_obj; + + err = i915_gem_object_set_to_wc_domain(obj, true); + if (err) + goto err_obj; + + return vma; + +err_obj: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static u32 reg_write(u32 old, u32 new, u32 rsvd) +{ + if (rsvd == 0x0000ffff) { + old &= ~(new >> 16); + old |= new & (new >> 16); + } else { + old &= ~rsvd; + old |= new & rsvd; + } + + return old; +} + +static bool wo_register(struct intel_engine_cs *engine, u32 reg) +{ + enum intel_platform platform = INTEL_INFO(engine->i915)->platform; + int i; + + for (i = 0; i < ARRAY_SIZE(wo_registers); i++) { + if (wo_registers[i].platform == platform && + wo_registers[i].reg == reg) + return true; + } + + return false; +} + +static int check_dirty_whitelist(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + const u32 values[] = { + 0x00000000, + 0x01010101, + 0x10100101, + 0x03030303, + 0x30300303, + 0x05050505, + 0x50500505, + 0x0f0f0f0f, + 0xf00ff00f, + 0x10101010, + 0xf0f01010, + 0x30303030, + 0xa0a03030, + 0x50505050, + 0xc0c05050, + 0xf0f0f0f0, + 0x11111111, + 0x33333333, + 0x55555555, + 0x0000ffff, + 0x00ff00ff, + 0xff0000ff, + 0xffff00ff, + 0xffffffff, + }; + struct i915_vma *scratch; + struct i915_vma *batch; + int err = 0, i, v; + u32 *cs, *results; + + scratch = create_scratch(ctx); + if (IS_ERR(scratch)) + return PTR_ERR(scratch); + + batch = create_batch(ctx); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_scratch; + } + + for (i = 0; i < engine->whitelist.count; i++) { + u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); + u64 addr = scratch->node.start; + struct i915_request *rq; + u32 srm, lrm, rsvd; + u32 expect; + int idx; + + if (wo_register(engine, reg)) + continue; + + srm = MI_STORE_REGISTER_MEM; + lrm = 
MI_LOAD_REGISTER_MEM; + if (INTEL_GEN(ctx->i915) >= 8) + lrm++, srm++; + + pr_debug("%s: Writing garbage to %x\n", + engine->name, reg); + + cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto out_batch; + } + + /* SRM original */ + *cs++ = srm; + *cs++ = reg; + *cs++ = lower_32_bits(addr); + *cs++ = upper_32_bits(addr); + + idx = 1; + for (v = 0; v < ARRAY_SIZE(values); v++) { + /* LRI garbage */ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = reg; + *cs++ = values[v]; + + /* SRM result */ + *cs++ = srm; + *cs++ = reg; + *cs++ = lower_32_bits(addr + sizeof(u32) * idx); + *cs++ = upper_32_bits(addr + sizeof(u32) * idx); + idx++; + } + for (v = 0; v < ARRAY_SIZE(values); v++) { + /* LRI garbage */ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = reg; + *cs++ = ~values[v]; + + /* SRM result */ + *cs++ = srm; + *cs++ = reg; + *cs++ = lower_32_bits(addr + sizeof(u32) * idx); + *cs++ = upper_32_bits(addr + sizeof(u32) * idx); + idx++; + } + GEM_BUG_ON(idx * sizeof(u32) > scratch->size); + + /* LRM original -- don't leave garbage in the context! */ + *cs++ = lrm; + *cs++ = reg; + *cs++ = lower_32_bits(addr); + *cs++ = upper_32_bits(addr); + + *cs++ = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(batch->obj); + i915_gem_object_unpin_map(batch->obj); + i915_gem_chipset_flush(ctx->i915); + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_batch; + } + + if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ + err = engine->emit_init_breadcrumb(rq); + if (err) + goto err_request; + } + + err = engine->emit_bb_start(rq, + batch->node.start, PAGE_SIZE, + 0); + if (err) + goto err_request; + +err_request: + i915_request_add(rq); + if (err) + goto out_batch; + + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { + pr_err("%s: Futzing %x timedout; cancelling test\n", + engine->name, reg); + i915_gem_set_wedged(ctx->i915); + err = -EIO; + goto out_batch; + } + + results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); + if (IS_ERR(results)) { + err = PTR_ERR(results); + goto out_batch; + } + + GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff); + rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */ + if (!rsvd) { + pr_err("%s: Unable to write to whitelisted register %x\n", + engine->name, reg); + err = -EINVAL; + goto out_unpin; + } + + expect = results[0]; + idx = 1; + for (v = 0; v < ARRAY_SIZE(values); v++) { + expect = reg_write(expect, values[v], rsvd); + if (results[idx] != expect) + err++; + idx++; + } + for (v = 0; v < ARRAY_SIZE(values); v++) { + expect = reg_write(expect, ~values[v], rsvd); + if (results[idx] != expect) + err++; + idx++; + } + if (err) { + pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n", + engine->name, err, reg); + + pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n", + engine->name, reg, results[0], rsvd); + + expect = results[0]; + idx = 1; + for (v = 0; v < ARRAY_SIZE(values); v++) { + u32 w = values[v]; + + expect = reg_write(expect, w, rsvd); + pr_info("Wrote %08x, read %08x, expect %08x\n", + w, results[idx], expect); + idx++; + } + for (v = 0; v < ARRAY_SIZE(values); v++) { + u32 w = ~values[v]; + + expect = reg_write(expect, w, rsvd); + pr_info("Wrote %08x, read %08x, expect %08x\n", + w, results[idx], expect); + idx++; + } + + err = -EINVAL; + } +out_unpin: + i915_gem_object_unpin_map(scratch->obj); + if (err) + break; + } + + if (igt_flush_test(ctx->i915, 
I915_WAIT_LOCKED)) + err = -EIO; +out_batch: + i915_vma_unpin_and_release(&batch, 0); +out_scratch: + i915_vma_unpin_and_release(&scratch, 0); + return err; +} + +static int live_dirty_whitelist(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + intel_wakeref_t wakeref; + struct drm_file *file; + int err = 0; + + /* Can the user write to the whitelisted registers? */ + + if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */ + return 0; + + wakeref = intel_runtime_pm_get(i915); + + mutex_unlock(&i915->drm.struct_mutex); + file = mock_file(i915); + mutex_lock(&i915->drm.struct_mutex); + if (IS_ERR(file)) { + err = PTR_ERR(file); + goto out_rpm; + } + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_file; + } + + for_each_engine(engine, i915, id) { + if (engine->whitelist.count == 0) + continue; + + err = check_dirty_whitelist(ctx, engine); + if (err) + goto out_file; + } + +out_file: + mutex_unlock(&i915->drm.struct_mutex); + mock_file_free(i915, file); + mutex_lock(&i915->drm.struct_mutex); +out_rpm: + intel_runtime_pm_put(i915, wakeref); + return err; +} + static int live_reset_whitelist(void *arg) { struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS]; + struct intel_engine_cs *engine = i915->engine[RCS0]; int err = 0; /* If we reset the gpu, we should not lose the RING_NONPRIV */ @@ -326,16 +743,17 @@ out: return err; } -static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str) +static bool verify_gt_engine_wa(struct drm_i915_private *i915, + struct wa_lists *lists, const char *str) { struct intel_engine_cs *engine; enum intel_engine_id id; bool ok = true; - ok &= intel_gt_verify_workarounds(i915, str); + ok &= wa_list_verify(i915, &lists->gt_wa_list, str); for_each_engine(engine, i915, id) - ok &= intel_engine_verify_workarounds(engine, str); + ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str); return ok; } @@ -344,7 +762,8 @@ static int live_gpu_reset_gt_engine_workarounds(void *arg) { struct drm_i915_private *i915 = arg; - struct i915_gpu_error *error = &i915->gpu_error; + intel_wakeref_t wakeref; + struct wa_lists lists; bool ok; if (!intel_has_gpu_reset(i915)) @@ -353,19 +772,21 @@ live_gpu_reset_gt_engine_workarounds(void *arg) pr_info("Verifying after GPU reset...\n"); igt_global_reset_lock(i915); + wakeref = intel_runtime_pm_get(i915); + + reference_lists_init(i915, &lists); - ok = verify_gt_engine_wa(i915, "before reset"); + ok = verify_gt_engine_wa(i915, &lists, "before reset"); if (!ok) goto out; - intel_runtime_pm_get(i915); - set_bit(I915_RESET_HANDOFF, &error->flags); i915_reset(i915, ALL_ENGINES, "live_workarounds"); - intel_runtime_pm_put(i915); - ok = verify_gt_engine_wa(i915, "after reset"); + ok = verify_gt_engine_wa(i915, &lists, "after reset"); out: + reference_lists_fini(i915, &lists); + intel_runtime_pm_put(i915, wakeref); igt_global_reset_unlock(i915); return ok ? 
0 : -ESRCH; @@ -380,6 +801,8 @@ live_engine_reset_gt_engine_workarounds(void *arg) struct igt_spinner spin; enum intel_engine_id id; struct i915_request *rq; + intel_wakeref_t wakeref; + struct wa_lists lists; int ret = 0; if (!intel_has_reset_engine(i915)) @@ -390,23 +813,24 @@ live_engine_reset_gt_engine_workarounds(void *arg) return PTR_ERR(ctx); igt_global_reset_lock(i915); + wakeref = intel_runtime_pm_get(i915); + + reference_lists_init(i915, &lists); for_each_engine(engine, i915, id) { bool ok; pr_info("Verifying after %s reset...\n", engine->name); - ok = verify_gt_engine_wa(i915, "before reset"); + ok = verify_gt_engine_wa(i915, &lists, "before reset"); if (!ok) { ret = -ESRCH; goto err; } - intel_runtime_pm_get(i915); i915_reset_engine(engine, "live_workarounds"); - intel_runtime_pm_put(i915); - ok = verify_gt_engine_wa(i915, "after idle reset"); + ok = verify_gt_engine_wa(i915, &lists, "after idle reset"); if (!ok) { ret = -ESRCH; goto err; @@ -416,13 +840,10 @@ live_engine_reset_gt_engine_workarounds(void *arg) if (ret) goto err; - intel_runtime_pm_get(i915); - rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); if (IS_ERR(rq)) { ret = PTR_ERR(rq); igt_spinner_fini(&spin); - intel_runtime_pm_put(i915); goto err; } @@ -431,19 +852,16 @@ live_engine_reset_gt_engine_workarounds(void *arg) if (!igt_wait_for_spinner(&spin, rq)) { pr_err("Spinner failed to start\n"); igt_spinner_fini(&spin); - intel_runtime_pm_put(i915); ret = -ETIMEDOUT; goto err; } i915_reset_engine(engine, "live_workarounds"); - intel_runtime_pm_put(i915); - igt_spinner_end(&spin); igt_spinner_fini(&spin); - ok = verify_gt_engine_wa(i915, "after busy reset"); + ok = verify_gt_engine_wa(i915, &lists, "after busy reset"); if (!ok) { ret = -ESRCH; goto err; @@ -451,6 +869,8 @@ live_engine_reset_gt_engine_workarounds(void *arg) } err: + reference_lists_fini(i915, &lists); + intel_runtime_pm_put(i915, wakeref); igt_global_reset_unlock(i915); kernel_context_close(ctx); @@ -462,13 +882,14 @@ err: int intel_workarounds_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { + SUBTEST(live_dirty_whitelist), SUBTEST(live_reset_whitelist), SUBTEST(live_gpu_reset_gt_engine_workarounds), SUBTEST(live_engine_reset_gt_engine_workarounds), }; int err; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c index b26f07b55d86..2bfa72c1654b 100644 --- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c @@ -76,3 +76,57 @@ void timed_fence_fini(struct timed_fence *tf) destroy_timer_on_stack(&tf->timer); i915_sw_fence_fini(&tf->fence); } + +struct heap_fence { + struct i915_sw_fence fence; + union { + struct kref ref; + struct rcu_head rcu; + }; +}; + +static int __i915_sw_fence_call +heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +{ + struct heap_fence *h = container_of(fence, typeof(*h), fence); + + switch (state) { + case FENCE_COMPLETE: + break; + + case FENCE_FREE: + heap_fence_put(&h->fence); + } + + return NOTIFY_DONE; +} + +struct i915_sw_fence *heap_fence_create(gfp_t gfp) +{ + struct heap_fence *h; + + h = kmalloc(sizeof(*h), gfp); + if (!h) + return NULL; + + i915_sw_fence_init(&h->fence, heap_fence_notify); + refcount_set(&h->ref.refcount, 2); + + return &h->fence; +} + +static void heap_fence_release(struct kref *ref) 
+{ + struct heap_fence *h = container_of(ref, typeof(*h), ref); + + i915_sw_fence_fini(&h->fence); + + kfree_rcu(h, rcu); +} + +void heap_fence_put(struct i915_sw_fence *fence) +{ + struct heap_fence *h = container_of(fence, typeof(*h), fence); + + kref_put(&h->ref, heap_fence_release); +} diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h index 474aafb92ae1..1f9927e10f3a 100644 --- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h +++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h @@ -39,4 +39,7 @@ struct timed_fence { void timed_fence_init(struct timed_fence *tf, unsigned long expires); void timed_fence_fini(struct timed_fence *tf); +struct i915_sw_fence *heap_fence_create(gfp_t gfp); +void heap_fence_put(struct i915_sw_fence *fence); + #endif /* _LIB_SW_FENCE_H_ */ diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index d937bdff26f9..0426093bf1d9 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915, const char *name) { struct i915_gem_context *ctx; - unsigned int n; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -41,28 +40,31 @@ mock_context(struct drm_i915_private *i915, INIT_LIST_HEAD(&ctx->link); ctx->i915 = i915; + ctx->hw_contexts = RB_ROOT; + spin_lock_init(&ctx->hw_contexts_lock); + INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); INIT_LIST_HEAD(&ctx->hw_id_link); - - for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { - struct intel_context *ce = &ctx->__engine[n]; - - ce->gem_context = ctx; - } + INIT_LIST_HEAD(&ctx->active_engines); + mutex_init(&ctx->mutex); ret = i915_gem_context_pin_hw_id(ctx); if (ret < 0) goto err_handles; if (name) { + struct i915_hw_ppgtt *ppgtt; + ctx->name = kstrdup(name, GFP_KERNEL); if (!ctx->name) goto err_put; - ctx->ppgtt = mock_ppgtt(i915, name); - if (!ctx->ppgtt) + ppgtt = mock_ppgtt(i915, name); + if (!ppgtt) goto err_put; + + __set_ppgtt(ctx, ppgtt); } return ctx; @@ -90,9 +92,24 @@ void mock_init_contexts(struct drm_i915_private *i915) struct i915_gem_context * live_context(struct drm_i915_private *i915, struct drm_file *file) { + struct i915_gem_context *ctx; + int err; + lockdep_assert_held(&i915->drm.struct_mutex); - return i915_gem_create_context(i915, file->driver_priv); + ctx = i915_gem_create_context(i915, 0); + if (IS_ERR(ctx)) + return ctx; + + err = gem_context_register(ctx, file->driver_priv); + if (err < 0) + goto err_ctx; + + return ctx; + +err_ctx: + context_close(ctx); + return ERR_PTR(err); } struct i915_gem_context * diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index d0c44c18db42..61a8206ed677 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -30,84 +30,137 @@ struct mock_ring { struct i915_timeline timeline; }; -static struct mock_request *first_request(struct mock_engine *engine) +static void mock_timeline_pin(struct i915_timeline *tl) +{ + tl->pin_count++; +} + +static void mock_timeline_unpin(struct i915_timeline *tl) +{ + GEM_BUG_ON(!tl->pin_count); + tl->pin_count--; +} + +static struct intel_ring *mock_ring(struct intel_engine_cs *engine) +{ + const unsigned long sz = PAGE_SIZE / 2; + struct mock_ring *ring; + + ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); + if (!ring) + return NULL; + + if (i915_timeline_init(engine->i915, 
&ring->timeline, NULL)) { + kfree(ring); + return NULL; + } + + kref_init(&ring->base.ref); + ring->base.size = sz; + ring->base.effective_size = sz; + ring->base.vaddr = (void *)(ring + 1); + ring->base.timeline = &ring->timeline; + + INIT_LIST_HEAD(&ring->base.request_list); + intel_ring_update_space(&ring->base); + + return &ring->base; +} + +static void mock_ring_free(struct intel_ring *base) +{ + struct mock_ring *ring = container_of(base, typeof(*ring), base); + + i915_timeline_fini(&ring->timeline); + kfree(ring); +} + +static struct i915_request *first_request(struct mock_engine *engine) { return list_first_entry_or_null(&engine->hw_queue, - struct mock_request, - link); + struct i915_request, + mock.link); } -static void advance(struct mock_engine *engine, - struct mock_request *request) +static void advance(struct i915_request *request) { - list_del_init(&request->link); - mock_seqno_advance(&engine->base, request->base.global_seqno); + list_del_init(&request->mock.link); + i915_request_mark_complete(request); + GEM_BUG_ON(!i915_request_completed(request)); + + intel_engine_queue_breadcrumbs(request->engine); } static void hw_delay_complete(struct timer_list *t) { struct mock_engine *engine = from_timer(engine, t, hw_delay); - struct mock_request *request; + struct i915_request *request; + unsigned long flags; - spin_lock(&engine->hw_lock); + spin_lock_irqsave(&engine->hw_lock, flags); /* Timer fired, first request is complete */ request = first_request(engine); if (request) - advance(engine, request); + advance(request); /* * Also immediately signal any subsequent 0-delay requests, but * requeue the timer for the next delayed request. */ while ((request = first_request(engine))) { - if (request->delay) { - mod_timer(&engine->hw_delay, jiffies + request->delay); + if (request->mock.delay) { + mod_timer(&engine->hw_delay, + jiffies + request->mock.delay); break; } - advance(engine, request); + advance(request); } - spin_unlock(&engine->hw_lock); + spin_unlock_irqrestore(&engine->hw_lock, flags); } static void mock_context_unpin(struct intel_context *ce) { - i915_gem_context_put(ce->gem_context); + mock_timeline_unpin(ce->ring->timeline); } -static void mock_context_destroy(struct intel_context *ce) +static void mock_context_destroy(struct kref *ref) { - GEM_BUG_ON(ce->pin_count); -} + struct intel_context *ce = container_of(ref, typeof(*ce), ref); -static const struct intel_context_ops mock_context_ops = { - .unpin = mock_context_unpin, - .destroy = mock_context_destroy, -}; + GEM_BUG_ON(intel_context_is_pinned(ce)); -static struct intel_context * -mock_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) -{ - struct intel_context *ce = to_intel_context(ctx, engine); + if (ce->ring) + mock_ring_free(ce->ring); + + intel_context_free(ce); +} - if (!ce->pin_count++) { - i915_gem_context_get(ctx); - ce->ring = engine->buffer; - ce->ops = &mock_context_ops; +static int mock_context_pin(struct intel_context *ce) +{ + if (!ce->ring) { + ce->ring = mock_ring(ce->engine); + if (!ce->ring) + return -ENOMEM; } - return ce; + mock_timeline_pin(ce->ring->timeline); + return 0; } +static const struct intel_context_ops mock_context_ops = { + .pin = mock_context_pin, + .unpin = mock_context_unpin, + + .destroy = mock_context_destroy, +}; + static int mock_request_alloc(struct i915_request *request) { - struct mock_request *mock = container_of(request, typeof(*mock), base); - - INIT_LIST_HEAD(&mock->link); - mock->delay = 0; + INIT_LIST_HEAD(&request->mock.link); + 
request->mock.delay = 0; return 0; } @@ -118,61 +171,60 @@ static int mock_emit_flush(struct i915_request *request, return 0; } -static void mock_emit_breadcrumb(struct i915_request *request, - u32 *flags) +static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs) { + return cs; } static void mock_submit_request(struct i915_request *request) { - struct mock_request *mock = container_of(request, typeof(*mock), base); struct mock_engine *engine = container_of(request->engine, typeof(*engine), base); + unsigned long flags; i915_request_submit(request); - GEM_BUG_ON(!request->global_seqno); - spin_lock_irq(&engine->hw_lock); - list_add_tail(&mock->link, &engine->hw_queue); - if (mock->link.prev == &engine->hw_queue) { - if (mock->delay) - mod_timer(&engine->hw_delay, jiffies + mock->delay); + spin_lock_irqsave(&engine->hw_lock, flags); + list_add_tail(&request->mock.link, &engine->hw_queue); + if (list_is_first(&request->mock.link, &engine->hw_queue)) { + if (request->mock.delay) + mod_timer(&engine->hw_delay, + jiffies + request->mock.delay); else - advance(engine, mock); + advance(request); } - spin_unlock_irq(&engine->hw_lock); + spin_unlock_irqrestore(&engine->hw_lock, flags); } -static struct intel_ring *mock_ring(struct intel_engine_cs *engine) +static void mock_reset_prepare(struct intel_engine_cs *engine) { - const unsigned long sz = PAGE_SIZE / 2; - struct mock_ring *ring; - - BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz); +} - ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); - if (!ring) - return NULL; +static void mock_reset(struct intel_engine_cs *engine, bool stalled) +{ + GEM_BUG_ON(stalled); +} - i915_timeline_init(engine->i915, &ring->timeline, engine->name); +static void mock_reset_finish(struct intel_engine_cs *engine) +{ +} - ring->base.size = sz; - ring->base.effective_size = sz; - ring->base.vaddr = (void *)(ring + 1); - ring->base.timeline = &ring->timeline; +static void mock_cancel_requests(struct intel_engine_cs *engine) +{ + struct i915_request *request; + unsigned long flags; - INIT_LIST_HEAD(&ring->base.request_list); - intel_ring_update_space(&ring->base); + spin_lock_irqsave(&engine->timeline.lock, flags); - return &ring->base; -} + /* Mark all submitted requests as skipped. 
*/ + list_for_each_entry(request, &engine->timeline.requests, sched.link) { + if (!i915_request_signaled(request)) + dma_fence_set_error(&request->fence, -EIO); -static void mock_ring_free(struct intel_ring *base) -{ - struct mock_ring *ring = container_of(base, typeof(*ring), base); + i915_request_mark_complete(request); + } - i915_timeline_fini(&ring->timeline); - kfree(ring); + spin_unlock_irqrestore(&engine->timeline.lock, flags); } struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, @@ -191,39 +243,41 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.i915 = i915; snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); engine->base.id = id; - engine->base.status_page.page_addr = (void *)(engine + 1); + engine->base.mask = BIT(id); + engine->base.status_page.addr = (void *)(engine + 1); - engine->base.context_pin = mock_context_pin; + engine->base.cops = &mock_context_ops; engine->base.request_alloc = mock_request_alloc; engine->base.emit_flush = mock_emit_flush; - engine->base.emit_breadcrumb = mock_emit_breadcrumb; + engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb; engine->base.submit_request = mock_submit_request; - i915_timeline_init(i915, &engine->base.timeline, engine->base.name); + engine->base.reset.prepare = mock_reset_prepare; + engine->base.reset.reset = mock_reset; + engine->base.reset.finish = mock_reset_finish; + engine->base.cancel_requests = mock_cancel_requests; + + if (i915_timeline_init(i915, &engine->base.timeline, NULL)) + goto err_free; i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE); intel_engine_init_breadcrumbs(&engine->base); - engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ /* fake hw queue */ spin_lock_init(&engine->hw_lock); timer_setup(&engine->hw_delay, hw_delay_complete, 0); INIT_LIST_HEAD(&engine->hw_queue); - engine->base.buffer = mock_ring(&engine->base); - if (!engine->base.buffer) + if (pin_context(i915->kernel_context, &engine->base, + &engine->base.kernel_context)) goto err_breadcrumbs; - if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base))) - goto err_ring; - return &engine->base; -err_ring: - mock_ring_free(engine->base.buffer); err_breadcrumbs: intel_engine_fini_breadcrumbs(&engine->base); i915_timeline_fini(&engine->base.timeline); +err_free: kfree(engine); return NULL; } @@ -232,21 +286,18 @@ void mock_engine_flush(struct intel_engine_cs *engine) { struct mock_engine *mock = container_of(engine, typeof(*mock), base); - struct mock_request *request, *rn; + struct i915_request *request, *rn; del_timer_sync(&mock->hw_delay); spin_lock_irq(&mock->hw_lock); - list_for_each_entry_safe(request, rn, &mock->hw_queue, link) { - list_del_init(&request->link); - mock_seqno_advance(&mock->base, request->base.global_seqno); - } + list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link) + advance(request); spin_unlock_irq(&mock->hw_lock); } void mock_engine_reset(struct intel_engine_cs *engine) { - intel_write_status_page(engine, I915_GEM_HWS_INDEX, 0); } void mock_engine_free(struct intel_engine_cs *engine) @@ -261,9 +312,7 @@ void mock_engine_free(struct intel_engine_cs *engine) if (ce) intel_context_unpin(ce); - __intel_context_unpin(engine->i915->kernel_context, engine); - - mock_ring_free(engine->buffer); + intel_context_unpin(engine->kernel_context); intel_engine_fini_breadcrumbs(engine); i915_timeline_fini(&engine->timeline); diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h 
b/drivers/gpu/drm/i915/selftests/mock_engine.h index 133d0c21790d..b9cc3a245f16 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.h +++ b/drivers/gpu/drm/i915/selftests/mock_engine.h @@ -46,10 +46,4 @@ void mock_engine_flush(struct intel_engine_cs *engine); void mock_engine_reset(struct intel_engine_cs *engine); void mock_engine_free(struct intel_engine_cs *engine); -static inline void mock_seqno_advance(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); - intel_engine_wakeup(engine); -} - #endif /* !__MOCK_ENGINE_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 43ed8b28aeaa..60bbf8b4df40 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -58,8 +58,8 @@ static void mock_device_release(struct drm_device *dev) i915_gem_contexts_lost(i915); mutex_unlock(&i915->drm.struct_mutex); - cancel_delayed_work_sync(&i915->gt.retire_work); - cancel_delayed_work_sync(&i915->gt.idle_work); + drain_delayed_work(&i915->gt.retire_work); + drain_delayed_work(&i915->gt.idle_work); i915_gem_drain_workqueue(i915); mutex_lock(&i915->drm.struct_mutex); @@ -68,22 +68,17 @@ static void mock_device_release(struct drm_device *dev) i915_gem_contexts_fini(i915); mutex_unlock(&i915->drm.struct_mutex); + i915_timelines_fini(i915); + drain_workqueue(i915->wq); i915_gem_drain_freed_objects(i915); mutex_lock(&i915->drm.struct_mutex); - mock_fini_ggtt(i915); + mock_fini_ggtt(&i915->ggtt); mutex_unlock(&i915->drm.struct_mutex); - WARN_ON(!list_empty(&i915->gt.timelines)); destroy_workqueue(i915->wq); - kmem_cache_destroy(i915->priorities); - kmem_cache_destroy(i915->dependencies); - kmem_cache_destroy(i915->requests); - kmem_cache_destroy(i915->vmas); - kmem_cache_destroy(i915->objects); - i915_gemfs_fini(i915); drm_mode_config_cleanup(&i915->drm); @@ -114,6 +109,10 @@ static void mock_retire_work_handler(struct work_struct *work) static void mock_idle_work_handler(struct work_struct *work) { + struct drm_i915_private *i915 = + container_of(work, typeof(*i915), gt.idle_work.work); + + i915->gt.active_engines = 0; } static int pm_domain_resume(struct device *dev) @@ -147,22 +146,24 @@ struct drm_i915_private *mock_gem_device(void) pdev->class = PCI_BASE_CLASS_DISPLAY << 16; pdev->dev.release = release_dev; dev_set_name(&pdev->dev, "mock"); - dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) /* hack to disable iommu for the fake device; force identity mapping */ pdev->dev.archdata.iommu = (void *)-1; #endif + i915 = (struct drm_i915_private *)(pdev + 1); + pci_set_drvdata(pdev, i915); + + intel_runtime_pm_init_early(i915); + dev_pm_domain_set(&pdev->dev, &pm_domain); pm_runtime_enable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); if (pm_runtime_enabled(&pdev->dev)) WARN_ON(pm_runtime_get_sync(&pdev->dev)); - i915 = (struct drm_i915_private *)(pdev + 1); - pci_set_drvdata(pdev, i915); - err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev); if (err) { pr_err("Failed to initialise mock GEM device: err=%d\n", err); @@ -181,11 +182,13 @@ struct drm_i915_private *mock_gem_device(void) I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_2M; - mock_uncore_init(i915); + mock_uncore_init(&i915->uncore); i915_gem_init__mm(i915); init_waitqueue_head(&i915->gpu_error.wait_queue); 
init_waitqueue_head(&i915->gpu_error.reset_queue); + init_srcu_struct(&i915->gpu_error.reset_backoff_srcu); + mutex_init(&i915->gpu_error.wedge_mutex); i915->wq = alloc_ordered_workqueue("mock", 0); if (!i915->wq) @@ -198,46 +201,22 @@ struct drm_i915_private *mock_gem_device(void) i915->gt.awake = true; - i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN); - if (!i915->objects) - goto err_wq; - - i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); - if (!i915->vmas) - goto err_objects; - - i915->requests = KMEM_CACHE(mock_request, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT | - SLAB_TYPESAFE_BY_RCU); - if (!i915->requests) - goto err_vmas; - - i915->dependencies = KMEM_CACHE(i915_dependency, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT); - if (!i915->dependencies) - goto err_requests; - - i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN); - if (!i915->priorities) - goto err_dependencies; + i915_timelines_init(i915); - INIT_LIST_HEAD(&i915->gt.timelines); INIT_LIST_HEAD(&i915->gt.active_rings); INIT_LIST_HEAD(&i915->gt.closed_vma); mutex_lock(&i915->drm.struct_mutex); - mock_init_ggtt(i915); + mock_init_ggtt(i915, &i915->ggtt); - mkwrite_device_info(i915)->ring_mask = BIT(0); + mkwrite_device_info(i915)->engine_mask = BIT(0); i915->kernel_context = mock_context(i915, NULL); if (!i915->kernel_context) goto err_unlock; - i915->engine[RCS] = mock_engine(i915, "mock", RCS); - if (!i915->engine[RCS]) + i915->engine[RCS0] = mock_engine(i915, "mock", RCS0); + if (!i915->engine[RCS0]) goto err_context; mutex_unlock(&i915->drm.struct_mutex); @@ -250,16 +229,7 @@ err_context: i915_gem_contexts_fini(i915); err_unlock: mutex_unlock(&i915->drm.struct_mutex); - kmem_cache_destroy(i915->priorities); -err_dependencies: - kmem_cache_destroy(i915->dependencies); -err_requests: - kmem_cache_destroy(i915->requests); -err_vmas: - kmem_cache_destroy(i915->vmas); -err_objects: - kmem_cache_destroy(i915->objects); -err_wq: + i915_timelines_fini(i915); destroy_workqueue(i915->wq); err_drv: drm_mode_config_cleanup(&i915->drm); diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 6ae418c76015..cd83929fde8e 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -70,7 +70,7 @@ mock_ppgtt(struct drm_i915_private *i915, ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.file = ERR_PTR(-ENODEV); - i915_address_space_init(&ppgtt->vm, i915); + i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); ppgtt->vm.clear_range = nop_clear_range; ppgtt->vm.insert_page = mock_insert_page; @@ -97,11 +97,12 @@ static void mock_unbind_ggtt(struct i915_vma *vma) { } -void mock_init_ggtt(struct drm_i915_private *i915) +void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; + memset(ggtt, 0, sizeof(*ggtt)); ggtt->vm.i915 = i915; + ggtt->vm.is_ggtt = true; ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE); ggtt->mappable_end = resource_size(&ggtt->gmadr); @@ -117,14 +118,10 @@ void mock_init_ggtt(struct drm_i915_private *i915) ggtt->vm.vma_ops.set_pages = ggtt_set_pages; ggtt->vm.vma_ops.clear_pages = clear_pages; - i915_address_space_init(&ggtt->vm, i915); - - ggtt->vm.is_ggtt = true; + i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); } -void mock_fini_ggtt(struct drm_i915_private *i915) +void mock_fini_ggtt(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; - i915_address_space_fini(&ggtt->vm); } diff 
--git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h index 9a0a833bb545..40d544bde1d5 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.h +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h @@ -25,8 +25,8 @@ #ifndef __MOCK_GTT_H #define __MOCK_GTT_H -void mock_init_ggtt(struct drm_i915_private *i915); -void mock_fini_ggtt(struct drm_i915_private *i915); +void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt); +void mock_fini_ggtt(struct i915_ggtt *ggtt); struct i915_hw_ppgtt * mock_ppgtt(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c index 0dc29e242597..d1a7c9608712 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.c +++ b/drivers/gpu/drm/i915/selftests/mock_request.c @@ -31,29 +31,25 @@ mock_request(struct intel_engine_cs *engine, unsigned long delay) { struct i915_request *request; - struct mock_request *mock; /* NB the i915->requests slab cache is enlarged to fit mock_request */ request = i915_request_alloc(engine, context); if (IS_ERR(request)) return NULL; - mock = container_of(request, typeof(*mock), base); - mock->delay = delay; - - return &mock->base; + request->mock.delay = delay; + return request; } bool mock_cancel_request(struct i915_request *request) { - struct mock_request *mock = container_of(request, typeof(*mock), base); struct mock_engine *engine = container_of(request->engine, typeof(*engine), base); bool was_queued; spin_lock_irq(&engine->hw_lock); - was_queued = !list_empty(&mock->link); - list_del_init(&mock->link); + was_queued = !list_empty(&request->mock.link); + list_del_init(&request->mock.link); spin_unlock_irq(&engine->hw_lock); if (was_queued) diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h index 995fb728380c..4acf0211df20 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.h +++ b/drivers/gpu/drm/i915/selftests/mock_request.h @@ -29,13 +29,6 @@ #include "../i915_request.h" -struct mock_request { - struct i915_request base; - - struct list_head link; - unsigned long delay; -}; - struct i915_request * mock_request(struct intel_engine_cs *engine, struct i915_gem_context *context, diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c index dcf3b16f5a07..416d85233263 100644 --- a/drivers/gpu/drm/i915/selftests/mock_timeline.c +++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c @@ -10,11 +10,14 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context) { + timeline->i915 = NULL; timeline->fence_context = context; spin_lock_init(&timeline->lock); + mutex_init(&timeline->mutex); - init_request_active(&timeline->last_request, NULL); + INIT_ACTIVE_REQUEST(&timeline->barrier); + INIT_ACTIVE_REQUEST(&timeline->last_request); INIT_LIST_HEAD(&timeline->requests); i915_syncmap_init(&timeline->sync); @@ -24,5 +27,5 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context) void mock_timeline_fini(struct i915_timeline *timeline) { - i915_timeline_fini(timeline); + i915_syncmap_free(&timeline->sync); } diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c index 8ef14c7e5e38..ff8999c63a12 100644 --- a/drivers/gpu/drm/i915/selftests/mock_uncore.c +++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c @@ -26,21 +26,21 @@ #define __nop_write(x) \ static void \ -nop_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, 
bool trace) { } +nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { } __nop_write(8) __nop_write(16) __nop_write(32) #define __nop_read(x) \ static u##x \ -nop_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { return 0; } +nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; } __nop_read(8) __nop_read(16) __nop_read(32) __nop_read(64) -void mock_uncore_init(struct drm_i915_private *i915) +void mock_uncore_init(struct intel_uncore *uncore) { - ASSIGN_WRITE_MMIO_VFUNCS(i915, nop); - ASSIGN_READ_MMIO_VFUNCS(i915, nop); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop); + ASSIGN_READ_MMIO_VFUNCS(uncore, nop); } diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h index d79aa3ca4d51..dacb36b5ffcd 100644 --- a/drivers/gpu/drm/i915/selftests/mock_uncore.h +++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h @@ -25,6 +25,6 @@ #ifndef __MOCK_UNCORE_H #define __MOCK_UNCORE_H -void mock_uncore_init(struct drm_i915_private *i915); +void mock_uncore_init(struct intel_uncore *uncore); #endif /* !__MOCK_UNCORE_H */ diff --git a/drivers/gpu/drm/i915/test_i915_active_types_standalone.c b/drivers/gpu/drm/i915/test_i915_active_types_standalone.c new file mode 100644 index 000000000000..144ebd153e57 --- /dev/null +++ b/drivers/gpu/drm/i915/test_i915_active_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_active_types.h" diff --git a/drivers/gpu/drm/i915/test_i915_gem_context_types_standalone.c b/drivers/gpu/drm/i915/test_i915_gem_context_types_standalone.c new file mode 100644 index 000000000000..4e4da4860bc2 --- /dev/null +++ b/drivers/gpu/drm/i915/test_i915_gem_context_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_gem_context_types.h" diff --git a/drivers/gpu/drm/i915/test_i915_priolist_types_standalone.c b/drivers/gpu/drm/i915/test_i915_priolist_types_standalone.c new file mode 100644 index 000000000000..f465eb99bb47 --- /dev/null +++ b/drivers/gpu/drm/i915/test_i915_priolist_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_priolist_types.h" diff --git a/drivers/gpu/drm/i915/test_i915_scheduler_types_standalone.c b/drivers/gpu/drm/i915/test_i915_scheduler_types_standalone.c new file mode 100644 index 000000000000..8afa2c3719fb --- /dev/null +++ b/drivers/gpu/drm/i915/test_i915_scheduler_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_scheduler_types.h" diff --git a/drivers/gpu/drm/i915/test_i915_timeline_types_standalone.c b/drivers/gpu/drm/i915/test_i915_timeline_types_standalone.c new file mode 100644 index 000000000000..f58e148e8946 --- /dev/null +++ b/drivers/gpu/drm/i915/test_i915_timeline_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_timeline_types.h" diff --git a/drivers/gpu/drm/i915/test_intel_context_types_standalone.c b/drivers/gpu/drm/i915/test_intel_context_types_standalone.c new file mode 100644 index 000000000000..b39e3c4e6551 --- /dev/null +++ b/drivers/gpu/drm/i915/test_intel_context_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include 
"intel_context_types.h" diff --git a/drivers/gpu/drm/i915/test_intel_engine_types_standalone.c b/drivers/gpu/drm/i915/test_intel_engine_types_standalone.c new file mode 100644 index 000000000000..d05e4cdcbcf9 --- /dev/null +++ b/drivers/gpu/drm/i915/test_intel_engine_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "intel_engine_types.h" diff --git a/drivers/gpu/drm/i915/test_intel_workarounds_types_standalone.c b/drivers/gpu/drm/i915/test_intel_workarounds_types_standalone.c new file mode 100644 index 000000000000..4f658bb00825 --- /dev/null +++ b/drivers/gpu/drm/i915/test_intel_workarounds_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "intel_workarounds_types.h" diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 361e962a7969..0a950c976bbb 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -23,7 +23,6 @@ * Author: Jani Nikula <jani.nikula@intel.com> */ -#include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> @@ -79,7 +78,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_GEN_FIFO_STAT(port), mask, mask, 100)) DRM_ERROR("DPI FIFOs are not empty\n"); @@ -149,7 +148,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, /* note: this is never true for reads */ if (packet.payload_length) { - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_GEN_FIFO_STAT(port), data_mask, 0, 50)) @@ -163,7 +162,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL); } - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_GEN_FIFO_STAT(port), ctrl_mask, 0, 50)) { @@ -175,7 +174,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, /* ->rx_len is set only for reads */ if (msg->rx_len) { data_mask = GEN_READ_DATA_AVAIL; - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_INTR_STAT(port), data_mask, data_mask, 50)) @@ -235,7 +234,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, I915_WRITE(MIPI_DPI_CONTROL(port), cmd); mask = SPL_PKT_SENT_INTERRUPT; - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_INTR_STAT(port), mask, mask, 100)) DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd); @@ -257,9 +256,9 @@ static void band_gap_reset(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->sb_lock); } -static bool intel_dsi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_dsi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, @@ -276,7 +275,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, if (fixed_mode) { intel_fixed_panel_mode(fixed_mode, adjusted_mode); - if (HAS_GMCH_DISPLAY(dev_priv)) + 
if (HAS_GMCH(dev_priv)) intel_gmch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode); else @@ -285,11 +284,16 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; /* DSI uses short packets for sync events, so clear mode flags for DSI */ adjusted_mode->flags = 0; + if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888) + pipe_config->pipe_bpp = 24; + else + pipe_config->pipe_bpp = 18; + if (IS_GEN9_LP(dev_priv)) { /* Enable Frame time stamp based scanline reporting */ adjusted_mode->private_flags |= @@ -303,16 +307,16 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, ret = bxt_dsi_pll_compute(encoder, pipe_config); if (ret) - return false; + return -EINVAL; } else { ret = vlv_dsi_pll_compute(encoder, pipe_config); if (ret) - return false; + return -EINVAL; } pipe_config->clock_set = true; - return true; + return 0; } static bool glk_dsi_enable_io(struct intel_encoder *encoder) @@ -349,16 +353,18 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder) /* Wait for Pwr ACK */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, - MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED, - GLK_MIPIIO_PORT_POWERED, 20)) + if (intel_wait_for_register(&dev_priv->uncore, + MIPI_CTRL(port), + GLK_MIPIIO_PORT_POWERED, + GLK_MIPIIO_PORT_POWERED, + 20)) DRM_ERROR("MIPIO port is powergated\n"); } /* Check for cold boot scenario */ for_each_dsi_port(port, intel_dsi->ports) { - cold_boot |= !(I915_READ(MIPI_DEVICE_READY(port)) & - DEVICE_READY); + cold_boot |= + !(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY); } return cold_boot; @@ -373,9 +379,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) /* Wait for MIPI PHY status bit to set */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, - MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, - GLK_PHY_STATUS_PORT_READY, 20)) + if (intel_wait_for_register(&dev_priv->uncore, + MIPI_CTRL(port), + GLK_PHY_STATUS_PORT_READY, + GLK_PHY_STATUS_PORT_READY, + 20)) DRM_ERROR("PHY is not ON\n"); } @@ -399,8 +407,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) I915_WRITE(MIPI_DEVICE_READY(port), val); /* Wait for ULPS active */ - if (intel_wait_for_register(dev_priv, - MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20)) + if (intel_wait_for_register(&dev_priv->uncore, + MIPI_CTRL(port), + GLK_ULPS_NOT_ACTIVE, + 0, + 20)) DRM_ERROR("ULPS not active\n"); /* Exit ULPS */ @@ -423,17 +434,21 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) /* Wait for Stop state */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, - MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE, - GLK_DATA_LANE_STOP_STATE, 20)) + if (intel_wait_for_register(&dev_priv->uncore, + MIPI_CTRL(port), + GLK_DATA_LANE_STOP_STATE, + GLK_DATA_LANE_STOP_STATE, + 20)) DRM_ERROR("Date lane not in STOP state\n"); } /* Wait for AFE LATCH */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, - BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT, - AFE_LATCHOUT, 20)) + if (intel_wait_for_register(&dev_priv->uncore, + BXT_MIPI_PORT_CTRL(port), + AFE_LATCHOUT, + AFE_LATCHOUT, + 20)) DRM_ERROR("D-PHY not entering LP-11 state\n"); } } @@ -533,7 +548,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if 
(intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, 0, 20)) DRM_ERROR("PHY is not turning OFF\n"); @@ -541,7 +556,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) /* Wait for Pwr ACK bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED, 0, 20)) DRM_ERROR("MIPI IO Port is not powergated\n"); @@ -562,7 +577,7 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, 0, 20)) DRM_ERROR("PHY is not turning OFF\n"); @@ -612,7 +627,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) * Port A only. MIPI Port C has no similar bit for checking. */ if ((IS_GEN9_LP(dev_priv) || port == PORT_A) && - intel_wait_for_register(dev_priv, + intel_wait_for_register(&dev_priv->uncore, port_ctrl, AFE_LATCHOUT, 0, 30)) DRM_ERROR("DSI LP not going Low\n"); @@ -674,6 +689,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, LANE_CONFIGURATION_DUAL_LINK_B : LANE_CONFIGURATION_DUAL_LINK_A; } + + if (intel_dsi->pixel_format != MIPI_DSI_FMT_RGB888) + temp |= DITHERING_ENABLE; + /* assert ip_tg_enable signal */ I915_WRITE(port_ctrl, temp | DPI_ENABLE); POSTING_READ(port_ctrl); @@ -960,13 +979,15 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + intel_wakeref_t wakeref; enum port port; bool active = false; DRM_DEBUG_KMS("\n"); - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; /* @@ -1022,7 +1043,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, } out_put_power: - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return active; } @@ -1058,10 +1079,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, } fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK; - pipe_config->pipe_bpp = - mipi_dsi_pixel_format_to_bpp( - pixel_format_from_register_bits(fmt)); - bpp = pipe_config->pipe_bpp; + bpp = mipi_dsi_pixel_format_to_bpp( + pixel_format_from_register_bits(fmt)); /* Enable Frame time stamo based scanline reporting */ adjusted_mode->private_flags |= @@ -1199,11 +1218,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, if (IS_GEN9_LP(dev_priv)) { bxt_dsi_get_pipe_config(encoder, pipe_config); - pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp, - pipe_config); + pclk = bxt_dsi_get_pclk(encoder, pipe_config); } else { - pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp, - pipe_config); + pclk = vlv_dsi_get_pclk(encoder, pipe_config); } if (pclk) { @@ -1575,6 +1592,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector) enum drm_panel_orientation orientation; struct intel_plane *plane; struct intel_crtc *crtc; + intel_wakeref_t wakeref; enum pipe pipe; u32 val; @@ -1585,7 +1603,8 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector) plane = 
to_intel_plane(crtc->base.primary); power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return DRM_MODE_PANEL_ORIENTATION_UNKNOWN; val = I915_READ(DSPCNTR(plane->i9xx_plane)); @@ -1597,7 +1616,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector) else orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return orientation; } @@ -1625,7 +1644,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector) u32 allowed_scalers; allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); - if (!HAS_GMCH_DISPLAY(dev_priv)) + if (!HAS_GMCH(dev_priv)) allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); drm_connector_attach_scaling_mode_property(&connector->base, @@ -1650,7 +1669,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) struct drm_encoder *encoder; struct intel_connector *intel_connector; struct drm_connector *connector; - struct drm_display_mode *scan, *fixed_mode = NULL; + struct drm_display_mode *fixed_mode; enum port port; DRM_DEBUG_KMS("\n"); @@ -1689,6 +1708,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_encoder->post_disable = intel_dsi_post_disable; intel_encoder->get_hw_state = intel_dsi_get_hw_state; intel_encoder->get_config = intel_dsi_get_config; + intel_encoder->update_pipe = intel_panel_update_backlight; intel_connector->get_hw_state = intel_connector_get_hw_state; @@ -1760,13 +1780,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_connector_attach_encoder(intel_connector, intel_encoder); mutex_lock(&dev->mode_config.mutex); - intel_dsi_vbt_get_modes(intel_dsi); - list_for_each_entry(scan, &connector->probed_modes, head) { - if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { - fixed_mode = drm_mode_duplicate(dev, scan); - break; - } - } + fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); mutex_unlock(&dev->mode_config.mutex); if (!fixed_mode) { @@ -1774,9 +1788,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) goto err; } - connector->display_info.width_mm = fixed_mode->width_mm; - connector->display_info.height_mm = fixed_mode->height_mm; - intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_panel_setup_backlight(connector, INVALID_PIPE); diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c index a132a8037ecc..5e7b1fb2db5d 100644 --- a/drivers/gpu/drm/i915/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c @@ -244,7 +244,7 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder) * PLL lock should deassert within 200us. * Wait up to 1ms before timing out. 
*/ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 0, @@ -252,20 +252,12 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder) DRM_ERROR("Timeout waiting for PLL lock deassertion\n"); } -static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp) -{ - int bpp = mipi_dsi_pixel_format_to_bpp(fmt); - - WARN(bpp != pipe_bpp, - "bpp match assertion failure (expected %d, current %d)\n", - bpp, pipe_bpp); -} - -u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, +u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 dsi_clock, pclk; u32 pll_ctl, pll_div; u32 m = 0, p = 0, n; @@ -319,15 +311,12 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, dsi_clock = (m * refclk) / (p * n); - /* pixel_format and pipe_bpp should agree */ - assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp); - - pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp); + pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp); return pclk; } -u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, +u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { u32 pclk; @@ -335,12 +324,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, u32 dsi_ratio; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - /* Divide by zero */ - if (!pipe_bpp) { - DRM_ERROR("Invalid BPP(0)\n"); - return 0; - } + int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL); @@ -348,10 +332,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2; - /* pixel_format and pipe_bpp should agree */ - assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp); - - pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp); + pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp); DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk); return pclk; @@ -547,7 +528,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder, I915_WRITE(BXT_DSI_PLL_ENABLE, val); /* Timeout and fail if PLL not locked */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, BXT_DSI_PLL_LOCKED, |