Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 4
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 11
-rw-r--r--  drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h | 273
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 145
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio_regs.h | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 50
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 312
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 160
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 242
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.h | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 53
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color_regs.h | 42
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy_regs.h | 117
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 353
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 52
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 389
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 257
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_de.h | 186
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 713
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_conversion.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 126
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 30
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 107
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_reg_defs.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_trace.h | 56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 102
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_wa.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 185
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_regs.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_wl.c | 264
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_wl.h | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 340
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 250
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_tunnel.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 368
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.h | 48
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 596
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 613
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 82
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc_regs.h | 120
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 270
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.h | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fixed.h (renamed from drivers/gpu/drm/i915/i915_fixed.h) | 0
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_gsc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_gsc.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 96
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 58
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pmdemand.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pmdemand.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 40
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 533
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr_regs.h | 50
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c | 56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite_regs.h | 348
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 41
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 47
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 322
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark_regs.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h | 309
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 470
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_regs.h | 327
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 43
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 72
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gsc.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c | 39
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.c | 52
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 66
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_migrate.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 52
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_tlb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 227
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_slpc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/shmem_utils.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 95
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 80
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs_params.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_getparam.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_hwmon.c | 89
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 66
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1407
-rw-r--r--  drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_clock_gating.c | 60
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_step.c | 80
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 380
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 36
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c | 3
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_dram.c | 2
-rw-r--r--  drivers/gpu/drm/i915/vlv_sideband.c | 1
221 files changed, 7697 insertions, 6336 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index bc18e2d9ea05..d8397065c3f0 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -27,8 +27,8 @@ config DRM_I915_DEBUG
select REF_TRACKER
select STACKDEPOT
select STACKTRACE
- select DRM_DP_AUX_CHARDEV
- select DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE if DRM_I915_DP_TUNNEL
+ select DRM_DISPLAY_DP_AUX_CHARDEV
+ select DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG if DRM_I915_DP_TUNNEL
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3ef6ed41e62b..c8c8b31da4fb 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -32,16 +32,11 @@ endif
# Enable -Werror in CI and development
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
-# Fine grained warnings disable
-CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
-
# Support compiling the display code separately for both i915 and xe
# drivers. Define I915 when building i915.
subdir-ccflags-y += -DI915
-subdir-ccflags-y += -I$(srctree)/$(src)
+subdir-ccflags-y += -I$(src)
# Please keep these build lists sorted!
@@ -118,6 +113,7 @@ gt-y += \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
gt/intel_gt_buffer_pool.o \
+ gt/intel_gt_ccs_mode.o \
gt/intel_gt_clock_utils.o \
gt/intel_gt_debugfs.o \
gt/intel_gt_engines_debugfs.o \
@@ -270,6 +266,7 @@ i915-y += \
display/intel_display_rps.o \
display/intel_display_wa.o \
display/intel_dmc.o \
+ display/intel_dmc_wl.o \
display/intel_dpio_phy.o \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
@@ -433,7 +430,7 @@ no-header-test := \
always-$(CONFIG_DRM_I915_WERROR) += \
$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
- $(shell cd $(srctree)/$(src) && find * -name '*.h')))
+ $(shell cd $(src) && find * -name '*.h')))
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; \
diff --git a/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h b/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h
new file mode 100644
index 000000000000..275f4d9c3fb0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __BXT_DPIO_PHY_REGS_H__
+#define __BXT_DPIO_PHY_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* BXT PHY registers */
+#define _BXT_PHY0_BASE 0x6C000
+#define _BXT_PHY1_BASE 0x162000
+#define _BXT_PHY2_BASE 0x163000
+#define BXT_PHY_BASE(phy) \
+ _PICK_EVEN_2RANGES(phy, 1, \
+ _BXT_PHY0_BASE, _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE, _BXT_PHY2_BASE)
+
+#define _BXT_PHY(phy, reg) \
+ _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
+
+#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
+ (reg_ch1) - _BXT_PHY0_BASE))
+#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
+#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
+ ((lane) & 1) * 0x80)
+#define _MMIO_BXT_PHY_CH_LN(phy, ch, lane, reg_ch0, reg_ch1) \
+ _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) + _BXT_LANE_OFFSET(lane))
+
+/* BXT PHY PLL registers */
+#define _PORT_PLL_A 0x46074
+#define _PORT_PLL_B 0x46078
+#define _PORT_PLL_C 0x4607c
+#define PORT_PLL_ENABLE REG_BIT(31)
+#define PORT_PLL_LOCK REG_BIT(30)
+#define PORT_PLL_REF_SEL REG_BIT(27)
+#define PORT_PLL_POWER_ENABLE REG_BIT(26)
+#define PORT_PLL_POWER_STATE REG_BIT(25)
+#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
+
+#define _PORT_PLL_EBB_0_A 0x162034
+#define _PORT_PLL_EBB_0_B 0x6C034
+#define _PORT_PLL_EBB_0_C 0x6C340
+#define PORT_PLL_P1_MASK REG_GENMASK(15, 13)
+#define PORT_PLL_P1(p1) REG_FIELD_PREP(PORT_PLL_P1_MASK, (p1))
+#define PORT_PLL_P2_MASK REG_GENMASK(12, 8)
+#define PORT_PLL_P2(p2) REG_FIELD_PREP(PORT_PLL_P2_MASK, (p2))
+#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_0_B, \
+ _PORT_PLL_EBB_0_C)
+
+#define _PORT_PLL_EBB_4_A 0x162038
+#define _PORT_PLL_EBB_4_B 0x6C038
+#define _PORT_PLL_EBB_4_C 0x6C344
+#define PORT_PLL_RECALIBRATE REG_BIT(14)
+#define PORT_PLL_10BIT_CLK_ENABLE REG_BIT(13)
+#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_4_B, \
+ _PORT_PLL_EBB_4_C)
+
+#define _PORT_PLL_0_A 0x162100
+#define _PORT_PLL_0_B 0x6C100
+#define _PORT_PLL_0_C 0x6C380
+/* PORT_PLL_0_A */
+#define PORT_PLL_M2_INT_MASK REG_GENMASK(7, 0)
+#define PORT_PLL_M2_INT(m2_int) REG_FIELD_PREP(PORT_PLL_M2_INT_MASK, (m2_int))
+/* PORT_PLL_1_A */
+#define PORT_PLL_N_MASK REG_GENMASK(11, 8)
+#define PORT_PLL_N(n) REG_FIELD_PREP(PORT_PLL_N_MASK, (n))
+/* PORT_PLL_2_A */
+#define PORT_PLL_M2_FRAC_MASK REG_GENMASK(21, 0)
+#define PORT_PLL_M2_FRAC(m2_frac) REG_FIELD_PREP(PORT_PLL_M2_FRAC_MASK, (m2_frac))
+/* PORT_PLL_3_A */
+#define PORT_PLL_M2_FRAC_ENABLE REG_BIT(16)
+/* PORT_PLL_6_A */
+#define PORT_PLL_GAIN_CTL_MASK REG_GENMASK(18, 16)
+#define PORT_PLL_GAIN_CTL(x) REG_FIELD_PREP(PORT_PLL_GAIN_CTL_MASK, (x))
+#define PORT_PLL_INT_COEFF_MASK REG_GENMASK(12, 8)
+#define PORT_PLL_INT_COEFF(x) REG_FIELD_PREP(PORT_PLL_INT_COEFF_MASK, (x))
+#define PORT_PLL_PROP_COEFF_MASK REG_GENMASK(3, 0)
+#define PORT_PLL_PROP_COEFF(x) REG_FIELD_PREP(PORT_PLL_PROP_COEFF_MASK, (x))
+/* PORT_PLL_8_A */
+#define PORT_PLL_TARGET_CNT_MASK REG_GENMASK(9, 0)
+#define PORT_PLL_TARGET_CNT(x) REG_FIELD_PREP(PORT_PLL_TARGET_CNT_MASK, (x))
+/* PORT_PLL_9_A */
+#define PORT_PLL_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1)
+#define PORT_PLL_LOCK_THRESHOLD(x) REG_FIELD_PREP(PORT_PLL_LOCK_THRESHOLD_MASK, (x))
+/* PORT_PLL_10_A */
+#define PORT_PLL_DCO_AMP_OVR_EN_H REG_BIT(27)
+#define PORT_PLL_DCO_AMP_MASK REG_GENMASK(13, 10)
+#define PORT_PLL_DCO_AMP(x) REG_FIELD_PREP(PORT_PLL_DCO_AMP_MASK, (x))
+#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_0_B, \
+ _PORT_PLL_0_C)
+#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
+ (idx) * 4)
+
+/* BXT PHY common lane registers */
+#define _PORT_CL1CM_DW0_A 0x162000
+#define _PORT_CL1CM_DW0_BC 0x6C000
+#define PHY_POWER_GOOD REG_BIT(16)
+#define PHY_RESERVED REG_BIT(7)
+#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
+
+#define _PORT_CL1CM_DW9_A 0x162024
+#define _PORT_CL1CM_DW9_BC 0x6C024
+#define IREF0RC_OFFSET_MASK REG_GENMASK(15, 8)
+#define IREF0RC_OFFSET(x) REG_FIELD_PREP(IREF0RC_OFFSET_MASK, (x))
+#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
+
+#define _PORT_CL1CM_DW10_A 0x162028
+#define _PORT_CL1CM_DW10_BC 0x6C028
+#define IREF1RC_OFFSET_MASK REG_GENMASK(15, 8)
+#define IREF1RC_OFFSET(x) REG_FIELD_PREP(IREF1RC_OFFSET_MASK, (x))
+#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
+
+#define _PORT_CL1CM_DW28_A 0x162070
+#define _PORT_CL1CM_DW28_BC 0x6C070
+#define OCL1_POWER_DOWN_EN REG_BIT(23)
+#define DW28_OLDO_DYN_PWR_DOWN_EN REG_BIT(22)
+#define SUS_CLK_CONFIG REG_GENMASK(1, 0)
+#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
+
+#define _PORT_CL1CM_DW30_A 0x162078
+#define _PORT_CL1CM_DW30_BC 0x6C078
+#define OCL2_LDOFUSE_PWR_DIS REG_BIT(6)
+#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
+
+/* The spec defines this only for BXT PHY0, but lets assume that this
+ * would exist for PHY1 too if it had a second channel.
+ */
+#define _PORT_CL2CM_DW6_A 0x162358
+#define _PORT_CL2CM_DW6_BC 0x6C358
+#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
+#define DW6_OLDO_DYN_PWR_DOWN_EN REG_BIT(28)
+
+/* BXT PHY Ref registers */
+#define _PORT_REF_DW3_A 0x16218C
+#define _PORT_REF_DW3_BC 0x6C18C
+#define GRC_DONE REG_BIT(22)
+#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
+
+#define _PORT_REF_DW6_A 0x162198
+#define _PORT_REF_DW6_BC 0x6C198
+#define GRC_CODE_MASK REG_GENMASK(31, 24)
+#define GRC_CODE(x) REG_FIELD_PREP(GRC_CODE_MASK, (x))
+#define GRC_CODE_FAST_MASK REG_GENMASK(23, 16)
+#define GRC_CODE_FAST(x) REG_FIELD_PREP(GRC_CODE_FAST_MASK, (x))
+#define GRC_CODE_SLOW_MASK REG_GENMASK(15, 8)
+#define GRC_CODE_SLOW(x) REG_FIELD_PREP(GRC_CODE_SLOW_MASK, (x))
+#define GRC_CODE_NOM_MASK REG_GENMASK(7, 0)
+#define GRC_CODE_NOM(x) REG_FIELD_PREP(GRC_CODE_NOM_MASK, (x))
+#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
+
+#define _PORT_REF_DW8_A 0x1621A0
+#define _PORT_REF_DW8_BC 0x6C1A0
+#define GRC_DIS REG_BIT(15)
+#define GRC_RDY_OVRD REG_BIT(1)
+#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
+
+/* BXT PHY PCS registers */
+#define _PORT_PCS_DW10_LN01_A 0x162428
+#define _PORT_PCS_DW10_LN01_B 0x6C428
+#define _PORT_PCS_DW10_LN01_C 0x6C828
+#define _PORT_PCS_DW10_GRP_A 0x162C28
+#define _PORT_PCS_DW10_GRP_B 0x6CC28
+#define _PORT_PCS_DW10_GRP_C 0x6CE28
+#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_LN01_B, \
+ _PORT_PCS_DW10_LN01_C)
+#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_GRP_B, \
+ _PORT_PCS_DW10_GRP_C)
+
+#define TX2_SWING_CALC_INIT REG_BIT(31)
+#define TX1_SWING_CALC_INIT REG_BIT(30)
+
+#define _PORT_PCS_DW12_LN01_A 0x162430
+#define _PORT_PCS_DW12_LN01_B 0x6C430
+#define _PORT_PCS_DW12_LN01_C 0x6C830
+#define _PORT_PCS_DW12_LN23_A 0x162630
+#define _PORT_PCS_DW12_LN23_B 0x6C630
+#define _PORT_PCS_DW12_LN23_C 0x6CA30
+#define _PORT_PCS_DW12_GRP_A 0x162c30
+#define _PORT_PCS_DW12_GRP_B 0x6CC30
+#define _PORT_PCS_DW12_GRP_C 0x6CE30
+#define LANESTAGGER_STRAP_OVRD REG_BIT(6)
+#define LANE_STAGGER_MASK REG_GENMASK(4, 0)
+#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN01_B, \
+ _PORT_PCS_DW12_LN01_C)
+#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN23_B, \
+ _PORT_PCS_DW12_LN23_C)
+#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_GRP_B, \
+ _PORT_PCS_DW12_GRP_C)
+
+/* BXT PHY TX registers */
+#define _PORT_TX_DW2_LN0_A 0x162508
+#define _PORT_TX_DW2_LN0_B 0x6C508
+#define _PORT_TX_DW2_LN0_C 0x6C908
+#define _PORT_TX_DW2_GRP_A 0x162D08
+#define _PORT_TX_DW2_GRP_B 0x6CD08
+#define _PORT_TX_DW2_GRP_C 0x6CF08
+#define BXT_PORT_TX_DW2_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW2_LN0_B, \
+ _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_GRP_B, \
+ _PORT_TX_DW2_GRP_C)
+#define MARGIN_000_MASK REG_GENMASK(23, 16)
+#define MARGIN_000(x) REG_FIELD_PREP(MARGIN_000_MASK, (x))
+#define UNIQ_TRANS_SCALE_MASK REG_GENMASK(15, 8)
+#define UNIQ_TRANS_SCALE(x) REG_FIELD_PREP(UNIQ_TRANS_SCALE_MASK, (x))
+
+#define _PORT_TX_DW3_LN0_A 0x16250C
+#define _PORT_TX_DW3_LN0_B 0x6C50C
+#define _PORT_TX_DW3_LN0_C 0x6C90C
+#define _PORT_TX_DW3_GRP_A 0x162D0C
+#define _PORT_TX_DW3_GRP_B 0x6CD0C
+#define _PORT_TX_DW3_GRP_C 0x6CF0C
+#define BXT_PORT_TX_DW3_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW3_LN0_B, \
+ _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_GRP_B, \
+ _PORT_TX_DW3_GRP_C)
+#define SCALE_DCOMP_METHOD REG_BIT(26)
+#define UNIQUE_TRANGE_EN_METHOD REG_BIT(27)
+
+#define _PORT_TX_DW4_LN0_A 0x162510
+#define _PORT_TX_DW4_LN0_B 0x6C510
+#define _PORT_TX_DW4_LN0_C 0x6C910
+#define _PORT_TX_DW4_GRP_A 0x162D10
+#define _PORT_TX_DW4_GRP_B 0x6CD10
+#define _PORT_TX_DW4_GRP_C 0x6CF10
+#define BXT_PORT_TX_DW4_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW4_LN0_B, \
+ _PORT_TX_DW4_LN0_C)
+#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_GRP_B, \
+ _PORT_TX_DW4_GRP_C)
+#define DE_EMPHASIS_MASK REG_GENMASK(31, 24)
+#define DE_EMPHASIS(x) REG_FIELD_PREP(DE_EMPHASIS_MASK, (x))
+
+#define _PORT_TX_DW5_LN0_A 0x162514
+#define _PORT_TX_DW5_LN0_B 0x6C514
+#define _PORT_TX_DW5_LN0_C 0x6C914
+#define _PORT_TX_DW5_GRP_A 0x162D14
+#define _PORT_TX_DW5_GRP_B 0x6CD14
+#define _PORT_TX_DW5_GRP_C 0x6CF14
+#define BXT_PORT_TX_DW5_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW5_LN0_B, \
+ _PORT_TX_DW5_LN0_C)
+#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW5_GRP_B, \
+ _PORT_TX_DW5_GRP_C)
+#define DCC_DELAY_RANGE_1 REG_BIT(9)
+#define DCC_DELAY_RANGE_2 REG_BIT(8)
+
+#define _PORT_TX_DW14_LN0_A 0x162538
+#define _PORT_TX_DW14_LN0_B 0x6C538
+#define _PORT_TX_DW14_LN0_C 0x6C938
+#define LATENCY_OPTIM REG_BIT(30)
+#define BXT_PORT_TX_DW14_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \
+ _PORT_TX_DW14_LN0_B, \
+ _PORT_TX_DW14_LN0_C)
+
+#endif /* __BXT_DPIO_PHY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index dfe0b07a122d..06ec04e667e3 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -717,7 +717,6 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
{
intel_enable_dp(state, encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void vlv_enable_dp(struct intel_atomic_state *state,
@@ -726,7 +725,6 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
intel_edp_backlight_on(pipe_config, conn_state);
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index eda4a8b88590..79ecfc339430 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1155,7 +1155,6 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* ensure all panel commands dispatched before enabling transcoder */
wait_for_cmds_dispatched_to_panel(encoder);
@@ -1256,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
/* step7: enable backlight */
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
@@ -1615,8 +1616,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
- base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 2bb270f82932..7a77ae3dc394 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -62,7 +62,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_digital_connector_state *intel_conn_state =
+ const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
if (property == dev_priv->display.properties.force_audio)
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 07e0c73204f3..40e7d862675e 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -76,19 +76,6 @@ struct intel_audio_funcs {
struct intel_crtc_state *crtc_state);
};
-/* DP N/M table */
-#define LC_810M 810000
-#define LC_540M 540000
-#define LC_270M 270000
-#define LC_162M 162000
-
-struct dp_aud_n_m {
- int sample_rate;
- int clock;
- u16 m;
- u16 n;
-};
-
struct hdmi_aud_ncts {
int sample_rate;
int clock;
@@ -96,60 +83,6 @@ struct hdmi_aud_ncts {
int cts;
};
-/* Values according to DP 1.4 Table 2-104 */
-static const struct dp_aud_n_m dp_aud_n_m[] = {
- { 32000, LC_162M, 1024, 10125 },
- { 44100, LC_162M, 784, 5625 },
- { 48000, LC_162M, 512, 3375 },
- { 64000, LC_162M, 2048, 10125 },
- { 88200, LC_162M, 1568, 5625 },
- { 96000, LC_162M, 1024, 3375 },
- { 128000, LC_162M, 4096, 10125 },
- { 176400, LC_162M, 3136, 5625 },
- { 192000, LC_162M, 2048, 3375 },
- { 32000, LC_270M, 1024, 16875 },
- { 44100, LC_270M, 784, 9375 },
- { 48000, LC_270M, 512, 5625 },
- { 64000, LC_270M, 2048, 16875 },
- { 88200, LC_270M, 1568, 9375 },
- { 96000, LC_270M, 1024, 5625 },
- { 128000, LC_270M, 4096, 16875 },
- { 176400, LC_270M, 3136, 9375 },
- { 192000, LC_270M, 2048, 5625 },
- { 32000, LC_540M, 1024, 33750 },
- { 44100, LC_540M, 784, 18750 },
- { 48000, LC_540M, 512, 11250 },
- { 64000, LC_540M, 2048, 33750 },
- { 88200, LC_540M, 1568, 18750 },
- { 96000, LC_540M, 1024, 11250 },
- { 128000, LC_540M, 4096, 33750 },
- { 176400, LC_540M, 3136, 18750 },
- { 192000, LC_540M, 2048, 11250 },
- { 32000, LC_810M, 1024, 50625 },
- { 44100, LC_810M, 784, 28125 },
- { 48000, LC_810M, 512, 16875 },
- { 64000, LC_810M, 2048, 50625 },
- { 88200, LC_810M, 1568, 28125 },
- { 96000, LC_810M, 1024, 16875 },
- { 128000, LC_810M, 4096, 50625 },
- { 176400, LC_810M, 3136, 28125 },
- { 192000, LC_810M, 2048, 16875 },
-};
-
-static const struct dp_aud_n_m *
-audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
- if (rate == dp_aud_n_m[i].sample_rate &&
- crtc_state->port_clock == dp_aud_n_m[i].clock)
- return &dp_aud_n_m[i];
- }
-
- return NULL;
-}
-
static const struct {
int clock;
u32 config;
@@ -387,47 +320,17 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum port port = encoder->port;
- const struct dp_aud_n_m *nm;
- int rate;
- u32 tmp;
-
- rate = acomp ? acomp->aud_sample_rate[port] : 0;
- nm = audio_config_dp_get_n_m(crtc_state, rate);
- if (nm)
- drm_dbg_kms(&i915->drm, "using Maud %u, Naud %u\n", nm->m,
- nm->n);
- else
- drm_dbg_kms(&i915->drm, "using automatic Maud, Naud\n");
-
- tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder));
- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- tmp |= AUD_CONFIG_N_VALUE_INDEX;
-
- if (nm) {
- tmp &= ~AUD_CONFIG_N_MASK;
- tmp |= AUD_CONFIG_N(nm->n);
- tmp |= AUD_CONFIG_N_PROG_ENABLE;
- }
-
- intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp);
-
- tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
- tmp &= ~AUD_CONFIG_M_MASK;
- tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
- if (nm) {
- tmp |= nm->m;
- tmp |= AUD_M_CTS_M_VALUE_INDEX;
- tmp |= AUD_M_CTS_M_PROG_ENABLE;
- }
+ /* Enable time stamps. Let HW calculate Maud/Naud values */
+ intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
+ AUD_CONFIG_N_VALUE_INDEX |
+ AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK |
+ AUD_CONFIG_UPPER_N_MASK |
+ AUD_CONFIG_LOWER_N_MASK |
+ AUD_CONFIG_N_PROG_ENABLE,
+ AUD_CONFIG_N_VALUE_INDEX);
- intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -1349,17 +1252,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
static void i915_audio_component_init(struct drm_i915_private *i915)
{
u32 aud_freq, aud_freq_init;
- int ret;
-
- ret = component_add_typed(i915->drm.dev,
- &i915_audio_component_bind_ops,
- I915_COMPONENT_AUDIO);
- if (ret < 0) {
- drm_err(&i915->drm,
- "failed to add audio component (%d)\n", ret);
- /* continue with reduced functionality */
- return;
- }
if (DISPLAY_VER(i915) >= 9) {
aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
@@ -1382,6 +1274,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)
/* init with current cdclk */
intel_audio_cdclk_change_post(i915);
+}
+
+static void i915_audio_component_register(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = component_add_typed(i915->drm.dev,
+ &i915_audio_component_bind_ops,
+ I915_COMPONENT_AUDIO);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "failed to add audio component (%d)\n", ret);
+ /* continue with reduced functionality */
+ return;
+ }
i915->display.audio.component_registered = true;
}
@@ -1414,6 +1321,12 @@ void intel_audio_init(struct drm_i915_private *i915)
i915_audio_component_init(i915);
}
+void intel_audio_register(struct drm_i915_private *i915)
+{
+ if (!i915->display.audio.lpe.platdev)
+ i915_audio_component_register(i915);
+}
+
/**
* intel_audio_deinit() - deinitialize the audio driver
* @i915: the i915 drm device private data
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 9327954b801e..576c061d72a4 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_register(struct drm_i915_private *i915);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_audio_regs.h b/drivers/gpu/drm/i915/display/intel_audio_regs.h
index 616e7b1275c4..88ea2740365d 100644
--- a/drivers/gpu/drm/i915/display/intel_audio_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_audio_regs.h
@@ -148,4 +148,20 @@
#define HBLANK_START_COUNT_96 4
#define HBLANK_START_COUNT_128 5
+/* LPE Audio */
+#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000)
+#define I915_HDMI_LPE_AUDIO_SIZE 0x1000
+
+#define VLV_AUD_CHICKEN_BIT_REG _MMIO(VLV_DISPLAY_BASE + 0x62F38)
+#define VLV_CHICKEN_BIT_DBG_ENABLE (1 << 0)
+
+#define _VLV_AUD_PORT_EN_B_DBG 0x62F20
+#define _VLV_AUD_PORT_EN_C_DBG 0x62F30
+#define _VLV_AUD_PORT_EN_D_DBG 0x62F34
+#define VLV_AUD_PORT_EN_DBG(port) _MMIO_BASE_PORT3(VLV_DISPLAY_BASE, (port) - PORT_B, \
+ _VLV_AUD_PORT_EN_B_DBG, \
+ _VLV_AUD_PORT_EN_C_DBG, \
+ _VLV_AUD_PORT_EN_D_DBG)
+#define VLV_AMP_MUTE (1 << 1)
+
#endif /* __INTEL_AUDIO_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 1946d7fb3c2e..071668bfe5d1 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -83,16 +83,16 @@ static u32 scale_hw_to_user(struct intel_connector *connector,
u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0);
- if (i915->display.params.invert_brightness < 0)
+ if (display->params.invert_brightness < 0)
return val;
- if (i915->display.params.invert_brightness > 0 ||
- intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) {
+ if (display->params.invert_brightness > 0 ||
+ intel_has_quirk(display, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -126,15 +126,15 @@ u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
- if (i915->display.params.invert_brightness > 0 ||
- (i915->display.params.invert_brightness == 0 &&
- intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
+ if (display->params.invert_brightness > 0 ||
+ (display->params.invert_brightness == 0 &&
+ intel_has_quirk(display, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
@@ -761,8 +761,8 @@ static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
WARN_ON(panel->backlight.max == 0);
- if (panel->backlight.level <= panel->backlight.min) {
- panel->backlight.level = panel->backlight.max;
+ if (panel->backlight.level < panel->backlight.min) {
+ panel->backlight.level = panel->backlight.min;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
scale_hw_to_user(connector,
@@ -949,7 +949,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
else
props.power = FB_BLANK_POWERDOWN;
- name = kstrdup("intel_backlight", GFP_KERNEL);
+ name = kstrdup_const("intel_backlight", GFP_KERNEL);
if (!name)
return -ENOMEM;
@@ -963,7 +963,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
* compatibility. Use unique names for subsequent backlight devices as a
* fallback when the default name already exists.
*/
- kfree(name);
+ kfree_const(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
i915->drm.primary->index, connector->base.name);
if (!name)
@@ -987,7 +987,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
connector->base.base.id, connector->base.name, name);
out:
- kfree(name);
+ kfree_const(name);
return ret;
}
@@ -1642,17 +1642,17 @@ void intel_backlight_update(struct intel_atomic_state *state,
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
int ret;
if (!connector->panel.vbt.backlight.present) {
- if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) {
- drm_dbg_kms(&i915->drm,
+ if (intel_has_quirk(display, QUIRK_BACKLIGHT_PRESENT)) {
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n",
connector->base.base.id, connector->base.name);
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] no backlight present per VBT\n",
connector->base.base.id, connector->base.name);
return 0;
@@ -1660,16 +1660,16 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
}
/* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&i915->drm, !panel->backlight.funcs))
+ if (drm_WARN_ON(display->drm, !panel->backlight.funcs))
return -ENODEV;
/* set level and max in panel struct */
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
ret = panel->backlight.funcs->setup(connector, pipe);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] failed to setup backlight\n",
connector->base.base.id, connector->base.name);
return ret;
@@ -1677,7 +1677,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
panel->backlight.present = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n",
connector->base.base.id, connector->base.name,
str_enabled_disabled(panel->backlight.enabled),
@@ -1821,7 +1821,7 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (intel_dp_aux_init_backlight_funcs(connector) == 0)
return;
- if (!intel_has_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ if (!intel_has_quirk(&i915->display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
connector->panel.backlight.power = intel_pps_backlight_power;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index fe52c06271ef..5fb48b6129b6 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,6 +25,8 @@
*
*/
+#include <linux/firmware.h>
+
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_edid.h>
@@ -593,11 +595,14 @@ get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data,
return (const void *)data + ptrs->ptr[index].fp_timing.offset;
}
-static const struct lvds_pnp_id *
+static const struct drm_edid_product_id *
get_lvds_pnp_id(const struct bdb_lvds_lfp_data *data,
const struct bdb_lvds_lfp_data_ptrs *ptrs,
int index)
{
+ /* These two are supposed to have the same layout in memory. */
+ BUILD_BUG_ON(sizeof(struct lvds_pnp_id) != sizeof(struct drm_edid_product_id));
+
return (const void *)data + ptrs->ptr[index].panel_pnp_id.offset;
}
@@ -611,19 +616,6 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
return NULL;
}
-static void dump_pnp_id(struct drm_i915_private *i915,
- const struct lvds_pnp_id *pnp_id,
- const char *name)
-{
- u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name);
- char vend[4];
-
- drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n",
- name, drm_edid_decode_mfg_id(mfg_name, vend),
- pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial,
- pnp_id->mfg_week, pnp_id->mfg_year + 1990);
-}
-
static int opregion_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
@@ -662,21 +654,21 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
{
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
- const struct lvds_pnp_id *edid_id;
- struct lvds_pnp_id edid_id_nodate;
- const struct edid *edid = drm_edid_raw(drm_edid); /* FIXME */
+ struct drm_edid_product_id product_id, product_id_nodate;
+ struct drm_printer p;
int i, best = -1;
- if (!edid)
+ if (!drm_edid)
return -1;
- edid_id = (const void *)&edid->mfg_id[0];
+ drm_edid_get_product_id(drm_edid, &product_id);
- edid_id_nodate = *edid_id;
- edid_id_nodate.mfg_week = 0;
- edid_id_nodate.mfg_year = 0;
+ product_id_nodate = product_id;
+ product_id_nodate.week_of_manufacture = 0;
+ product_id_nodate.year_of_manufacture = 0;
- dump_pnp_id(i915, edid_id, "EDID");
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "EDID");
+ drm_edid_print_product_id(&p, &product_id, true);
ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
@@ -687,11 +679,11 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
return -1;
for (i = 0; i < 16; i++) {
- const struct lvds_pnp_id *vbt_id =
+ const struct drm_edid_product_id *vbt_id =
get_lvds_pnp_id(data, ptrs, i);
/* full match? */
- if (!memcmp(vbt_id, edid_id, sizeof(*vbt_id)))
+ if (!memcmp(vbt_id, &product_id, sizeof(*vbt_id)))
return i;
/*
@@ -699,7 +691,7 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
* and the VBT entry does not specify a date.
*/
if (best < 0 &&
- !memcmp(vbt_id, &edid_id_nodate, sizeof(*vbt_id)))
+ !memcmp(vbt_id, &product_id_nodate, sizeof(*vbt_id)))
best = i;
}
@@ -885,7 +877,8 @@ parse_lfp_data(struct drm_i915_private *i915,
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_tail *tail;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
- const struct lvds_pnp_id *pnp_id;
+ const struct drm_edid_product_id *pnp_id;
+ struct drm_printer p;
int panel_type = panel->vbt.panel_type;
ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
@@ -900,7 +893,9 @@ parse_lfp_data(struct drm_i915_private *i915,
parse_lfp_panel_dtd(i915, panel, data, ptrs);
pnp_id = get_lvds_pnp_id(data, ptrs, panel_type);
- dump_pnp_id(i915, pnp_id, "Panel");
+
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "Panel");
+ drm_edid_print_product_id(&p, pnp_id, false);
tail = get_lfp_data_tail(data, ptrs);
if (!tail)
@@ -1042,22 +1037,11 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
panel->vbt.backlight.controller = 0;
if (i915->display.vbt.version >= 191) {
- size_t exp_size;
+ const struct lfp_backlight_control_method *method;
- if (i915->display.vbt.version >= 236)
- exp_size = sizeof(struct bdb_lfp_backlight_data);
- else if (i915->display.vbt.version >= 234)
- exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
- else
- exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
-
- if (get_blocksize(backlight_data) >= exp_size) {
- const struct lfp_backlight_control_method *method;
-
- method = &backlight_data->backlight_control[panel_type];
- panel->vbt.backlight.type = method->type;
- panel->vbt.backlight.controller = method->controller;
- }
+ method = &backlight_data->backlight_control[panel_type];
+ panel->vbt.backlight.type = method->type;
+ panel->vbt.backlight.controller = method->controller;
}
panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
@@ -1955,16 +1939,12 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
-static void fixup_mipi_sequences(struct drm_i915_private *i915,
- struct intel_panel *panel)
+static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
u8 *init_otp;
int len;
- /* Limit this to VLV for now. */
- if (!IS_VALLEYVIEW(i915))
- return;
-
/* Limit this to v1 vid-mode sequences */
if (panel->vbt.dsi.config->is_cmd_mode ||
panel->vbt.dsi.seq_version != 1)
@@ -2000,6 +1980,41 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915,
panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
}
+/*
+ * Some machines (eg. Lenovo 82TQ) appear to have broken
+ * VBT sequences:
+ * - INIT_OTP is not present at all
+ * - what should be in INIT_OTP is in DISPLAY_ON
+ * - what should be in DISPLAY_ON is in BACKLIGHT_ON
+ * (along with the actual backlight stuff)
+ *
+ * To make those work we simply swap DISPLAY_ON and INIT_OTP.
+ *
+ * TODO: Do we need to limit this to specific machines,
+ * or examine the contents of the sequences to
+ * avoid false positives?
+ */
+static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
+ drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
+
+ swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
+ }
+}
+
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ if (DISPLAY_VER(i915) >= 11)
+ icl_fixup_mipi_sequences(i915, panel);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_fixup_mipi_sequences(i915, panel);
+}
+
static void
parse_mipi_sequence(struct drm_i915_private *i915,
struct intel_panel *panel)
@@ -2699,6 +2714,57 @@ static void parse_ddi_ports(struct drm_i915_private *i915)
print_ddi_port(devdata);
}
+static int child_device_expected_size(u16 version)
+{
+ BUILD_BUG_ON(sizeof(struct child_device_config) < 40);
+
+ if (version > 256)
+ return -ENOENT;
+ else if (version >= 256)
+ return 40;
+ else if (version >= 216)
+ return 39;
+ else if (version >= 196)
+ return 38;
+ else if (version >= 195)
+ return 37;
+ else if (version >= 111)
+ return LEGACY_CHILD_DEVICE_CONFIG_SIZE;
+ else if (version >= 106)
+ return 27;
+ else
+ return 22;
+}
+
+static bool child_device_size_valid(struct drm_i915_private *i915, int size)
+{
+ int expected_size;
+
+ expected_size = child_device_expected_size(i915->display.vbt.version);
+ if (expected_size < 0) {
+ expected_size = sizeof(struct child_device_config);
+ drm_dbg(&i915->drm,
+ "Expected child device config size for VBT version %u not known; assuming %d\n",
+ i915->display.vbt.version, expected_size);
+ }
+
+ /* Flag an error for unexpected size, but continue anyway. */
+ if (size != expected_size)
+ drm_err(&i915->drm,
+ "Unexpected child device config size %d (expected %d for VBT version %u)\n",
+ size, expected_size, i915->display.vbt.version);
+
+ /* The legacy sized child device config is the minimum we need. */
+ if (size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
+ drm_dbg_kms(&i915->drm,
+ "Child device config size %d is too small.\n",
+ size);
+ return false;
+ }
+
+ return true;
+}
+
static void
parse_general_definitions(struct drm_i915_private *i915)
{
@@ -2706,7 +2772,6 @@ parse_general_definitions(struct drm_i915_private *i915)
struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
int i, child_device_num;
- u8 expected_size;
u16 block_size;
int bus_pin;
@@ -2730,39 +2795,8 @@ parse_general_definitions(struct drm_i915_private *i915)
if (intel_gmbus_is_valid_pin(i915, bus_pin))
i915->display.vbt.crt_ddc_pin = bus_pin;
- if (i915->display.vbt.version < 106) {
- expected_size = 22;
- } else if (i915->display.vbt.version < 111) {
- expected_size = 27;
- } else if (i915->display.vbt.version < 195) {
- expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
- } else if (i915->display.vbt.version == 195) {
- expected_size = 37;
- } else if (i915->display.vbt.version <= 215) {
- expected_size = 38;
- } else if (i915->display.vbt.version <= 250) {
- expected_size = 39;
- } else {
- expected_size = sizeof(*child);
- BUILD_BUG_ON(sizeof(*child) < 39);
- drm_dbg(&i915->drm,
- "Expected child device config size for VBT version %u not known; assuming %u\n",
- i915->display.vbt.version, expected_size);
- }
-
- /* Flag an error for unexpected size, but continue anyway. */
- if (defs->child_dev_size != expected_size)
- drm_err(&i915->drm,
- "Unexpected child device config size %u (expected %u for VBT version %u)\n",
- defs->child_dev_size, expected_size, i915->display.vbt.version);
-
- /* The legacy sized child device config is the minimum we need. */
- if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
- drm_dbg_kms(&i915->drm,
- "Child device config size %u is too small.\n",
- defs->child_dev_size);
+ if (!child_device_size_valid(i915, defs->child_dev_size))
return;
- }
/* get the number of child device */
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
@@ -2838,9 +2872,8 @@ init_vbt_panel_defaults(struct intel_panel *panel)
static void
init_vbt_missing_defaults(struct drm_i915_private *i915)
{
+ unsigned int ports = DISPLAY_RUNTIME_INFO(i915)->port_mask;
enum port port;
- int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
- BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
return;
@@ -2950,6 +2983,43 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
return vbt;
}
+static struct vbt_header *firmware_get_vbt(struct drm_i915_private *i915,
+ size_t *size)
+{
+ struct vbt_header *vbt = NULL;
+ const struct firmware *fw = NULL;
+ const char *name = i915->display.params.vbt_firmware;
+ int ret;
+
+ if (!name || !*name)
+ return NULL;
+
+ ret = request_firmware(&fw, name, i915->drm.dev);
+ if (ret) {
+ drm_err(&i915->drm,
+ "Requesting VBT firmware \"%s\" failed (%d)\n",
+ name, ret);
+ return NULL;
+ }
+
+ if (intel_bios_is_valid_vbt(i915, fw->data, fw->size)) {
+ vbt = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (vbt) {
+ drm_dbg_kms(&i915->drm,
+ "Found valid VBT firmware \"%s\"\n", name);
+ if (size)
+ *size = fw->size;
+ }
+ } else {
+ drm_dbg_kms(&i915->drm, "Invalid VBT firmware \"%s\"\n",
+ name);
+ }
+
+ release_firmware(fw);
+
+ return vbt;
+}
+
static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
{
intel_uncore_write(uncore, PRIMARY_SPI_ADDRESS, offset);
@@ -2957,7 +3027,8 @@ static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER);
}
-static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
+static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915,
+ size_t *size)
{
u32 count, data, found, store = 0;
u32 static_region, oprom_offset;
@@ -3000,6 +3071,9 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
+ if (size)
+ *size = vbt_size;
+
return (struct vbt_header *)vbt;
err_free_vbt:
@@ -3008,7 +3082,8 @@ err_not_found:
return NULL;
}
-static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
+static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
+ size_t *sizep)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
void __iomem *p = NULL, *oprom;
@@ -3057,6 +3132,9 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
pci_unmap_rom(pdev, oprom);
+ if (sizep)
+ *sizep = vbt_size;
+
drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n");
return vbt;
@@ -3069,6 +3147,32 @@ err_unmap_oprom:
return NULL;
}
+static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915,
+ size_t *sizep)
+{
+ const struct vbt_header *vbt = NULL;
+ intel_wakeref_t wakeref;
+
+ vbt = firmware_get_vbt(i915, sizep);
+
+ if (!vbt)
+ vbt = intel_opregion_get_vbt(i915, sizep);
+
+ /*
+ * If the OpRegion does not have VBT, look in SPI flash
+ * through MMIO or PCI mapping
+ */
+ if (!vbt && IS_DGFX(i915))
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vbt = spi_oprom_get_vbt(i915, sizep);
+
+ if (!vbt)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vbt = oprom_get_vbt(i915, sizep);
+
+ return vbt;
+}
+
/**
* intel_bios_init - find VBT and initialize settings from the BIOS
* @i915: i915 device instance
@@ -3080,7 +3184,6 @@ err_unmap_oprom:
void intel_bios_init(struct drm_i915_private *i915)
{
const struct vbt_header *vbt;
- struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
INIT_LIST_HEAD(&i915->display.vbt.display_devices);
@@ -3094,21 +3197,7 @@ void intel_bios_init(struct drm_i915_private *i915)
init_vbt_defaults(i915);
- vbt = intel_opregion_get_vbt(i915, NULL);
-
- /*
- * If the OpRegion does not have VBT, look in SPI flash through MMIO or
- * PCI mapping
- */
- if (!vbt && IS_DGFX(i915)) {
- oprom_vbt = spi_oprom_get_vbt(i915);
- vbt = oprom_vbt;
- }
-
- if (!vbt) {
- oprom_vbt = oprom_get_vbt(i915);
- vbt = oprom_vbt;
- }
+ vbt = intel_bios_get_vbt(i915, NULL);
if (!vbt)
goto out;
@@ -3141,7 +3230,7 @@ out:
parse_sdvo_device_mapping(i915);
parse_ddi_ports(i915);
- kfree(oprom_vbt);
+ kfree(vbt);
}
static void intel_bios_init_panel(struct drm_i915_private *i915,
@@ -3313,8 +3402,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- if (intel_opregion_get_vbt(i915, NULL))
- return true;
+ return intel_opregion_vbt_present(i915);
}
return false;
@@ -3351,6 +3439,9 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
{
const struct child_device_config *child = &devdata->child;
+ if (!devdata)
+ return false;
+
if (!intel_bios_encoder_supports_dp(devdata) ||
!intel_bios_encoder_supports_hdmi(devdata))
return false;
@@ -3672,13 +3763,12 @@ static int intel_bios_vbt_show(struct seq_file *m, void *unused)
const void *vbt;
size_t vbt_size;
- /*
- * FIXME: VBT might originate from other places than opregion, and then
- * this would be incorrect.
- */
- vbt = intel_opregion_get_vbt(i915, &vbt_size);
- if (vbt)
+ vbt = intel_bios_get_vbt(i915, &vbt_size);
+
+ if (vbt) {
seq_write(m, vbt, vbt_size);
+ kfree(vbt);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 7f2a50b4f494..972ea887e232 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -162,7 +162,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
1);
if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to disable qgv points (%d) points: 0x%x\n", ret, points_mask);
+ drm_err(&dev_priv->drm,
+ "Failed to disable qgv points (0x%x) points: 0x%x\n",
+ ret, points_mask);
return ret;
}
@@ -290,8 +292,10 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp = &qi->points[i];
ret = intel_read_qgv_point_info(dev_priv, sp, i);
- if (ret)
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
return ret;
+ }
drm_dbg_kms(&dev_priv->drm,
"QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
@@ -659,6 +663,22 @@ static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
return bi->psf_bw[psf_gv_point];
}
+static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
+ int num_active_planes, int qgv_point)
+{
+ unsigned int idx;
+
+ if (DISPLAY_VER(i915) >= 12)
+ idx = tgl_max_bw_index(i915, num_active_planes, qgv_point);
+ else
+ idx = icl_max_bw_index(i915, num_active_planes, qgv_point);
+
+ if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ return 0;
+
+ return i915->display.bw.max[idx].deratedbw[qgv_point];
+}
+
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
if (!HAS_DISPLAY(dev_priv))
@@ -735,6 +755,7 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
+ bw_state->force_check_qgv = true;
drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
@@ -804,6 +825,80 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
+ int num_active_planes)
+{
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int max_bw_point = 0;
+ unsigned int max_bw = 0;
+ int i;
+
+ for (i = 0; i < num_qgv_points; i++) {
+ unsigned int max_data_rate =
+ icl_qgv_bw(i915, num_active_planes, i);
+
+ /*
+ * We need to know which qgv point gives us
+ * maximum bandwidth in order to disable SAGV
+ * if we find that we exceed SAGV block time
+ * with watermarks. By that moment we already
+ * have those, as it is calculated earlier in
+ * intel_atomic_check.
+ */
+ if (max_data_rate > max_bw) {
+ max_bw_point = BIT(i);
+ max_bw = max_data_rate;
+ }
+ }
+
+ return max_bw_point;
+}
+
+static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915,
+ unsigned int qgv_points,
+ unsigned int psf_points)
+{
+ return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915);
+}
+
+static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
+{
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int max_bw_point_mask = 0;
+ unsigned int max_bw = 0;
+ int i;
+
+ for (i = 0; i < num_psf_gv_points; i++) {
+ unsigned int max_data_rate = adl_psf_bw(i915, i);
+
+ if (max_data_rate > max_bw) {
+ max_bw_point_mask = BIT(i);
+ max_bw = max_data_rate;
+ } else if (max_data_rate == max_bw) {
+ max_bw_point_mask |= BIT(i);
+ }
+ }
+
+ return max_bw_point_mask;
+}
+
+static void icl_force_disable_sagv(struct drm_i915_private *i915,
+ struct intel_bw_state *bw_state)
+{
+ unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0);
+ unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915);
+
+ bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ qgv_points,
+ psf_points);
+
+ drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n",
+ bw_state->qgv_points_mask);
+
+ icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask);
+}
+
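The mask handed to PCode by icl_force_disable_sagv() is the set of points to disable, not the set to keep, which is why icl_prepare_qgv_points_mask() inverts the requested points and clips them to icl_qgv_points_mask(), i.e. to the points the platform actually has. A standalone sketch of that arithmetic; the bit positions in REQ_QGV_PT()/REQ_PSF_PT() below are simplified stand-ins, not the real ICL_PCODE_REQ_QGV_PT()/ADLS_PCODE_REQ_PSF_PT() layout.

#include <stdio.h>

/* Stand-ins: pretend QGV request bits live in [3:0] and PSF bits in [5:4]. */
#define REQ_QGV_PT(x)	((x) & 0xf)
#define REQ_PSF_PT(x)	(((x) & 0x3) << 4)

int main(void)
{
	unsigned int qgv_keep = 1u << 2;	/* keep only QGV point 2 */
	unsigned int psf_keep = 1u << 0;	/* keep only PSF point 0 */
	/* platform with 3 QGV points and 2 PSF points */
	unsigned int all_points = REQ_QGV_PT(0x7) | REQ_PSF_PT(0x3);

	/* Same shape as icl_prepare_qgv_points_mask(): invert, then clip. */
	unsigned int to_mask = ~(REQ_QGV_PT(qgv_keep) | REQ_PSF_PT(psf_keep)) & all_points;

	printf("points to disable: 0x%x\n", to_mask);	/* 0x23: QGV 0,1 and PSF 1 */
	return 0;
}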
static int mtl_find_qgv_points(struct drm_i915_private *i915,
unsigned int data_rate,
unsigned int num_active_planes,
@@ -881,8 +976,6 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int max_bw_point = 0;
- unsigned int max_bw = 0;
unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
u16 psf_points = 0;
@@ -895,31 +988,8 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return ret;
for (i = 0; i < num_qgv_points; i++) {
- unsigned int idx;
- unsigned int max_data_rate;
-
- if (DISPLAY_VER(i915) >= 12)
- idx = tgl_max_bw_index(i915, num_active_planes, i);
- else
- idx = icl_max_bw_index(i915, num_active_planes, i);
-
- if (idx >= ARRAY_SIZE(i915->display.bw.max))
- continue;
-
- max_data_rate = i915->display.bw.max[idx].deratedbw[i];
-
- /*
- * We need to know which qgv point gives us
- * maximum bandwidth in order to disable SAGV
- * if we find that we exceed SAGV block time
- * with watermarks. By that moment we already
- * have those, as it is calculated earlier in
- * intel_atomic_check,
- */
- if (max_data_rate > max_bw) {
- max_bw_point = i;
- max_bw = max_data_rate;
- }
+ unsigned int max_data_rate = icl_qgv_bw(i915,
+ num_active_planes, i);
if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
@@ -963,20 +1033,18 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* cause.
*/
if (!intel_can_enable_sagv(i915, new_bw_state)) {
- qgv_points = BIT(max_bw_point);
- drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point %d\n",
- max_bw_point);
+ qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes);
+ drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n",
+ qgv_points);
}
/*
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask =
- ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) &
- icl_qgv_points_mask(i915);
-
+ new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ qgv_points,
+ psf_points);
/*
* If the actual mask had changed we need to make sure that
* the commits are serialized(in case this is a nomodeset, nonblocking)
@@ -1272,8 +1340,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- intel_can_enable_sagv(i915, old_bw_state) !=
- intel_can_enable_sagv(i915, new_bw_state))
+ (intel_can_enable_sagv(i915, old_bw_state) !=
+ intel_can_enable_sagv(i915, new_bw_state) ||
+ new_bw_state->force_check_qgv))
changed = true;
/*
@@ -1287,6 +1356,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (ret)
return ret;
+ new_bw_state->force_check_qgv = false;
+
return 0;
}
@@ -1313,7 +1384,7 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_destroy_state = intel_bw_destroy_state,
};
-int intel_bw_init(struct drm_i915_private *dev_priv)
+int intel_bw_init(struct drm_i915_private *i915)
{
struct intel_bw_state *state;
@@ -1321,8 +1392,15 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj,
+ intel_atomic_global_obj_init(i915, &i915->display.bw.obj,
&state->base, &intel_bw_funcs);
+ /*
+ * Limit this only if we have SAGV. And for Display version 14 onwards
+ * SAGV is handled through pmdemand requests
+ */
+ if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13))
+ icl_force_disable_sagv(i915, state);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 59cb4fc5db76..161813cca473 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -47,12 +47,19 @@ struct intel_bw_state {
*/
u16 qgv_points_mask;
+ /*
+ * Flag to force the QGV comparison in atomic check right after the
+ * hw state readout
+ */
+ bool force_check_qgv;
+
int min_cdclk[I915_MAX_PIPES];
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
};
-#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
+#define to_intel_bw_state(global_state) \
+ container_of_const((global_state), struct intel_bw_state, base)
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
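The switch from container_of() to container_of_const() above is purely about const-correctness: the macro propagates the constness of the pointer it is given, so a const global-state pointer yields a const bw-state pointer instead of silently dropping the qualifier. A minimal illustration (kernel context assumed; example_read_mask() is not a real function):

static u16 example_read_mask(const struct intel_global_state *obj_state)
{
	/* const in, const out — writing through bw here would not compile. */
	const struct intel_bw_state *bw = to_intel_bw_state(obj_state);

	return bw->qgv_points_mask;
}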
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ed89b86ea625..7a833b5f2de2 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -39,6 +39,8 @@
#include "intel_pcode.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
+#include "skl_watermark.h"
+#include "skl_watermark_regs.h"
#include "vlv_sideband.h"
/**
@@ -63,6 +65,32 @@
* DMC will not change the active CDCLK frequency however, so that part
* will still be performed by the driver directly.
*
+ * There are multiple components involved in the generation of the CDCLK
+ * frequency:
+ *
+ * - We have the CDCLK PLL, which generates an output clock based on a
+ * reference clock and a ratio parameter.
+ * - The CD2X Divider, which divides the output of the PLL based on a
+ * divisor selected from a set of pre-defined choices.
+ * - The CD2X Squasher, which further divides the output based on a
+ * waveform represented as a sequence of bits where each zero
+ * "squashes out" a clock cycle.
+ * - And, finally, a fixed divider that divides the output frequency by 2.
+ *
+ * As such, the resulting CDCLK frequency can be calculated with the
+ * following formula:
+ *
+ * cdclk = vco / cd2x_div / (sq_len / sq_div) / 2
+ *
+ * , where vco is the frequency generated by the PLL; cd2x_div
+ * represents the CD2X Divider; sq_len and sq_div are the bit length
+ * and the number of high bits for the CD2X Squasher waveform, respectively;
+ * and 2 represents the fixed divider.
+ *
+ * Note that some older platforms do not contain the CD2X Divider
+ * and/or CD2X Squasher, in which case we can ignore their respective
+ * factors in the formula above.
+ *
* Several methods exist to change the CDCLK frequency, which ones are
* supported depends on the platform:
*
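As a concrete check of the formula documented above, take the first Xe2LPD table entry added later in this patch (refclk 38400 kHz, ratio 16, squash waveform 0xaaaa): vco = 38400 * 16 = 614400 kHz, the waveform has 8 high bits out of a 16-bit length, the fixed divider is 2, and a CD2X divider of 1 is assumed (the only value consistent with the table's 153600 kHz). A standalone, illustrative sketch of the arithmetic — cdclk_khz() below is not a driver function:

#include <stdio.h>

/* cdclk = vco / cd2x_div / (sq_len / sq_div) / 2, rearranged for integer math */
static int cdclk_khz(int refclk, int ratio, int cd2x_div,
		     unsigned int waveform, int sq_len)
{
	int vco = refclk * ratio;
	int sq_div = __builtin_popcount(waveform);	/* high bits in the squash waveform */

	return vco * sq_div / (cd2x_div * sq_len * 2);
}

int main(void)
{
	/* refclk 38400 kHz, ratio 16, waveform 0xaaaa -> 153600 kHz */
	printf("%d kHz\n", cdclk_khz(38400, 16, 1, 0xaaaa, 16));
	/* waveform 0xad5a has 9 high bits -> 172800 kHz, also matching the table */
	printf("%d kHz\n", cdclk_khz(38400, 16, 1, 0xad5a, 16));
	return 0;
}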
@@ -993,15 +1021,14 @@ static int skl_cdclk_decimal(int cdclk)
return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
-static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
- int vco)
+static void skl_set_preferred_cdclk_vco(struct drm_i915_private *i915, int vco)
{
- bool changed = dev_priv->skl_preferred_vco_freq != vco;
+ bool changed = i915->display.cdclk.skl_preferred_vco_freq != vco;
- dev_priv->skl_preferred_vco_freq = vco;
+ i915->display.cdclk.skl_preferred_vco_freq = vco;
if (changed)
- intel_update_max_cdclk(dev_priv);
+ intel_update_max_cdclk(i915);
}
static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco)
@@ -1205,7 +1232,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
* Use the current vco as our initial
* guess as to what the preferred vco is.
*/
- if (dev_priv->skl_preferred_vco_freq == 0)
+ if (dev_priv->display.cdclk.skl_preferred_vco_freq == 0)
skl_set_preferred_cdclk_vco(dev_priv,
dev_priv->display.cdclk.hw.vco);
return;
@@ -1213,7 +1240,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
cdclk_config = dev_priv->display.cdclk.hw;
- cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
+ cdclk_config.vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
if (cdclk_config.vco == 0)
cdclk_config.vco = 8100000;
cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
@@ -1391,7 +1418,7 @@ static const struct intel_cdclk_vals mtl_cdclk_table[] = {
{}
};
-static const struct intel_cdclk_vals lnl_cdclk_table[] = {
+static const struct intel_cdclk_vals xe2lpd_cdclk_table[] = {
{ .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa },
{ .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
{ .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
@@ -1656,6 +1683,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
}
out:
+ if (DISPLAY_VER(dev_priv) >= 20)
+ cdclk_config->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
@@ -1850,6 +1879,37 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
return vco == ~0;
}
+static bool mdclk_source_is_cdclk_pll(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 20;
+}
+
+static u32 xe2lpd_mdclk_source_sel(struct drm_i915_private *i915)
+{
+ if (mdclk_source_is_cdclk_pll(i915))
+ return MDCLK_SOURCE_SEL_CDCLK_PLL;
+
+ return MDCLK_SOURCE_SEL_CD2XCLK;
+}
+
+int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config)
+{
+ if (mdclk_source_is_cdclk_pll(i915))
+ return DIV_ROUND_UP(cdclk_config->vco, cdclk_config->cdclk);
+
+ /* Otherwise, source for MDCLK is CD2XCLK. */
+ return 2;
+}
+
+static void xe2lpd_mdclk_cdclk_ratio_program(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config)
+{
+ intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_mdclk_cdclk_ratio(i915, cdclk_config),
+ cdclk_config->joined_mbus);
+}
+
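Continuing with the same numbers: intel_mdclk_cdclk_ratio() above resolves to DIV_ROUND_UP(vco, cdclk) once the MDCLK source is the CDCLK PLL (display version 20+), and to a fixed 2 when MDCLK is fed from CD2XCLK. For the 153600 kHz Xe2LPD point that gives DIV_ROUND_UP(614400, 153600) = 4. A one-line check, with a local stand-in for the kernel's DIV_ROUND_UP():

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* same shape as the kernel macro */

int main(void)
{
	printf("mdclk:cdclk ratio = %d\n", DIV_ROUND_UP(614400, 153600));	/* 4 */
	return 0;
}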
static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915,
const struct intel_cdclk_config *old_cdclk_config,
const struct intel_cdclk_config *new_cdclk_config,
@@ -1954,7 +2014,7 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (DISPLAY_VER(i915) >= 20)
- val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
+ val |= xe2lpd_mdclk_source_sel(i915);
else
val |= skl_cdclk_decimal(cdclk);
@@ -1967,7 +2027,6 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
- u16 waveform;
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
@@ -1982,10 +2041,11 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
} else
bxt_cdclk_pll_update(dev_priv, vco);
- waveform = cdclk_squash_waveform(dev_priv, cdclk);
+ if (HAS_CDCLK_SQUASH(dev_priv)) {
+ u16 waveform = cdclk_squash_waveform(dev_priv, cdclk);
- if (HAS_CDCLK_SQUASH(dev_priv))
dg2_cdclk_squash_program(dev_priv, waveform);
+ }
intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe));
@@ -2030,6 +2090,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
+ if (DISPLAY_VER(dev_priv) >= 20 && cdclk < dev_priv->display.cdclk.hw.cdclk)
+ xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config);
+
if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw,
cdclk_config, &mid_cdclk_config)) {
_bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe);
@@ -2038,6 +2101,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
_bxt_set_cdclk(dev_priv, cdclk_config, pipe);
}
+ if (DISPLAY_VER(dev_priv) >= 20 && cdclk > dev_priv->display.cdclk.hw.cdclk)
+ xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config);
+
if (DISPLAY_VER(dev_priv) >= 14)
/*
* NOOP - No Pcode communication needed for
@@ -2260,16 +2326,15 @@ static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv,
}
/**
- * intel_cdclk_needs_modeset - Determine if changong between the CDCLK
- * configurations requires a modeset on all pipes
+ * intel_cdclk_clock_changed - Check whether the clock changed
* @a: first CDCLK configuration
* @b: second CDCLK configuration
*
* Returns:
- * True if changing between the two CDCLK configurations
- * requires all pipes to be off, false if not.
+ * True if the CDCLK changed in a way that requires re-programming,
+ * false otherwise.
*/
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
return a->cdclk != b->cdclk ||
@@ -2322,7 +2387,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
- return intel_cdclk_needs_modeset(a, b) ||
+ return intel_cdclk_clock_changed(a, b) ||
a->voltage_level != b->voltage_level;
}
@@ -2368,18 +2433,9 @@ static void intel_pcode_notify(struct drm_i915_private *i915,
ret);
}
-/**
- * intel_set_cdclk - Push the CDCLK configuration to the hardware
- * @dev_priv: i915 device
- * @cdclk_config: new CDCLK configuration
- * @pipe: pipe with which to synchronize the update
- *
- * Program the hardware based on the passed in CDCLK state,
- * if necessary.
- */
static void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
- enum pipe pipe)
+ enum pipe pipe, const char *context)
{
struct intel_encoder *encoder;
@@ -2389,7 +2445,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk))
return;
- intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to");
+ intel_cdclk_dump_config(dev_priv, cdclk_config, context);
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2519,6 +2575,17 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
update_cdclk, update_pipe_count);
}
+bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+
+ return new_cdclk_state && !new_cdclk_state->disable_pipes &&
+ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk;
+}
+
/**
* intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
* @state: intel atomic state
@@ -2534,7 +2601,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ struct intel_cdclk_config cdclk_config;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2543,12 +2611,32 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
- if (pipe == INVALID_PIPE ||
- old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (new_cdclk_state->disable_pipes) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ } else {
+ if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = new_cdclk_state->pipe;
+ } else {
+ cdclk_config = old_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ }
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+ cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
+ old_cdclk_state->actual.voltage_level);
}
+
+ /*
+ * mbus joining will be changed later by
+ * intel_dbuf_mbus_{pre,post}_ddb_update()
+ */
+ cdclk_config.joined_mbus = old_cdclk_state->actual.joined_mbus;
+
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &cdclk_config, pipe,
+ "Pre changing CDCLK to");
}
/**
@@ -2566,7 +2654,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2575,12 +2663,16 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_post_notify(state);
- if (pipe != INVALID_PIPE &&
- old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (!new_cdclk_state->disable_pipes &&
+ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
+ pipe = new_cdclk_state->pipe;
+ else
+ pipe = INVALID_PIPE;
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
- }
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe,
+ "Post changing CDCLK to");
}
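The split between the pre- and post-plane-update paths above follows one rule: while planes and pipes are being reconfigured, never run them at a CDCLK lower than what either the old or the new state requires, and request the higher of the two voltage levels in the meantime. A purely illustrative restatement of which frequency the pipes see during the plane update (the numbers in main() are arbitrary examples):

#include <stdbool.h>
#include <stdio.h>

static int cdclk_during_plane_update(int old_khz, int new_khz, bool disable_pipes)
{
	if (disable_pipes)
		return new_khz;		/* nothing is running, switch directly */

	/* Raise early (pre-plane), lower late (post-plane). */
	return new_khz >= old_khz ? new_khz : old_khz;
}

int main(void)
{
	printf("%d kHz\n", cdclk_during_plane_update(480000, 652800, false));	/* 652800: raised before */
	printf("%d kHz\n", cdclk_during_plane_update(652800, 480000, false));	/* 652800: lowered after */
	return 0;
}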
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -2731,25 +2823,6 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (crtc_state->dsc.compression_enable)
min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));
- /*
- * HACK. Currently for TGL/DG2 platforms we calculate
- * min_cdclk initially based on pixel_rate divided
- * by 2, accounting for also plane requirements,
- * however in some cases the lowest possible CDCLK
- * doesn't work and causing the underruns.
- * Explicitly stating here that this seems to be currently
- * rather a Hack, than final solution.
- */
- if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) {
- /*
- * Clamp to max_cdclk_freq in case pixel rate is higher,
- * in order not to break an 8K, but still leave W/A at place.
- */
- min_cdclk = max_t(int, min_cdclk,
- min_t(int, crtc_state->pixel_rate,
- dev_priv->display.cdclk.max_cdclk_freq));
- }
-
return min_cdclk;
}
@@ -2937,7 +3010,7 @@ static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state)
vco = cdclk_state->logical.vco;
if (!vco)
- vco = dev_priv->skl_preferred_vco_freq;
+ vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->hw.enable)
@@ -3058,6 +3131,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
return NULL;
cdclk_state->pipe = INVALID_PIPE;
+ cdclk_state->disable_pipes = false;
return &cdclk_state->base;
}
@@ -3121,6 +3195,20 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state,
return 0;
}
+int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ cdclk_state->actual.joined_mbus = joined_mbus;
+ cdclk_state->logical.joined_mbus = joined_mbus;
+
+ return intel_atomic_lock_global_state(&cdclk_state->base);
+}
+
int intel_cdclk_init(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state *cdclk_state;
@@ -3229,17 +3317,28 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
drm_dbg_kms(&dev_priv->drm,
"Can change cdclk cd2x divider with pipe %c active\n",
pipe_name(pipe));
- } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
+ } else if (intel_cdclk_clock_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
/* All pipes must be switched off while we change the cdclk. */
ret = intel_modeset_all_pipes_late(state, "CDCLK change");
if (ret)
return ret;
+ new_cdclk_state->disable_pipes = true;
+
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}
+ if (intel_mdclk_cdclk_ratio(dev_priv, &old_cdclk_state->actual) !=
+ intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual)) {
+ int ratio = intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual);
+
+ ret = intel_dbuf_state_set_mdclk_cdclk_ratio(state, ratio);
+ if (ret)
+ return ret;
+ }
+
drm_dbg_kms(&dev_priv->drm,
"New cdclk calculated to be logical %u kHz, actual %u kHz\n",
new_cdclk_state->logical.cdclk,
@@ -3297,7 +3396,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
- vco = dev_priv->skl_preferred_vco_freq;
+ vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
@@ -3339,13 +3438,13 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk;
}
- dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+ dev_priv->display.cdclk.max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
dev_priv->display.cdclk.max_cdclk_freq);
drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
- dev_priv->max_dotclk_freq);
+ dev_priv->display.cdclk.max_dotclk_freq);
}
/**
@@ -3519,7 +3618,7 @@ static int i915_cdclk_info_show(struct seq_file *m, void *unused)
seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
- seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+ seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->display.cdclk.max_dotclk_freq);
return 0;
}
@@ -3534,13 +3633,6 @@ void intel_cdclk_debugfs_register(struct drm_i915_private *i915)
i915, &i915_cdclk_info_fops);
}
-static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
- .get_cdclk = bxt_get_cdclk,
- .set_cdclk = bxt_set_cdclk,
- .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
- .calc_voltage_level = rplu_calc_voltage_level,
-};
-
static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
@@ -3684,10 +3776,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (DISPLAY_VER(dev_priv) >= 20) {
- dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs;
- dev_priv->display.cdclk.table = lnl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
+ dev_priv->display.cdclk.table = xe2lpd_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 14) {
- dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
dev_priv->display.cdclk.table = mtl_cdclk_table;
} else if (IS_DG2(dev_priv)) {
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 48fd7d39e0cd..cfdcdec07a4d 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -18,6 +18,8 @@ struct intel_crtc_state;
struct intel_cdclk_config {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
+ /* This field is only valid for Xe2LPD and above. */
+ bool joined_mbus;
};
struct intel_cdclk_state {
@@ -51,6 +53,9 @@ struct intel_cdclk_state {
/* bitmask of active pipes */
u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
@@ -60,8 +65,11 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
u32 intel_read_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b);
+int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config);
+bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state);
void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state);
void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
void intel_cdclk_dump_config(struct drm_i915_private *i915,
@@ -72,10 +80,13 @@ void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config);
int intel_cdclk_atomic_check(struct intel_atomic_state *state,
bool *need_cdclk_calc);
+int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
-#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
+#define to_intel_cdclk_state(global_state) \
+ container_of_const((global_state), struct intel_cdclk_state, base)
+
#define intel_atomic_get_old_cdclk_state(state) \
to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
#define intel_atomic_get_new_cdclk_state(state) \
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index ca7112b32cb3..d23163dc64d4 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -616,19 +616,19 @@ static void vlv_load_wgc_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe),
csc->coeff[1] << 16 | csc->coeff[0]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C02(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe),
csc->coeff[2]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe),
csc->coeff[4] << 16 | csc->coeff[3]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C12(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe),
csc->coeff[5]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe),
csc->coeff[7] << 16 | csc->coeff[6]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C22(pipe),
+ intel_de_write_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe),
csc->coeff[8]);
}
@@ -639,25 +639,25 @@ static void vlv_read_wgc_csc(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe));
csc->coeff[0] = tmp & 0xffff;
csc->coeff[1] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe));
csc->coeff[2] = tmp & 0xffff;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe));
csc->coeff[3] = tmp & 0xffff;
csc->coeff[4] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe));
csc->coeff[5] = tmp & 0xffff;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe));
csc->coeff[6] = tmp & 0xffff;
csc->coeff[7] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(pipe));
+ tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe));
csc->coeff[8] = tmp & 0xffff;
}
@@ -1227,7 +1227,7 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc,
lut = blob->data;
for (i = 0; i < 256; i++)
- intel_de_write_fw(dev_priv, PALETTE(pipe, i),
+ intel_de_write_fw(dev_priv, PALETTE(dev_priv, pipe, i),
i9xx_lut_8(&lut[i]));
}
@@ -1240,9 +1240,11 @@ static void i9xx_load_lut_10(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0),
i9xx_lut_10_ldw(&lut[i]));
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1),
i9xx_lut_10_udw(&lut[i]));
}
}
@@ -1274,9 +1276,11 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0),
i965_lut_10p6_ldw(&lut[i]));
- intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
+ intel_de_write_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1),
i965_lut_10p6_udw(&lut[i]));
}
@@ -3150,7 +3154,8 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i));
+ u32 val = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, i));
i9xx_lut_8_pack(&lut[i], val);
}
@@ -3176,8 +3181,10 @@ static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
- udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
+ ldw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0));
+ udw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1));
i9xx_lut_10_pack(&lut[i], ldw, udw);
}
@@ -3224,8 +3231,10 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
- u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
+ u32 ldw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 0));
+ u32 udw = intel_de_read_fw(dev_priv,
+ PALETTE(dev_priv, pipe, 2 * i + 1));
i965_lut_10p6_pack(&lut[i], ldw, udw);
}
diff --git a/drivers/gpu/drm/i915/display/intel_color_regs.h b/drivers/gpu/drm/i915/display/intel_color_regs.h
index 9f4ae58f3e7e..bb99ea533842 100644
--- a/drivers/gpu/drm/i915/display/intel_color_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_color_regs.h
@@ -8,7 +8,35 @@
#include "intel_display_reg_defs.h"
-/* legacy palette */
+/* GMCH palette */
+#define _PALETTE_A 0xa000
+#define _PALETTE_B 0xa800
+#define _CHV_PALETTE_C 0xc000
+/* 8bit mode / i965+ 10.6 interpolated mode ldw/udw */
+#define PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
+/* pre-i965 10bit interpolated mode ldw */
+#define PALETTE_10BIT_RED_LDW_MASK REG_GENMASK(23, 16)
+#define PALETTE_10BIT_GREEN_LDW_MASK REG_GENMASK(15, 8)
+#define PALETTE_10BIT_BLUE_LDW_MASK REG_GENMASK(7, 0)
+/* pre-i965 10bit interpolated mode udw */
+#define PALETTE_10BIT_RED_EXP_MASK REG_GENMASK(23, 22)
+#define PALETTE_10BIT_RED_MANT_MASK REG_GENMASK(21, 18)
+#define PALETTE_10BIT_RED_UDW_MASK REG_GENMASK(17, 16)
+#define PALETTE_10BIT_GREEN_EXP_MASK REG_GENMASK(15, 14)
+#define PALETTE_10BIT_GREEN_MANT_MASK REG_GENMASK(13, 10)
+#define PALETTE_10BIT_GREEN_UDW_MASK REG_GENMASK(9, 8)
+#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6)
+#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2)
+#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0)
+#define PALETTE(dev_priv, pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
+ _PICK_EVEN_2RANGES(pipe, 2, \
+ _PALETTE_A, _PALETTE_B, \
+ _CHV_PALETTE_C, _CHV_PALETTE_C) + \
+ (i) * 4)
+
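The new PALETTE() macro above folds the three per-pipe palette bases into one lookup: pipes A and B come from the evenly spaced first range (0xa000, 0xa800), pipe C on CHV from the second (0xc000), and each LUT entry is 4 bytes. A standalone sketch of the offset math — DISPLAY_MMIO_BASE and the real _PICK_EVEN_2RANGES() are not reproduced, only the resulting offsets from the defines above:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the offset part of PALETTE(dev_priv, pipe, i). */
static uint32_t palette_offset(int pipe, int i)
{
	uint32_t base = pipe < 2 ? 0xa000 + pipe * 0x800	/* _PALETTE_A / _PALETTE_B */
				 : 0xc000;			/* _CHV_PALETTE_C */

	return base + i * 4;
}

int main(void)
{
	printf("pipe A, entry 0:   0x%x\n", palette_offset(0, 0));	/* 0xa000 */
	printf("pipe B, entry 255: 0x%x\n", palette_offset(1, 255));	/* 0xabfc */
	printf("pipe C, entry 0:   0x%x\n", palette_offset(2, 0));	/* 0xc000 */
	return 0;
}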
+/* ilk+ palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
/* see PALETTE_* for the bits */
@@ -228,12 +256,12 @@
#define _PIPE_A_WGC_C21_C20 0x600C0 /* s2.10 */
#define _PIPE_A_WGC_C22 0x600C4 /* s2.10 */
-#define PIPE_WGC_C01_C00(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C01_C00)
-#define PIPE_WGC_C02(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C02)
-#define PIPE_WGC_C11_C10(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C11_C10)
-#define PIPE_WGC_C12(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C12)
-#define PIPE_WGC_C21_C20(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C21_C20)
-#define PIPE_WGC_C22(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C22)
+#define PIPE_WGC_C01_C00(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C01_C00)
+#define PIPE_WGC_C02(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C02)
+#define PIPE_WGC_C11_C10(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C11_C10)
+#define PIPE_WGC_C12(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C12)
+#define PIPE_WGC_C21_C20(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C21_C20)
+#define PIPE_WGC_C22(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C22)
/* pipe CSC & degamma/gamma LUTs on CHV */
#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index b0983edccf3f..0964e392d02c 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -25,28 +25,26 @@
4 * (dw))
#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy))
-#define CL_POWER_DOWN_ENABLE (1 << 4)
-#define SUS_CLOCK_CONFIG (3 << 0)
+#define CL_POWER_DOWN_ENABLE REG_BIT(4)
+#define SUS_CLOCK_CONFIG REG_GENMASK(1, 0)
#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy))
-#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
-#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
-#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
-#define PWR_UP_ALL_LANES (0x0 << 4)
-#define PWR_DOWN_LN_3_2_1 (0xe << 4)
-#define PWR_DOWN_LN_3_2 (0xc << 4)
-#define PWR_DOWN_LN_3 (0x8 << 4)
-#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
-#define PWR_DOWN_LN_1_0 (0x3 << 4)
-#define PWR_DOWN_LN_3_1 (0xa << 4)
-#define PWR_DOWN_LN_3_1_0 (0xb << 4)
-#define PWR_DOWN_LN_MASK (0xf << 4)
-#define PWR_DOWN_LN_SHIFT 4
-#define EDP4K2K_MODE_OVRD_EN (1 << 3)
-#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2)
+#define PG_SEQ_DELAY_OVERRIDE_MASK REG_GENMASK(26, 25)
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE REG_BIT(24)
+#define PWR_DOWN_LN_MASK REG_GENMASK(7, 4)
+#define PWR_UP_ALL_LANES REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x0)
+#define PWR_DOWN_LN_3_2_1 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xe)
+#define PWR_DOWN_LN_3_2 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xc)
+#define PWR_DOWN_LN_3 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x8)
+#define PWR_DOWN_LN_2_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x7)
+#define PWR_DOWN_LN_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x3)
+#define PWR_DOWN_LN_3_1 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xa)
+#define PWR_DOWN_LN_3_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xb)
+#define EDP4K2K_MODE_OVRD_EN REG_BIT(3)
+#define EDP4K2K_MODE_OVRD_OPTIMIZED REG_BIT(2)
#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy))
-#define ICL_LANE_ENABLE_AUX (1 << 0)
+#define ICL_LANE_ENABLE_AUX REG_BIT(0)
/* ICL Port COMP_DW registers */
#define _ICL_PORT_COMP 0x100
@@ -54,24 +52,22 @@
_ICL_PORT_COMP + 4 * (dw))
#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy))
-#define COMP_INIT (1 << 31)
+#define COMP_INIT REG_BIT(31)
#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy))
#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy))
-#define PROCESS_INFO_DOT_0 (0 << 26)
-#define PROCESS_INFO_DOT_1 (1 << 26)
-#define PROCESS_INFO_DOT_4 (2 << 26)
-#define PROCESS_INFO_MASK (7 << 26)
-#define PROCESS_INFO_SHIFT 26
-#define VOLTAGE_INFO_0_85V (0 << 24)
-#define VOLTAGE_INFO_0_95V (1 << 24)
-#define VOLTAGE_INFO_1_05V (2 << 24)
-#define VOLTAGE_INFO_MASK (3 << 24)
-#define VOLTAGE_INFO_SHIFT 24
+#define PROCESS_INFO_MASK REG_GENMASK(28, 26)
+#define PROCESS_INFO_DOT_0 REG_FIELD_PREP(PROCESS_INFO_MASK, 0)
+#define PROCESS_INFO_DOT_1 REG_FIELD_PREP(PROCESS_INFO_MASK, 1)
+#define PROCESS_INFO_DOT_4 REG_FIELD_PREP(PROCESS_INFO_MASK, 2)
+#define VOLTAGE_INFO_MASK REG_GENMASK(25, 24)
+#define VOLTAGE_INFO_0_85V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 0)
+#define VOLTAGE_INFO_0_95V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 1)
+#define VOLTAGE_INFO_1_05V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 2)
#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy))
-#define IREFGEN (1 << 24)
+#define IREFGEN REG_BIT(24)
#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy))
@@ -92,9 +88,9 @@
#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
#define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20)
#define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0)
-#define COMMON_KEEPER_EN (1 << 26)
-#define LATENCY_OPTIM_MASK (0x3 << 2)
-#define LATENCY_OPTIM_VAL(x) ((x) << 2)
+#define COMMON_KEEPER_EN REG_BIT(26)
+#define LATENCY_OPTIM_MASK REG_GENMASK(3, 2)
+#define LATENCY_OPTIM_VAL(x) REG_FIELD_PREP(LATENCY_OPTIM_MASK, (x))
/* ICL Port TX registers */
#define _ICL_PORT_TX_AUX 0x380
@@ -111,42 +107,49 @@
#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy))
-#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
-#define SWING_SEL_UPPER_MASK (1 << 15)
-#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
-#define SWING_SEL_LOWER_MASK (0x7 << 11)
-#define FRC_LATENCY_OPTIM_MASK (0x7 << 8)
-#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8)
-#define RCOMP_SCALAR(x) ((x) << 0)
-#define RCOMP_SCALAR_MASK (0xFF << 0)
+#define SWING_SEL_UPPER_MASK REG_BIT(15)
+#define SWING_SEL_UPPER(x) REG_FIELD_PREP(SWING_SEL_UPPER_MASK, (x) >> 3)
+#define SWING_SEL_LOWER_MASK REG_GENMASK(13, 11)
+#define SWING_SEL_LOWER(x) REG_FIELD_PREP(SWING_SEL_LOWER_MASK, (x) & 0x7)
+#define FRC_LATENCY_OPTIM_MASK REG_GENMASK(10, 8)
+#define FRC_LATENCY_OPTIM_VAL(x) REG_FIELD_PREP(FRC_LATENCY_OPTIM_MASK, (x))
+#define RCOMP_SCALAR_MASK REG_GENMASK(7, 0)
+#define RCOMP_SCALAR(x) REG_FIELD_PREP(RCOMP_SCALAR_MASK, (x))
#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy))
-#define LOADGEN_SELECT (1 << 31)
-#define POST_CURSOR_1(x) ((x) << 12)
-#define POST_CURSOR_1_MASK (0x3F << 12)
-#define POST_CURSOR_2(x) ((x) << 6)
-#define POST_CURSOR_2_MASK (0x3F << 6)
-#define CURSOR_COEFF(x) ((x) << 0)
-#define CURSOR_COEFF_MASK (0x3F << 0)
+#define LOADGEN_SELECT REG_BIT(31)
+#define POST_CURSOR_1_MASK REG_GENMASK(17, 12)
+#define POST_CURSOR_1(x) REG_FIELD_PREP(POST_CURSOR_1_MASK, (x))
+#define POST_CURSOR_2_MASK REG_GENMASK(11, 6)
+#define POST_CURSOR_2(x) REG_FIELD_PREP(POST_CURSOR_2_MASK, (x))
+#define CURSOR_COEFF_MASK REG_GENMASK(5, 0)
+#define CURSOR_COEFF(x) REG_FIELD_PREP(CURSOR_COEFF_MASK, (x))
#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy))
-#define TX_TRAINING_EN (1 << 31)
-#define TAP2_DISABLE (1 << 30)
-#define TAP3_DISABLE (1 << 29)
-#define SCALING_MODE_SEL(x) ((x) << 18)
-#define SCALING_MODE_SEL_MASK (0x7 << 18)
-#define RTERM_SELECT(x) ((x) << 3)
-#define RTERM_SELECT_MASK (0x7 << 3)
+#define TX_TRAINING_EN REG_BIT(31)
+#define TAP2_DISABLE REG_BIT(30)
+#define TAP3_DISABLE REG_BIT(29)
+#define SCALING_MODE_SEL_MASK REG_GENMASK(20, 18)
+#define SCALING_MODE_SEL(x) REG_FIELD_PREP(SCALING_MODE_SEL_MASK, (x))
+#define RTERM_SELECT_MASK REG_GENMASK(5, 3)
+#define RTERM_SELECT(x) REG_FIELD_PREP(RTERM_SELECT_MASK, (x))
+
+#define ICL_PORT_TX_DW6_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(6, phy))
+#define ICL_PORT_TX_DW6_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(6, phy))
+#define ICL_PORT_TX_DW6_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(6, ln, phy))
+#define O_FUNC_OVRD_EN REG_BIT(7)
+#define O_LDO_REF_SEL_CRI REG_GENMASK(6, 1)
+#define O_LDO_BYPASS_CRI REG_BIT(0)
#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy))
-#define N_SCALAR(x) ((x) << 24)
-#define N_SCALAR_MASK (0x7F << 24)
+#define N_SCALAR_MASK REG_GENMASK(30, 24)
+#define N_SCALAR(x) REG_FIELD_PREP(N_SCALAR_MASK, (x))
#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
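The conversions in this header are mechanical: REG_GENMASK(h, l) produces the same mask the old (value << shift) defines encoded by hand, and REG_FIELD_PREP(mask, x) shifts the value into that field with compile-time checks that it fits. A standalone check that, for example, PWR_DOWN_LN_3_2_1 keeps its old value of (0xe << 4); the GENMASK32()/FIELD_PREP32() macros below are local stand-ins with the same arithmetic, not the kernel's:

#include <stdio.h>

#define GENMASK32(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP32(mask, x)	(((x) << __builtin_ctz(mask)) & (mask))

#define PWR_DOWN_LN_MASK	GENMASK32(7, 4)
#define PWR_DOWN_LN_3_2_1	FIELD_PREP32(PWR_DOWN_LN_MASK, 0xe)

int main(void)
{
	printf("PWR_DOWN_LN_MASK  = 0x%x\n", PWR_DOWN_LN_MASK);		/* 0xf0 */
	printf("PWR_DOWN_LN_3_2_1 = 0x%x\n", PWR_DOWN_LN_3_2_1);	/* 0xe0 == 0xe << 4 */
	return 0;
}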
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 93479db0f89f..10e95dc425a6 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -348,7 +348,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int max_dotclk = dev_priv->max_dotclk_freq;
+ int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
@@ -356,9 +356,6 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 4bcf446c75f4..ccaa4cb2809b 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -12,33 +12,31 @@
#include "intel_hdmi.h"
#include "intel_vrr.h"
-static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+static void intel_dump_crtc_timings(struct drm_printer *p,
const struct drm_display_mode *mode)
{
- drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, "
- "hd=%d hb=%d-%d hs=%d-%d ht=%d, "
- "vd=%d vb=%d-%d vs=%d-%d vt=%d, "
- "flags=0x%x\n",
- mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
- mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
- mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->flags);
+ drm_printf(p, "crtc timings: clock=%d, "
+ "hd=%d hb=%d-%d hs=%d-%d ht=%d, "
+ "vd=%d vb=%d-%d vs=%d-%d vt=%d, "
+ "flags=0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
+ mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
+ mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->flags);
}
static void
-intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
+intel_dump_m_n_config(struct drm_printer *p,
+ const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
-
- drm_dbg_kms(&i915->drm,
- "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- id, lane_count,
- m_n->data_m, m_n->data_n,
- m_n->link_m, m_n->link_n, m_n->tu);
+ drm_printf(p, "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->data_m, m_n->data_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
}
static void
@@ -52,17 +50,7 @@ intel_dump_infoframe(struct drm_i915_private *i915,
}
static void
-intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
- const struct drm_dp_vsc_sdp *vsc)
-{
- struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
-
- drm_dp_vsc_sdp_log(&p, vsc);
-}
-
-static void
-intel_dump_buffer(struct drm_i915_private *i915,
- const char *prefix, const u8 *buf, size_t len)
+intel_dump_buffer(const char *prefix, const u8 *buf, size_t len)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
@@ -130,71 +118,66 @@ const char *intel_output_format_name(enum intel_output_format format)
return output_format_str[format];
}
-static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
+static void intel_dump_plane_state(struct drm_printer *p,
+ const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
if (!fb) {
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
- plane->base.base.id, plane->base.name,
- str_yes_no(plane_state->uapi.visible));
+ drm_printf(p, "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ str_yes_no(plane_state->uapi.visible));
return;
}
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
- plane->base.base.id, plane->base.name,
- fb->base.id, fb->width, fb->height, &fb->format->format,
- fb->modifier, str_yes_no(plane_state->uapi.visible));
- drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
+ drm_printf(p, "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height, &fb->format->format,
+ fb->modifier, str_yes_no(plane_state->uapi.visible));
+ drm_printf(p, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
if (plane_state->uapi.visible)
- drm_dbg_kms(&i915->drm,
- "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst));
+ drm_printf(p, "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
}
static void
-ilk_dump_csc(struct drm_i915_private *i915, const char *name,
+ilk_dump_csc(struct drm_i915_private *i915,
+ struct drm_printer *p,
+ const char *name,
const struct intel_csc_matrix *csc)
{
int i;
- drm_dbg_kms(&i915->drm,
- "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name,
- csc->preoff[0], csc->preoff[1], csc->preoff[2]);
+ drm_printf(p, "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->preoff[0], csc->preoff[1], csc->preoff[2]);
for (i = 0; i < 3; i++)
- drm_dbg_kms(&i915->drm,
- "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
- csc->coeff[3 * i + 0],
- csc->coeff[3 * i + 1],
- csc->coeff[3 * i + 2]);
+ drm_printf(p, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->coeff[3 * i + 0],
+ csc->coeff[3 * i + 1],
+ csc->coeff[3 * i + 2]);
if (DISPLAY_VER(i915) < 7)
return;
- drm_dbg_kms(&i915->drm,
- "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
- csc->postoff[0], csc->postoff[1], csc->postoff[2]);
+ drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->postoff[0], csc->postoff[1], csc->postoff[2]);
}
static void
-vlv_dump_csc(struct drm_i915_private *i915, const char *name,
+vlv_dump_csc(struct drm_printer *p, const char *name,
const struct intel_csc_matrix *csc)
{
int i;
for (i = 0; i < 3; i++)
- drm_dbg_kms(&i915->drm,
- "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
- csc->coeff[3 * i + 0],
- csc->coeff[3 * i + 1],
- csc->coeff[3 * i + 2]);
+ drm_printf(p, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->coeff[3 * i + 0],
+ csc->coeff[3 * i + 1],
+ csc->coeff[3 * i + 2]);
}
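The rewrite in this file replaces per-line drm_dbg_kms() calls with a single drm_printer, so the DRM_UT_KMS check is paid once up front and all of the dump helpers just take the printer. A sketch of the pattern as used by intel_crtc_state_dump() below; it assumes the kernel DRM context and uses only helpers already present in this file (drm_debug_enabled(), drm_dbg_printer(), drm_printf()):

static void example_dump(struct drm_i915_private *i915)
{
	struct drm_printer p;

	/* Bail out once instead of checking on every line. */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);

	/* Helpers now print through &p, so they no longer need the i915 pointer. */
	drm_printf(&p, "example: %d\n", 42);
}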
void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
@@ -205,85 +188,86 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
+ struct drm_printer p;
char buf[64];
int i;
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] enable: %s [%s]\n",
- crtc->base.base.id, crtc->base.name,
- str_yes_no(pipe_config->hw.enable), context);
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
+
+ drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n",
+ crtc->base.base.id, crtc->base.name,
+ str_yes_no(pipe_config->hw.enable), context);
if (!pipe_config->hw.enable)
goto dump_planes;
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
- drm_dbg_kms(&i915->drm,
- "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n",
- str_yes_no(pipe_config->hw.active),
- buf, pipe_config->output_types,
- intel_output_format_name(pipe_config->output_format),
- intel_output_format_name(pipe_config->sink_format));
-
- drm_dbg_kms(&i915->drm,
- "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
- transcoder_name(pipe_config->cpu_transcoder),
- pipe_config->pipe_bpp, pipe_config->dither);
-
- drm_dbg_kms(&i915->drm, "MST master transcoder: %s\n",
- transcoder_name(pipe_config->mst_master_transcoder));
-
- drm_dbg_kms(&i915->drm,
- "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
- transcoder_name(pipe_config->master_transcoder),
- pipe_config->sync_mode_slaves_mask);
-
- drm_dbg_kms(&i915->drm, "bigjoiner: %s, pipes: 0x%x\n",
- intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
- intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
- pipe_config->bigjoiner_pipes);
-
- drm_dbg_kms(&i915->drm, "splitter: %s, link count %d, overlap %d\n",
- str_enabled_disabled(pipe_config->splitter.enable),
- pipe_config->splitter.link_count,
- pipe_config->splitter.pixel_overlap);
+ drm_printf(&p, "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n",
+ str_yes_no(pipe_config->hw.active),
+ buf, pipe_config->output_types,
+ intel_output_format_name(pipe_config->output_format),
+ intel_output_format_name(pipe_config->sink_format));
+
+ drm_printf(&p, "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
+ pipe_config->pipe_bpp, pipe_config->dither);
+
+ drm_printf(&p, "MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
+
+ drm_printf(&p, "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+ transcoder_name(pipe_config->master_transcoder),
+ pipe_config->sync_mode_slaves_mask);
+
+ drm_printf(&p, "bigjoiner: %s, pipes: 0x%x\n",
+ intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
+ intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
+ pipe_config->bigjoiner_pipes);
+
+ drm_printf(&p, "splitter: %s, link count %d, overlap %d\n",
+ str_enabled_disabled(pipe_config->splitter.enable),
+ pipe_config->splitter.link_count,
+ pipe_config->splitter.pixel_overlap);
if (pipe_config->has_pch_encoder)
- intel_dump_m_n_config(pipe_config, "fdi",
+ intel_dump_m_n_config(&p, pipe_config, "fdi",
pipe_config->fdi_lanes,
&pipe_config->fdi_m_n);
if (intel_crtc_has_dp_encoder(pipe_config)) {
- intel_dump_m_n_config(pipe_config, "dp m_n",
+ intel_dump_m_n_config(&p, pipe_config, "dp m_n",
pipe_config->lane_count,
&pipe_config->dp_m_n);
- intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ intel_dump_m_n_config(&p, pipe_config, "dp m2_n2",
pipe_config->lane_count,
&pipe_config->dp_m2_n2);
- drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n",
- str_enabled_disabled(pipe_config->fec_enable),
- str_enabled_disabled(pipe_config->enhanced_framing));
-
- drm_dbg_kms(&i915->drm, "sdp split: %s\n",
- str_enabled_disabled(pipe_config->sdp_split_enable));
-
- drm_dbg_kms(&i915->drm, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
- str_enabled_disabled(pipe_config->has_psr),
- str_enabled_disabled(pipe_config->has_psr2),
- str_enabled_disabled(pipe_config->has_panel_replay),
- str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
+ drm_printf(&p, "fec: %s, enhanced framing: %s\n",
+ str_enabled_disabled(pipe_config->fec_enable),
+ str_enabled_disabled(pipe_config->enhanced_framing));
+
+ drm_printf(&p, "sdp split: %s\n",
+ str_enabled_disabled(pipe_config->sdp_split_enable));
+
+ drm_printf(&p, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
+ str_enabled_disabled(pipe_config->has_psr),
+ str_enabled_disabled(pipe_config->has_psr2),
+ str_enabled_disabled(pipe_config->has_panel_replay),
+ str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
}
- drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+ drm_printf(&p, "framestart delay: %d, MSA timing delay: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay);
- drm_dbg_kms(&i915->drm,
- "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
- pipe_config->has_audio, pipe_config->has_infoframe,
- pipe_config->infoframes.enable);
+ drm_printf(&p, "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
+ pipe_config->infoframes.enable);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
- drm_dbg_kms(&i915->drm, "GCP: 0x%x\n",
- pipe_config->infoframes.gcp);
+ drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
@@ -301,91 +285,88 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
- intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
+ drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC))
+ drm_dp_as_sdp_log(&p, &pipe_config->infoframes.as_sdp);
if (pipe_config->has_audio)
- intel_dump_buffer(i915, "ELD: ", pipe_config->eld,
+ intel_dump_buffer("ELD: ", pipe_config->eld,
drm_eld_size(pipe_config->eld));
- drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
- str_yes_no(pipe_config->vrr.enable),
- pipe_config->vrr.vmin, pipe_config->vrr.vmax,
- pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
- pipe_config->vrr.flipline,
- intel_vrr_vmin_vblank_start(pipe_config),
- intel_vrr_vmax_vblank_start(pipe_config));
-
- drm_dbg_kms(&i915->drm, "requested mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.mode));
- drm_dbg_kms(&i915->drm, "adjusted mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
- intel_dump_crtc_timings(i915, &pipe_config->hw.adjusted_mode);
- drm_dbg_kms(&i915->drm, "pipe mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
- intel_dump_crtc_timings(i915, &pipe_config->hw.pipe_mode);
- drm_dbg_kms(&i915->drm,
- "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
- pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
- pipe_config->pixel_rate);
-
- drm_dbg_kms(&i915->drm, "linetime: %d, ips linetime: %d\n",
- pipe_config->linetime, pipe_config->ips_linetime);
+ drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ str_yes_no(pipe_config->vrr.enable),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
+ pipe_config->vrr.flipline,
+ intel_vrr_vmin_vblank_start(pipe_config),
+ intel_vrr_vmax_vblank_start(pipe_config));
+
+ drm_printf(&p, "requested mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.mode));
+ drm_printf(&p, "adjusted mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
+ intel_dump_crtc_timings(&p, &pipe_config->hw.adjusted_mode);
+ drm_printf(&p, "pipe mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
+ intel_dump_crtc_timings(&p, &pipe_config->hw.pipe_mode);
+ drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
+ pipe_config->pixel_rate);
+
+ drm_printf(&p, "linetime: %d, ips linetime: %d\n",
+ pipe_config->linetime, pipe_config->ips_linetime);
if (DISPLAY_VER(i915) >= 9)
- drm_dbg_kms(&i915->drm,
- "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id,
- pipe_config->hw.scaling_filter);
+ drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id,
+ pipe_config->hw.scaling_filter);
if (HAS_GMCH(i915))
- drm_dbg_kms(&i915->drm,
- "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
+ drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
else
- drm_dbg_kms(&i915->drm,
- "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
- DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
- str_enabled_disabled(pipe_config->pch_pfit.enabled),
- str_yes_no(pipe_config->pch_pfit.force_thru));
+ drm_printf(&p, "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
+ DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
+ str_enabled_disabled(pipe_config->pch_pfit.enabled),
+ str_yes_no(pipe_config->pch_pfit.force_thru));
- drm_dbg_kms(&i915->drm, "ips: %i, double wide: %i, drrs: %i\n",
- pipe_config->ips_enabled, pipe_config->double_wide,
- pipe_config->has_drrs);
+ drm_printf(&p, "ips: %i, double wide: %i, drrs: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide,
+ pipe_config->has_drrs);
- intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state);
+ intel_dpll_dump_hw_state(i915, &p, &pipe_config->dpll_hw_state);
if (IS_CHERRYVIEW(i915))
- drm_dbg_kms(&i915->drm,
- "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->cgm_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
+ drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
else
- drm_dbg_kms(&i915->drm,
- "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->csc_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
-
- drm_dbg_kms(&i915->drm, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
- pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
- i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
- pipe_config->pre_csc_lut ?
- drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
- pipe_config->post_csc_lut ?
- drm_color_lut_size(pipe_config->post_csc_lut) : 0);
+ drm_printf(&p, "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+
+ drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
+ pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
+ i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
+ pipe_config->pre_csc_lut ?
+ drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
+ pipe_config->post_csc_lut ?
+ drm_color_lut_size(pipe_config->post_csc_lut) : 0);
if (DISPLAY_VER(i915) >= 11)
- ilk_dump_csc(i915, "output csc", &pipe_config->output_csc);
+ ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc);
if (!HAS_GMCH(i915))
- ilk_dump_csc(i915, "pipe csc", &pipe_config->csc);
+ ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc);
else if (IS_CHERRYVIEW(i915))
- vlv_dump_csc(i915, "cgm csc", &pipe_config->csc);
+ vlv_dump_csc(&p, "cgm csc", &pipe_config->csc);
else if (IS_VALLEYVIEW(i915))
- vlv_dump_csc(i915, "wgc csc", &pipe_config->csc);
+ vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
dump_planes:
if (!state)
@@ -393,6 +374,6 @@ dump_planes:
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
if (plane->pipe == crtc->pipe)
- intel_dump_plane_state(plane_state);
+ intel_dump_plane_state(&p, plane_state);
}
}
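
The hunks above convert the CRTC state dump from per-line drm_dbg_kms() calls to a single struct drm_printer that is created once and threaded through the helpers, so the same dump code can feed any drm_printer sink. A minimal sketch of that pattern, assuming the generic drm_dbg_printer() constructor from <drm/drm_print.h>; the constructor actually used by intel_crtc_state_dump() sits outside these hunks:

#include <drm/drm_print.h>

static void example_state_dump(struct drm_device *drm, int linetime, int ips_linetime)
{
	/* Route everything to the KMS debug category with a common prefix. */
	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_KMS, "crtc state");

	/* Helpers now take &p instead of re-deriving the drm_device themselves. */
	drm_printf(&p, "linetime: %d, ips linetime: %d\n",
		   linetime, ips_linetime);
}
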
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index f8b33999d43f..23a122ee20c9 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -36,12 +36,10 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
- base = i915_gem_object_get_dma_address(obj, 0);
+ base = plane_state->phys_dma_addr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -511,6 +509,24 @@ static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}
+static void wa_16021440873(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 ctl = plane_state->ctl;
+ int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1;
+ enum pipe pipe = plane->pipe;
+
+ ctl &= ~MCURSOR_MODE_MASK;
+ ctl |= MCURSOR_MODE_64_2B;
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), ctl);
+
+ intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
+ PIPESRC_HEIGHT(et_y_position));
+}
+
static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -531,7 +547,11 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
plane_state->ctl);
} else {
- i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+ /* Wa_16021440873 */
+ if (crtc_state->enable_psr2_su_region_et)
+ wa_16021440873(plane, crtc_state, plane_state);
+ else
+ i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
}
}
@@ -823,6 +843,28 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.format_mod_supported = intel_cursor_format_mod_supported,
};
+static void intel_cursor_add_size_hints_property(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_mode_config *config = &i915->drm.mode_config;
+ struct drm_plane_size_hint hints[4];
+ int size, max_size, num_hints = 0;
+
+ max_size = min(config->cursor_width, config->cursor_height);
+
+ /* for simplicity only enumerate the supported square+POT sizes */
+ for (size = 64; size <= max_size; size *= 2) {
+ if (drm_WARN_ON(&i915->drm, num_hints >= ARRAY_SIZE(hints)))
+ break;
+
+ hints[num_hints].width = size;
+ hints[num_hints].height = size;
+ num_hints++;
+ }
+
+ drm_plane_add_size_hints_property(&plane->base, hints, num_hints);
+}
+
struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
@@ -881,6 +923,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180);
+ intel_cursor_add_size_hints_property(cursor);
+
zpos = DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
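
intel_cursor_add_size_hints_property() above only advertises square, power-of-two cursor sizes from 64x64 up to the platform maximum. A standalone sketch of that enumeration, using a stand-in hint struct instead of the real drm_plane_size_hint so it compiles outside the kernel:

#include <stdio.h>

struct size_hint { int width, height; };	/* stand-in, not the real DRM type */

int main(void)
{
	struct size_hint hints[4];
	int max_size = 256;	/* min(cursor_width, cursor_height) on real hardware */
	int num_hints = 0;

	/* Same walk as the driver: 64, 128, 256, ... capped at the maximum. */
	for (int size = 64; size <= max_size && num_hints < 4; size *= 2) {
		hints[num_hints].width = size;
		hints[num_hints].height = size;
		num_hints++;
	}

	for (int i = 0; i < num_hints; i++)
		printf("hint %d: %dx%d\n", i, hints[i].width, hints[i].height);

	return 0;	/* prints 64x64, 128x128, 256x256 */
}
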
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 64e0f820a789..8e3b13884bb8 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -29,8 +29,11 @@
#define INTEL_CX0_LANE1 BIT(1)
#define INTEL_CX0_BOTH_LANES (INTEL_CX0_LANE1 | INTEL_CX0_LANE0)
-bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
+bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
+
if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
return true;
@@ -46,8 +49,7 @@ static int lane_mask_to_lane(u8 lane_mask)
return ilog2(lane_mask);
}
-static u8 intel_cx0_get_owned_lane_mask(struct drm_i915_private *i915,
- struct intel_encoder *encoder)
+static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -114,16 +116,20 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
}
-static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
- enum port port, int lane)
+static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane)
{
- intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, encoder->port, lane),
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
-static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
+static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
@@ -135,20 +141,22 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i
return;
}
- intel_clear_response_ready_flag(i915, port, lane);
+ intel_clear_response_ready_flag(encoder, lane);
}
-static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
+static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
int command, int lane, u32 *val)
{
- enum phy phy = intel_port_to_phy(i915, port);
-
- if (__intel_de_wait_for_register(i915,
- XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_MSGBUS_TIMEOUT_FAST_US,
- XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
+
+ if (intel_de_wait_custom(i915,
+ XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_MSGBUS_TIMEOUT_FAST_US,
+ XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
@@ -158,31 +166,33 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
"PHY %c Hardware did not detect a timeout\n",
phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
if (*val & XELPDP_PORT_P2M_ERROR_SET) {
drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy),
command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}
if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) {
drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy),
command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}
return 0;
}
-static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
+static int __intel_cx0_read_once(struct intel_encoder *encoder,
int lane, u16 addr)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
int ack;
u32 val;
@@ -191,7 +201,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
@@ -200,33 +210,34 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
XELPDP_PORT_M2P_COMMAND_READ |
XELPDP_PORT_M2P_ADDRESS(addr));
- ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
+ ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
if (ack < 0)
return ack;
- intel_clear_response_ready_flag(i915, port, lane);
+ intel_clear_response_ready_flag(encoder, lane);
/*
* FIXME: Workaround to let HW to settle
* down and let the message bus to end up
* in a known state
*/
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
}
-static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
+static u8 __intel_cx0_read(struct intel_encoder *encoder,
int lane, u16 addr)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
int i, status;
assert_dc_off(i915);
/* 3 tries is assumed to be enough to read successfully */
for (i = 0; i < 3; i++) {
- status = __intel_cx0_read_once(i915, port, lane, addr);
+ status = __intel_cx0_read_once(encoder, lane, addr);
if (status >= 0)
return status;
@@ -238,18 +249,20 @@ static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
return 0;
}
-static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
+static u8 intel_cx0_read(struct intel_encoder *encoder,
u8 lane_mask, u16 addr)
{
int lane = lane_mask_to_lane(lane_mask);
- return __intel_cx0_read(i915, port, lane, addr);
+ return __intel_cx0_read(encoder, lane, addr);
}
-static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
+static int __intel_cx0_write_once(struct intel_encoder *encoder,
int lane, u16 addr, u8 data, bool committed)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
int ack;
u32 val;
@@ -258,7 +271,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
@@ -274,45 +287,46 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
if (committed) {
- ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
+ ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
if (ack < 0)
return ack;
} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) &
XELPDP_PORT_P2M_ERROR_SET)) {
drm_dbg_kms(&i915->drm,
"PHY %c Error occurred during write command.\n", phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}
- intel_clear_response_ready_flag(i915, port, lane);
+ intel_clear_response_ready_flag(encoder, lane);
/*
* FIXME: Workaround to let HW to settle
* down and let the message bus to end up
* in a known state
*/
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
return 0;
}
-static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
+static void __intel_cx0_write(struct intel_encoder *encoder,
int lane, u16 addr, u8 data, bool committed)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
int i, status;
assert_dc_off(i915);
/* 3 tries is assumed to be enough to write successfully */
for (i = 0; i < 3; i++) {
- status = __intel_cx0_write_once(i915, port, lane, addr, data, committed);
+ status = __intel_cx0_write_once(encoder, lane, addr, data, committed);
if (status == 0)
return;
@@ -322,63 +336,66 @@ static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
"PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}
-static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
+static void intel_cx0_write(struct intel_encoder *encoder,
u8 lane_mask, u16 addr, u8 data, bool committed)
{
int lane;
for_each_cx0_lane_in_mask(lane_mask, lane)
- __intel_cx0_write(i915, port, lane, addr, data, committed);
+ __intel_cx0_write(encoder, lane, addr, data, committed);
}
-static void intel_c20_sram_write(struct drm_i915_private *i915, enum port port,
+static void intel_c20_sram_write(struct intel_encoder *encoder,
int lane, u16 addr, u16 data)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
assert_dc_off(i915);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_H, data >> 8, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_L, data & 0xff, 1);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_H, data >> 8, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_L, data & 0xff, 1);
}
-static u16 intel_c20_sram_read(struct drm_i915_private *i915, enum port port,
+static u16 intel_c20_sram_read(struct intel_encoder *encoder,
int lane, u16 addr)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u16 val;
assert_dc_off(i915);
- intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
- intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);
+ intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
+ intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);
- val = intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_H);
+ val = intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_H);
val <<= 8;
- val |= intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_L);
+ val |= intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_L);
return val;
}
-static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+static void __intel_cx0_rmw(struct intel_encoder *encoder,
int lane, u16 addr, u8 clear, u8 set, bool committed)
{
u8 old, val;
- old = __intel_cx0_read(i915, port, lane, addr);
+ old = __intel_cx0_read(encoder, lane, addr);
val = (old & ~clear) | set;
if (val != old)
- __intel_cx0_write(i915, port, lane, addr, val, committed);
+ __intel_cx0_write(encoder, lane, addr, val, committed);
}
-static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+static void intel_cx0_rmw(struct intel_encoder *encoder,
u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
u8 lane;
for_each_cx0_lane_in_mask(lane_mask, lane)
- __intel_cx0_rmw(i915, port, lane, addr, clear, set, committed);
+ __intel_cx0_rmw(encoder, lane, addr, clear, set, committed);
}
static u8 intel_c10_get_tx_vboost_lvl(const struct intel_crtc_state *crtc_state)
@@ -414,7 +431,6 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
u8 owned_lane_mask;
intel_wakeref_t wakeref;
int n_entries, ln;
@@ -423,7 +439,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
if (intel_tc_port_in_tbt_alt_mode(dig_port))
return;
- owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -433,14 +449,14 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
return;
}
- if (intel_is_c10phy(i915, phy)) {
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
+ if (intel_encoder_is_c10phy(encoder)) {
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CMN(3),
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CMN(3),
C10_CMN3_TXVBOOST_MASK,
C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)),
MB_WRITE_UNCOMMITTED);
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_TX(1),
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_TX(1),
C10_TX1_TERMCTL_MASK,
C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)),
MB_WRITE_COMMITTED);
@@ -455,27 +471,27 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
if (!(lane_mask & owned_lane_mask))
continue;
- intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor),
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing),
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor),
MB_WRITE_COMMITTED);
}
/* Write Override enables in 0xD71 */
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_OVRD,
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_OVRD,
0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2,
MB_WRITE_COMMITTED);
- if (intel_is_c10phy(i915, phy))
- intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
intel_cx0_phy_transaction_end(encoder, wakeref);
@@ -1811,7 +1827,7 @@ static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_cx0pll_state *pll_state = &crtc_state->cx0pll_state;
+ struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll;
int i;
if (intel_crtc_has_dp_encoder(crtc_state)) {
@@ -1843,7 +1859,7 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->cx0pll_state.c10 = *tables[i];
+ crtc_state->dpll_hw_state.cx0pll.c10 = *tables[i];
intel_c10pll_update_pll(crtc_state, encoder);
return 0;
@@ -1856,7 +1872,6 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c10pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 lane = INTEL_CX0_LANE0;
intel_wakeref_t wakeref;
int i;
@@ -1867,16 +1882,15 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
* According to C10 VDR Register programming Sequence we need
* to do this to read PHY internal registers from MsgBus.
*/
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, lane, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
- pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane,
- PHY_C10_VDR_PLL(i));
+ pll_state->pll[i] = intel_cx0_read(encoder, lane, PHY_C10_VDR_PLL(i));
- pll_state->cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
- pll_state->tx = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
+ pll_state->cmn = intel_cx0_read(encoder, lane, PHY_C10_VDR_CMN(0));
+ pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0));
intel_cx0_phy_transaction_end(encoder, wakeref);
}
@@ -1885,31 +1899,31 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_c10pll_state *pll_state = &crtc_state->cx0pll_state.c10;
+ const struct intel_c10pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c10;
int i;
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
/* Custom width needs to be programmed to 0 for both the phy lanes */
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
/* Program the pll values only for the master lane */
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
- intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
pll_state->pll[i],
(i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED);
- intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
- intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
}
@@ -2037,10 +2051,8 @@ static int intel_c20_phy_check_hdmi_link_rate(int clock)
int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock)
{
struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi);
- struct drm_i915_private *i915 = intel_hdmi_to_i915(hdmi);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(&dig_port->base))
return intel_c10_phy_check_hdmi_link_rate(clock);
return intel_c20_phy_check_hdmi_link_rate(clock);
}
@@ -2067,7 +2079,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
/* try computed C20 HDMI tables before using consolidated tables */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock,
- &crtc_state->cx0pll_state.c20) == 0)
+ &crtc_state->dpll_hw_state.cx0pll.c20) == 0)
return 0;
}
@@ -2077,7 +2089,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->cx0pll_state.c20 = *tables[i];
+ crtc_state->dpll_hw_state.cx0pll.c20 = *tables[i];
return 0;
}
}
@@ -2088,10 +2100,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
return intel_c10pll_calc_state(crtc_state, encoder);
return intel_c20pll_calc_state(crtc_state, encoder);
}
@@ -2149,7 +2158,6 @@ static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c20pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool cntx;
intel_wakeref_t wakeref;
int i;
@@ -2157,25 +2165,25 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
wakeref = intel_cx0_phy_transaction_begin(encoder);
/* 1. Read current context selection */
- cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;
+ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;
/* Read Tx configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
if (cntx)
- pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_TX_CNTX_CFG(i));
else
- pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_TX_CNTX_CFG(i));
}
/* Read common configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
if (cntx)
- pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_CMN_CNTX_CFG(i));
else
- pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_CMN_CNTX_CFG(i));
}
@@ -2183,20 +2191,20 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
/* MPLLB configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
- pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLB_CNTX_CFG(i));
else
- pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLB_CNTX_CFG(i));
}
} else {
/* MPLLA configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
- pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLA_CNTX_CFG(i));
else
- pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLA_CNTX_CFG(i));
}
}
@@ -2327,7 +2335,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20;
+ const struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20;
bool dp = false;
int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
u32 clock = crtc_state->port_clock;
@@ -2338,7 +2346,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
dp = true;
/* 1. Read current context selection */
- cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
+ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
/*
* 2. If there is a protocol switch from HDMI to DP or vice versa, clear
@@ -2347,7 +2355,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
*/
if (intel_c20_protocol_switch_valid(encoder)) {
for (i = 0; i < 4; i++)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0);
usleep_range(4000, 4100);
}
@@ -2355,63 +2363,63 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
/* 3.1 Tx configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]);
}
/* 3.2 common configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]);
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]);
}
/* 3.3 mpllb or mplla configuration */
if (intel_c20phy_use_mpllb(pll_state)) {
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLB_CNTX_CFG(i),
pll_state->mpllb[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLB_CNTX_CFG(i),
pll_state->mpllb[i]);
}
} else {
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
else
- intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
}
}
/* 4. Program custom width to match the link protocol */
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_WIDTH,
PHY_C20_CUSTOM_WIDTH_MASK,
PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)),
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
if (dp) {
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)),
MB_WRITE_COMMITTED);
} else {
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
is_hdmi_frl(clock) ? BIT(7) : 0,
MB_WRITE_COMMITTED);
- intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
+ intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
intel_c20_get_hdmi_rate(clock),
MB_WRITE_COMMITTED);
}
@@ -2420,7 +2428,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
* 7. Write Vendor specific registers to toggle context setting to load
* the updated programming toggle context bit
*/
- intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
}
@@ -2476,9 +2484,9 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA*/
if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
- val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
+ val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
else
- val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
+ val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
@@ -2508,11 +2516,12 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
return val;
}
-static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
- enum port port,
+static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
u8 lane_mask, u8 state)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port);
int lane;
@@ -2528,7 +2537,7 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n",
phy_name(phy));
- intel_cx0_bus_reset(i915, port, lane);
+ intel_cx0_bus_reset(encoder, lane);
}
intel_de_rmw(i915, buf_ctl2_reg,
@@ -2536,15 +2545,18 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
intel_cx0_get_powerdown_update(lane_mask));
/* Update Timeout Value */
- if (__intel_de_wait_for_register(i915, buf_ctl2_reg,
- intel_cx0_get_powerdown_update(lane_mask), 0,
- XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, buf_ctl2_reg,
+ intel_cx0_get_powerdown_update(lane_mask), 0,
+ XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
}
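
The wait-helper conversions in this file replace __intel_de_wait_for_register() with intel_de_wait_custom() while keeping the argument order visible in the hunks: register, mask, expected value, fast timeout in microseconds, slow timeout in milliseconds, and an optional readback pointer, with a non-zero return on timeout. An annotated copy of one call shape from above; the parameter roles are inferred from the surrounding timeout macros and should be treated as an assumption:

if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL1(i915, port),
			 XELPDP_PORT_BUF_SOC_PHY_READY,		/* mask */
			 XELPDP_PORT_BUF_SOC_PHY_READY,		/* expected value */
			 XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US,	/* fast timeout (us) */
			 0,					/* slow timeout (ms) */
			 NULL))					/* readback not needed */
	drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
		 phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
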
-static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
+static void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port),
XELPDP_POWER_STATE_READY_MASK,
XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
@@ -2577,13 +2589,13 @@ static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask)
return val;
}
-static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
- struct intel_encoder *encoder,
+static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
bool lane_reversal)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(i915, port);
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0;
u32 lane_pipe_reset = owned_lane_mask == INTEL_CX0_BOTH_LANES
? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
@@ -2593,19 +2605,19 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(i915, port),
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL1(i915, port),
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset,
lane_pipe_reset);
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(i915, port),
- lane_phy_current_status, lane_phy_current_status,
- XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL2(i915, port),
+ lane_phy_current_status, lane_phy_current_status,
+ XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
@@ -2613,16 +2625,16 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
- intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
- intel_cx0_get_pclk_refclk_ack(lane_mask),
- XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
+ intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
+ intel_cx0_get_pclk_refclk_ack(lane_mask),
+ XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
- intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
CX0_P2_STATE_RESET);
- intel_cx0_setup_powerdown(i915, port);
+ intel_cx0_setup_powerdown(encoder);
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0);
@@ -2640,11 +2652,10 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
int i;
u8 disables;
bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder));
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
- enum port port = encoder->port;
+ u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
- if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
- intel_cx0_rmw(i915, port, owned_lane_mask,
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask,
PHY_C10_VDR_CONTROL(1), 0,
C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
@@ -2666,14 +2677,14 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
if (!(owned_lane_mask & lane_mask))
continue;
- intel_cx0_rmw(i915, port, lane_mask, PHY_CX0_TX_CONTROL(tx, 2),
+ intel_cx0_rmw(encoder, lane_mask, PHY_CX0_TX_CONTROL(tx, 2),
CONTROL2_DISABLE_SINGLE_TX,
disables & BIT(i) ? CONTROL2_DISABLE_SINGLE_TX : 0,
MB_WRITE_COMMITTED);
}
- if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
- intel_cx0_rmw(i915, port, owned_lane_mask,
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask,
PHY_C10_VDR_CONTROL(1), 0,
C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
@@ -2705,7 +2716,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
@@ -2719,13 +2730,13 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
/* 2. Bring PHY out of reset. */
- intel_cx0_phy_lane_reset(i915, encoder, lane_reversal);
+ intel_cx0_phy_lane_reset(encoder, lane_reversal);
/*
* 3. Change Phy power state to Ready.
* TODO: For DP alt mode use only one lane.
*/
- intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
CX0_P2_STATE_READY);
/*
@@ -2735,7 +2746,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
*/
/* 5. Program PHY internal PLL internal registers. */
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
intel_c10_pll_program(i915, crtc_state, encoder);
else
intel_c20_pll_program(i915, crtc_state, encoder);
@@ -2767,10 +2778,10 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
intel_cx0_get_pclk_pll_request(maxpclk_lane));
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
- intel_cx0_get_pclk_pll_ack(maxpclk_lane),
- XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_pclk_pll_ack(maxpclk_lane),
+ XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
@@ -2831,7 +2842,7 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val = 0;
/*
@@ -2858,10 +2869,10 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val);
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- XELPDP_TBT_CLOCK_ACK,
- XELPDP_TBT_CLOCK_ACK,
- 100, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ XELPDP_TBT_CLOCK_ACK,
+ XELPDP_TBT_CLOCK_ACK,
+ 100, 0, NULL))
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
@@ -2892,12 +2903,12 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder,
static void intel_cx0pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- bool is_c10 = intel_is_c10phy(i915, phy);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ bool is_c10 = intel_encoder_is_c10phy(encoder);
intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
/* 1. Change owned PHY lane power to Disable state. */
- intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
is_c10 ? CX0_P2PG_STATE_DISABLE :
CX0_P4PG_STATE_DISABLE);
@@ -2920,10 +2931,10 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/*
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
*/
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
- intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
- XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
+ intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
+ XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n",
phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
@@ -2944,7 +2955,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
/*
* 1. Follow the Display Voltage Frequency Switching Sequence Before
@@ -2958,8 +2969,8 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
XELPDP_TBT_CLOCK_REQUEST, 0);
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
- if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
+ if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
@@ -3014,7 +3025,7 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
struct intel_c10pll_state *mpllb_hw_state)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
+ const struct intel_c10pll_state *mpllb_sw_state = &state->dpll_hw_state.cx0pll.c10;
int i;
if (intel_crtc_needs_fastset(state))
@@ -3043,10 +3054,7 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_cx0pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
intel_c10pll_readout_hw_state(encoder, &pll_state->c10);
else
intel_c20pll_readout_hw_state(encoder, &pll_state->c20);
@@ -3055,10 +3063,7 @@ void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
@@ -3070,7 +3075,7 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
struct intel_c20pll_state *mpll_hw_state)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
+ const struct intel_c20pll_state *mpll_sw_state = &state->dpll_hw_state.cx0pll.c20;
bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state);
bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state);
int i;
@@ -3124,7 +3129,6 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
struct intel_cx0pll_state mpll_hw_state = {};
- enum phy phy;
if (DISPLAY_VER(i915) < 14)
return;
@@ -3138,14 +3142,13 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
return;
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
- phy = intel_port_to_phy(i915, encoder->port);
if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10);
else
intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index c6682677253a..3e03af3e006c 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -11,7 +11,6 @@
#include <linux/bits.h>
enum icl_port_dpll_id;
-enum phy;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_c10pll_state;
@@ -22,7 +21,7 @@ struct intel_crtc_state;
struct intel_encoder;
struct intel_hdmi;
-bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy);
+bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_mtl_pll_disable(struct intel_encoder *encoder);
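
The header change above is the visible edge of the refactor running through this series: helpers take a struct intel_encoder * and derive the i915 device, port and phy internally instead of making every caller pass them. A self-contained stand-in sketch of that style; the types and the version/phy check below are illustrative, not the real i915 definitions:

struct device_ctx { int display_ver; };
struct encoder { struct device_ctx *dev; int phy; };

/* Old style: callers look up and pass each piece of context separately. */
int is_c10_phy_old(struct device_ctx *dev, int phy)
{
	return dev->display_ver == 14 && phy < 2;
}

/* New style: one handle in, everything else derived inside the helper. */
int is_c10_phy_new(struct encoder *enc)
{
	struct device_ctx *dev = enc->dev;	/* like to_i915(encoder->base.dev) */
	int phy = enc->phy;			/* like intel_encoder_to_phy() */

	return dev->display_ver == 14 && phy < 2;
}
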
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index c587a8efeafc..3c3fc53376ce 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -200,10 +200,10 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
port_name(port));
}
-static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
- enum port port)
+static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
int timeout_us;
int ret;
@@ -218,7 +218,7 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
} else if (IS_DG2(dev_priv)) {
timeout_us = 1200;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_encoder_is_tc(encoder))
timeout_us = 3000;
else
timeout_us = 1000;
@@ -331,7 +331,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
/* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */
intel_dp->DP = dig_port->saved_port_bits |
@@ -345,7 +344,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
}
- if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) {
+ if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@@ -632,6 +631,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -662,10 +662,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
- if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
+ if (intel_has_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- drm_dbg_kms(&dev_priv->drm,
- "Quirk Increase DDI disabled time\n");
+ drm_dbg_kms(display->drm, "Quirk Increase DDI disabled time\n");
/* Quirk time at 100ms for reliable operation */
msleep(100);
}
@@ -895,7 +894,6 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
/*
* ICL+ HW requires corresponding AUX IOs to be powered up for PSR with
@@ -914,7 +912,7 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
return intel_display_power_aux_io_domain(i915, dig_port->aux_ch);
else if (DISPLAY_VER(i915) < 14 &&
(intel_crtc_has_dp_encoder(crtc_state) ||
- intel_phy_is_tc(i915, phy)))
+ intel_encoder_is_tc(&dig_port->base)))
return intel_aux_power_domain(dig_port);
else
return POWER_DOMAIN_INVALID;
@@ -984,7 +982,7 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
if (cpu_transcoder == TRANSCODER_EDP)
@@ -1113,7 +1111,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
u32 val;
@@ -1176,7 +1174,7 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
int ln;
@@ -1227,7 +1225,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1328,7 +1326,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1526,7 +1524,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1540,7 +1538,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
static void adls_ddi_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1549,7 +1547,7 @@ static void adls_ddi_disable_clock(struct intel_encoder *encoder)
static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1558,7 +1556,7 @@ static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
@@ -1570,7 +1568,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1584,7 +1582,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1593,7 +1591,7 @@ static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1602,7 +1600,7 @@ static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
@@ -1614,7 +1612,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1637,7 +1635,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1646,7 +1644,7 @@ static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1655,7 +1653,7 @@ static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
enum intel_dpll_id id;
u32 val;
@@ -1680,7 +1678,7 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
if (drm_WARN_ON(&i915->drm, !pll))
return;
@@ -1694,7 +1692,7 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
_icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1703,7 +1701,7 @@ static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1712,7 +1710,7 @@ static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
@@ -1767,7 +1765,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
if (drm_WARN_ON(&i915->drm, !pll))
@@ -1787,7 +1785,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
mutex_lock(&i915->display.dpll.lock);
@@ -1803,7 +1801,7 @@ static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
u32 tmp;
@@ -1820,7 +1818,7 @@ static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
enum intel_dpll_id id;
u32 tmp;
@@ -2086,12 +2084,11 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
u32 ln0, ln1, pin_assignment;
u8 width;
- if (!intel_phy_is_tc(dev_priv, phy) ||
+ if (!intel_encoder_is_tc(&dig_port->base) ||
intel_tc_port_in_tbt_alt_mode(dig_port))
return;
@@ -2327,9 +2324,9 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- if (intel_phy_is_combo(i915, phy)) {
+ if (intel_encoder_is_combo(encoder)) {
+ enum phy phy = intel_encoder_to_phy(encoder);
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
@@ -2339,10 +2336,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
}
}
-/* Splitter enable for eDP MSO is limited to certain pipes. */
+/*
+ * Splitter enable for eDP MSO is limited to certain pipes, on certain
+ * platforms.
+ */
static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
{
- if (IS_ALDERLAKE_P(i915))
+ if (DISPLAY_VER(i915) > 20)
+ return ~0;
+ else if (IS_ALDERLAKE_P(i915))
return BIT(PIPE_A) | BIT(PIPE_B);
else
return BIT(PIPE_A);
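The hunk above widens the eDP MSO splitter mask: every pipe on display version 21+, pipes A and B on Alder Lake-P, and pipe A only everywhere else. A hedged sketch of how such a mask is typically consumed; the caller shape below is assumed for illustration and is not part of this patch:

	/* assumed caller shape */
	if (!(intel_ddi_splitter_pipe_mask(i915) & BIT(crtc->pipe)))
		return -EINVAL;	/* eDP MSO splitter not usable on this pipe */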
@@ -2812,15 +2814,14 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (HAS_DP20(dev_priv)) {
+ if (HAS_DP20(dev_priv))
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
- if (crtc_state->has_panel_replay)
- drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
- DP_PANEL_REPLAY_ENABLE);
- }
+
+ /* Panel replay has to be enabled in sink dpcd before link training. */
+ if (crtc_state->has_panel_replay)
+ intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state);
if (DISPLAY_VER(dev_priv) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -3095,39 +3096,48 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
-static void intel_ddi_post_disable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *slave_crtc;
+ struct intel_crtc *pipe_crtc;
- if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
- intel_crtc_vblank_off(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_off(old_pipe_crtc_state);
+ }
- intel_disable_transcoder(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
- intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_ddi_disable_transcoder_func(old_crtc_state);
- intel_dsc_disable(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_dsc_disable(old_pipe_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9)
- skl_scaler_disable(old_crtc_state);
+ skl_scaler_disable(old_pipe_crtc_state);
else
- ilk_pfit_disable(old_crtc_state);
+ ilk_pfit_disable(old_pipe_crtc_state);
}
+}
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) {
- const struct intel_crtc_state *old_slave_crtc_state =
- intel_atomic_get_old_crtc_state(state, slave_crtc);
-
- intel_crtc_vblank_off(old_slave_crtc_state);
-
- intel_dsc_disable(old_slave_crtc_state);
- skl_scaler_disable(old_slave_crtc_state);
- }
+static void intel_ddi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_post_disable_hdmi_or_sst(state, encoder, old_crtc_state,
+ old_conn_state);
/*
* When called from DP MST code:
@@ -3155,14 +3165,11 @@ static void intel_ddi_post_pll_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- bool is_tc_port = intel_phy_is_tc(i915, phy);
main_link_aux_power_domain_put(dig_port, old_crtc_state);
- if (is_tc_port)
+ if (intel_encoder_is_tc(encoder))
intel_tc_port_put_link(dig_port);
}
@@ -3263,7 +3270,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
u32 buf_ctl;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
@@ -3347,14 +3353,14 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- } else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
+ } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) {
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
- intel_wait_ddi_buf_active(dev_priv, port);
+ intel_wait_ddi_buf_active(encoder);
}
static void intel_enable_ddi(struct intel_atomic_state *state,
@@ -3362,10 +3368,10 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *pipe_crtc;
- if (!intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_ddi_enable_transcoder_func(encoder, crtc_state);
+ intel_ddi_enable_transcoder_func(encoder, crtc_state);
/* Enable/Disable DP2.0 SDP split config before transcoder */
intel_audio_sdp_split_update(crtc_state);
@@ -3374,7 +3380,13 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
- intel_crtc_vblank_on(crtc_state);
+ for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_on(pipe_crtc_state);
+ }
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
@@ -3470,19 +3482,17 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc_state *crtc_state =
+ const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_crtc *slave_crtc;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ struct intel_crtc *pipe_crtc;
/* FIXME: Add MTL pll_mgr */
- if (DISPLAY_VER(i915) >= 14 || !intel_phy_is_tc(i915, phy))
+ if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder))
return;
- intel_update_active_dpll(state, crtc, encoder);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(crtc_state))
- intel_update_active_dpll(state, slave_crtc, encoder);
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(crtc_state))
+ intel_update_active_dpll(state, pipe_crtc, encoder);
}
static void
@@ -3493,8 +3503,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+ bool is_tc_port = intel_encoder_is_tc(encoder);
if (is_tc_port) {
struct intel_crtc *master_crtc =
@@ -3513,14 +3522,14 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_ddi_phy_set_lane_optim_mask(encoder,
- crtc_state->lane_lat_optim_mask);
+ bxt_dpio_phy_set_lane_optim_mask(encoder,
+ crtc_state->lane_lat_optim_mask);
}
static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum tc_port tc_port = intel_encoder_to_tc(encoder);
int ln;
for (ln = 0; ln < 2; ln++)
@@ -3574,7 +3583,7 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
/* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */
- intel_wait_ddi_buf_active(dev_priv, port);
+ intel_wait_ddi_buf_active(encoder);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
@@ -3624,7 +3633,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
- intel_wait_ddi_buf_active(dev_priv, port);
+ intel_wait_ddi_buf_active(encoder);
}
static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
@@ -3681,7 +3690,7 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
if (intel_de_wait_for_set(dev_priv,
dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_IDLE_DONE, 1))
+ DP_TP_STATUS_IDLE_DONE, 2))
drm_err(&dev_priv->drm,
"Timed out waiting for DP idle patterns\n");
}
@@ -3946,7 +3955,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
+ bxt_dpio_phy_get_lane_lat_optim_mask(encoder);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -3972,6 +3981,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
+ intel_read_dp_sdp(encoder, pipe_config, DP_SDP_ADAPTIVE_SYNC);
intel_audio_codec_get_config(encoder, pipe_config);
}
@@ -4006,8 +4016,8 @@ static void mtl_ddi_get_config(struct intel_encoder *encoder,
if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
} else {
- intel_cx0pll_readout_hw_state(encoder, &crtc_state->cx0pll_state);
- crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
+ intel_cx0pll_readout_hw_state(encoder, &crtc_state->dpll_hw_state.cx0pll);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
}
intel_ddi_get_config(encoder, crtc_state);
@@ -4016,8 +4026,8 @@ static void mtl_ddi_get_config(struct intel_encoder *encoder,
static void dg2_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- intel_mpllb_readout_hw_state(encoder, &crtc_state->mpllb_state);
- crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->mpllb_state);
+ intel_mpllb_readout_hw_state(encoder, &crtc_state->dpll_hw_state.mpllb);
+ crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->dpll_hw_state.mpllb);
intel_ddi_get_config(encoder, crtc_state);
}
@@ -4144,10 +4154,7 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
static void intel_ddi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(encoder))
intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
crtc_state);
@@ -4159,10 +4166,9 @@ static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
bool fastset = true;
- if (intel_phy_is_tc(i915, phy)) {
+ if (intel_encoder_is_tc(encoder)) {
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
@@ -4226,7 +4232,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -4256,7 +4262,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
const struct intel_crtc_state *crtc_state2)
{
+ /*
+ * FIXME the modeset sequence is currently wrong and
+ * can't deal with bigjoiner + port sync at the same time.
+ */
return crtc_state1->hw.active && crtc_state2->hw.active &&
+ !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
crtc_state1->output_types == crtc_state2->output_types &&
crtc_state1->output_format == crtc_state2->output_format &&
crtc_state1->lane_count == crtc_state2->lane_count &&
@@ -4348,10 +4359,9 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
intel_dp_encoder_flush_work(encoder);
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(&dig_port->base))
intel_tc_port_cleanup(dig_port);
intel_display_power_flush_work(i915);
@@ -4362,16 +4372,14 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
static void intel_ddi_encoder_reset(struct drm_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
intel_dp->reset_link_params = true;
intel_pps_encoder_reset(intel_dp);
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(&dig_port->base))
intel_tc_port_init_mode(dig_port);
}
@@ -4538,11 +4546,9 @@ static enum intel_hotplug_state
intel_ddi_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
- enum phy phy = intel_port_to_phy(i915, encoder->port);
- bool is_tc = intel_phy_is_tc(i915, phy);
+ bool is_tc = intel_encoder_is_tc(encoder);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
@@ -4824,10 +4830,7 @@ static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- return init_dp || intel_phy_is_tc(i915, phy);
+ return init_dp || intel_encoder_is_tc(encoder);
}
static bool assert_has_icl_dsi(struct drm_i915_private *i915)
@@ -5071,17 +5074,17 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
} else if (IS_DG2(dev_priv)) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- if (intel_phy_is_combo(dev_priv, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_phy_is_combo(dev_priv, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- encoder->set_signal_levels = bxt_ddi_phy_set_signal_levels;
+ encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels;
} else {
encoder->set_signal_levels = hsw_set_signal_levels;
}
@@ -5126,7 +5129,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
goto err;
}
- if (intel_phy_is_tc(dev_priv, phy)) {
+ if (intel_encoder_is_tc(encoder)) {
bool is_legacy =
!intel_bios_encoder_supports_typec_usb(devdata) &&
!intel_bios_encoder_supports_tbt(devdata);
@@ -5155,7 +5158,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_encoder_is_tc(encoder))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
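Two themes run through the intel_ddi.c changes above: the TypeC/combo checks now key off the encoder, and the HDMI/SST disable and enable paths walk every pipe joined to the transcoder instead of special-casing bigjoiner slaves. The walk has the same shape each time; shown once here for reference, taken from the post-disable hunk above:

	struct intel_crtc *pipe_crtc;

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state)) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_off(old_pipe_crtc_state);
	}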
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index de809e2d9cac..4d21ce734343 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -1691,14 +1691,11 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000)
return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy)))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_encoder_is_c10phy(encoder)))
return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries);
- else if (!intel_is_c10phy(i915, phy))
+ else if (!intel_encoder_is_c10phy(encoder))
return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
else
return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
@@ -1707,14 +1704,13 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
if (DISPLAY_VER(i915) >= 14) {
encoder->get_buf_trans = mtl_get_cx0_buf_trans;
} else if (IS_DG2(i915)) {
encoder->get_buf_trans = dg2_get_snps_buf_trans;
} else if (IS_ALDERLAKE_P(i915)) {
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = adlp_get_combo_buf_trans;
else
encoder->get_buf_trans = adlp_get_dkl_buf_trans;
@@ -1725,16 +1721,16 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
} else if (IS_DG1(i915)) {
encoder->get_buf_trans = dg1_get_combo_buf_trans;
} else if (DISPLAY_VER(i915) >= 12) {
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = tgl_get_combo_buf_trans;
else
encoder->get_buf_trans = tgl_get_dkl_buf_trans;
} else if (DISPLAY_VER(i915) == 11) {
- if (IS_PLATFORM(i915, INTEL_JASPERLAKE))
+ if (IS_JASPERLAKE(i915))
encoder->get_buf_trans = jsl_get_combo_buf_trans;
- else if (IS_PLATFORM(i915, INTEL_ELKHARTLAKE))
+ else if (IS_ELKHARTLAKE(i915))
encoder->get_buf_trans = ehl_get_combo_buf_trans;
- else if (intel_phy_is_combo(i915, phy))
+ else if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = icl_get_combo_buf_trans;
else
encoder->get_buf_trans = icl_get_mg_buf_trans;
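intel_ddi_buf_trans_init() now selects the get_buf_trans hook from the encoder alone. A hedged sketch of the usual consumer, which asks the hook for a table and clamps the requested vswing level to the table size; the helper name and flow below are assumed for illustration:

	/* hypothetical consumer */
	static int ddi_clamp_level(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   int level)
	{
		int n_entries;

		encoder->get_buf_trans(encoder, crtc_state, &n_entries);

		return min(level, n_entries - 1);
	}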
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 42552d8c151e..e881bfeafb47 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -10,80 +10,185 @@
#include "i915_trace.h"
#include "intel_uncore.h"
+static inline struct intel_uncore *__to_uncore(struct intel_display *display)
+{
+ return &to_i915(display->drm)->uncore;
+}
+
static inline u32
-intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read(struct intel_display *display, i915_reg_t reg)
{
- return intel_uncore_read(&i915->uncore, reg);
+ u32 val;
+
+ intel_dmc_wl_get(display, reg);
+
+ val = intel_uncore_read(__to_uncore(display), reg);
+
+ intel_dmc_wl_put(display, reg);
+
+ return val;
}
+#define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__)
static inline u8
-intel_de_read8(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read8(struct intel_display *display, i915_reg_t reg)
{
- return intel_uncore_read8(&i915->uncore, reg);
+ u8 val;
+
+ intel_dmc_wl_get(display, reg);
+
+ val = intel_uncore_read8(__to_uncore(display), reg);
+
+ intel_dmc_wl_put(display, reg);
+
+ return val;
}
+#define intel_de_read8(p,...) __intel_de_read8(__to_intel_display(p), __VA_ARGS__)
static inline u64
-intel_de_read64_2x32(struct drm_i915_private *i915,
- i915_reg_t lower_reg, i915_reg_t upper_reg)
+__intel_de_read64_2x32(struct intel_display *display,
+ i915_reg_t lower_reg, i915_reg_t upper_reg)
{
- return intel_uncore_read64_2x32(&i915->uncore, lower_reg, upper_reg);
+ u64 val;
+
+ intel_dmc_wl_get(display, lower_reg);
+ intel_dmc_wl_get(display, upper_reg);
+
+ val = intel_uncore_read64_2x32(__to_uncore(display), lower_reg,
+ upper_reg);
+
+ intel_dmc_wl_put(display, upper_reg);
+ intel_dmc_wl_put(display, lower_reg);
+
+ return val;
}
+#define intel_de_read64_2x32(p,...) __intel_de_read64_2x32(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
{
- intel_uncore_posting_read(&i915->uncore, reg);
+ intel_dmc_wl_get(display, reg);
+
+ intel_uncore_posting_read(__to_uncore(display), reg);
+
+ intel_dmc_wl_put(display, reg);
}
+#define intel_de_posting_read(p,...) __intel_de_posting_read(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+__intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
{
- intel_uncore_write(&i915->uncore, reg, val);
+ intel_dmc_wl_get(display, reg);
+
+ intel_uncore_write(__to_uncore(display), reg, val);
+
+ intel_dmc_wl_put(display, reg);
}
+#define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__)
static inline u32
-intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
+____intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
+ u32 clear, u32 set)
{
- return intel_uncore_rmw(&i915->uncore, reg, clear, set);
+ return intel_uncore_rmw(__to_uncore(display), reg, clear, set);
}
+#define __intel_de_rmw_nowl(p,...) ____intel_de_rmw_nowl(__to_intel_display(p), __VA_ARGS__)
+
+static inline u32
+__intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
+ u32 set)
+{
+ u32 val;
+
+ intel_dmc_wl_get(display, reg);
+
+ val = __intel_de_rmw_nowl(display, reg, clear, set);
+
+ intel_dmc_wl_put(display, reg);
+
+ return val;
+}
+#define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+____intel_de_wait_for_register_nowl(struct intel_display *display,
+ i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
- return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
+ return intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, timeout);
}
+#define __intel_de_wait_for_register_nowl(p,...) ____intel_de_wait_for_register_nowl(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_register_fw(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+__intel_de_wait(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
- return intel_wait_for_register_fw(&i915->uncore, reg, mask, value, timeout);
+ int ret;
+
+ intel_dmc_wl_get(display, reg);
+
+ ret = __intel_de_wait_for_register_nowl(display, reg, mask, value,
+ timeout);
+
+ intel_dmc_wl_put(display, reg);
+
+ return ret;
}
+#define intel_de_wait(p,...) __intel_de_wait(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, u32 value,
- unsigned int fast_timeout_us,
- unsigned int slow_timeout_ms, u32 *out_value)
+__intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
- return __intel_wait_for_register(&i915->uncore, reg, mask, value,
- fast_timeout_us, slow_timeout_ms, out_value);
+ int ret;
+
+ intel_dmc_wl_get(display, reg);
+
+ ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask,
+ value, timeout);
+
+ intel_dmc_wl_put(display, reg);
+
+ return ret;
}
+#define intel_de_wait_fw(p,...) __intel_de_wait_fw(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+__intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms, u32 *out_value)
{
- return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
+ int ret;
+
+ intel_dmc_wl_get(display, reg);
+
+ ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value,
+ fast_timeout_us, slow_timeout_ms, out_value);
+
+ intel_dmc_wl_put(display, reg);
+
+ return ret;
}
+#define intel_de_wait_custom(p,...) __intel_de_wait_custom(__to_intel_display(p), __VA_ARGS__)
static inline int
-intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
+__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
u32 mask, unsigned int timeout)
{
- return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
+ return intel_de_wait(display, reg, mask, mask, timeout);
+}
+#define intel_de_wait_for_set(p,...) __intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__)
+
+static inline int
+__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait(display, reg, mask, 0, timeout);
}
+#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__)
/*
* Unlocked mmio-accessors, think carefully before using these.
@@ -94,33 +199,38 @@ intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
* a more localised lock guarding all access to that bank of registers.
*/
static inline u32
-intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
{
u32 val;
- val = intel_uncore_read_fw(&i915->uncore, reg);
+ val = intel_uncore_read_fw(__to_uncore(display), reg);
trace_i915_reg_rw(false, reg, val, sizeof(val), true);
return val;
}
+#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
{
trace_i915_reg_rw(true, reg, val, sizeof(val), true);
- intel_uncore_write_fw(&i915->uncore, reg, val);
+ intel_uncore_write_fw(__to_uncore(display), reg, val);
}
+#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__)
static inline u32
-intel_de_read_notrace(struct drm_i915_private *i915, i915_reg_t reg)
+__intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
{
- return intel_uncore_read_notrace(&i915->uncore, reg);
+ return intel_uncore_read_notrace(__to_uncore(display), reg);
}
+#define intel_de_read_notrace(p,...) __intel_de_read_notrace(__to_intel_display(p), __VA_ARGS__)
static inline void
-intel_de_write_notrace(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+__intel_de_write_notrace(struct intel_display *display, i915_reg_t reg,
+ u32 val)
{
- intel_uncore_write_notrace(&i915->uncore, reg, val);
+ intel_uncore_write_notrace(__to_uncore(display), reg, val);
}
+#define intel_de_write_notrace(p,...) __intel_de_write_notrace(__to_intel_display(p), __VA_ARGS__)
#endif /* __INTEL_DE_H__ */
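The rewritten intel_de.h keeps the old entry points as macros around __intel_de_*() helpers that take a struct intel_display *. Routing every call through __to_intel_display() appears intended to let existing call sites keep passing a struct drm_i915_private * while converted code passes the display pointer directly, and the locked accessors now bracket the MMIO with intel_dmc_wl_get()/intel_dmc_wl_put() so the DMC wakelock is held across the access. Usage sketch; both spellings occur later in this series:

	/* both forms end up in __intel_de_read(struct intel_display *, ...) */
	u32 a = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
	u32 b = intel_de_read(display, BXT_MIPI_PORT_CTRL(port));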
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index ab2f52d21bad..273323f30ae2 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -85,7 +85,6 @@
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
-#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
@@ -120,6 +119,7 @@
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
@@ -275,6 +275,13 @@ static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
return hweight8(crtc_state->bigjoiner_pipes);
}
+u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return BIT(crtc->pipe) | crtc_state->bigjoiner_pipes;
+}
+
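+/*
+ * Worked example (assumed state, for illustration): with bigjoiner active,
+ * bigjoiner_pipes already contains both joined pipes, so a pipe A master
+ * joined with pipe B gives
+ *   crtc->pipe == PIPE_A, bigjoiner_pipes == BIT(PIPE_A) | BIT(PIPE_B)
+ *   => intel_crtc_joined_pipe_mask() == BIT(PIPE_A) | BIT(PIPE_B)
+ * and with bigjoiner disabled (bigjoiner_pipes == 0) the mask degenerates
+ * to just BIT(crtc->pipe).
+ */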
struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
@@ -383,8 +390,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
break;
}
- if (intel_de_wait_for_register(dev_priv, dpll_reg,
- port_mask, expected_mask, 1000))
+ if (intel_de_wait(dev_priv, dpll_reg, port_mask, expected_mask, 1000))
drm_WARN(&dev_priv->drm, 1,
"timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
@@ -430,6 +436,18 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
+ u32 set = 0;
+
+ if (DISPLAY_VER(dev_priv) == 14)
+ set |= DP_FEC_BS_JITTER_WA;
+
+ intel_de_rmw(dev_priv,
+ hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ clear, set);
+ }
+
val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
@@ -437,6 +455,14 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
return;
}
+ /* Wa_1409098942:adlp+ */
+ if (DISPLAY_VER(dev_priv) >= 13 &&
+ new_crtc_state->dsc.compression_enable) {
+ val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
+ val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
+ TRANSCONF_PIXEL_COUNT_SCALING_X4);
+ }
+
intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
val | TRANSCONF_ENABLE);
intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
@@ -483,6 +509,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~TRANSCONF_ENABLE;
+ /* Wa_1409098942:adlp+ */
+ if (DISPLAY_VER(dev_priv) >= 13 &&
+ old_crtc_state->dsc.compression_enable)
+ val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
+
intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
if (DISPLAY_VER(dev_priv) >= 12)
@@ -535,7 +566,7 @@ bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
return DISPLAY_VER(dev_priv) < 4 ||
- (plane->fbc &&
+ (plane->fbc && !plane_state->no_fbc_reason &&
plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}
@@ -1552,18 +1583,21 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
-static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool apply)
+/* Display WA #1180: WaDisableScalarClockGating: glk */
+static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
- u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
- u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (apply)
- val |= mask;
- else
- val &= ~mask;
+ return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
+}
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
+static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+
+ intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
+ mask, enable ? mask : 0);
}
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
@@ -1586,24 +1620,6 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
-static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
-
- /*
- * Enable sequence steps 1-7 on bigjoiner master
- */
- if (intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_encoders_pre_pll_enable(state, master_crtc);
-
- if (crtc_state->shared_dpll)
- intel_enable_shared_dpll(crtc_state);
-
- if (intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_encoders_pre_enable(state, master_crtc);
-}
-
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1639,90 +1655,107 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
- bool psl_clkgate_wa;
+ struct intel_crtc *pipe_crtc;
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
- intel_dmc_enable_pipe(dev_priv, crtc->pipe);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state))
+ intel_dmc_enable_pipe(dev_priv, pipe_crtc->pipe);
- if (!new_crtc_state->bigjoiner_pipes) {
- intel_encoders_pre_pll_enable(state, crtc);
+ intel_encoders_pre_pll_enable(state, crtc);
- if (new_crtc_state->shared_dpll)
- intel_enable_shared_dpll(new_crtc_state);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- intel_encoders_pre_enable(state, crtc);
- } else {
- icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
+ if (pipe_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(pipe_crtc_state);
}
- intel_dsc_enable(new_crtc_state);
+ intel_encoders_pre_enable(state, crtc);
- if (DISPLAY_VER(dev_priv) >= 13)
- intel_uncompressed_joiner_enable(new_crtc_state);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- intel_set_pipe_src_size(new_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipe_misc(new_crtc_state);
+ intel_dsc_enable(pipe_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ intel_uncompressed_joiner_enable(pipe_crtc_state);
+
+ intel_set_pipe_src_size(pipe_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipe_misc(pipe_crtc_state);
+ }
- if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
- !transcoder_is_dsi(cpu_transcoder))
+ if (!transcoder_is_dsi(cpu_transcoder))
hsw_configure_cpu_transcoder(new_crtc_state);
- crtc->active = true;
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- /* Display WA #1180: WaDisableScalarClockGating: glk */
- psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
- new_crtc_state->pch_pfit.enabled;
- if (psl_clkgate_wa)
- glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
+ pipe_crtc->active = true;
- if (DISPLAY_VER(dev_priv) >= 9)
- skl_pfit_enable(new_crtc_state);
- else
- ilk_pfit_enable(new_crtc_state);
+ if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
+ glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);
- /*
- * On ILK+ LUT must be loaded before the pipe is running but with
- * clocks enabled
- */
- intel_color_load_luts(new_crtc_state);
- intel_color_commit_noarm(new_crtc_state);
- intel_color_commit_arm(new_crtc_state);
- /* update DSPCNTR to configure gamma/csc for pipe bottom color */
- if (DISPLAY_VER(dev_priv) < 9)
- intel_disable_primary_plane(new_crtc_state);
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_pfit_enable(pipe_crtc_state);
+ else
+ ilk_pfit_enable(pipe_crtc_state);
- hsw_set_linetime_wm(new_crtc_state);
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_color_load_luts(pipe_crtc_state);
+ intel_color_commit_noarm(pipe_crtc_state);
+ intel_color_commit_arm(pipe_crtc_state);
+ /* update DSPCNTR to configure gamma/csc for pipe bottom color */
+ if (DISPLAY_VER(dev_priv) < 9)
+ intel_disable_primary_plane(pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 11)
- icl_set_pipe_chicken(new_crtc_state);
+ hsw_set_linetime_wm(pipe_crtc_state);
- intel_initial_watermarks(state, crtc);
+ if (DISPLAY_VER(dev_priv) >= 11)
+ icl_set_pipe_chicken(pipe_crtc_state);
- if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
- intel_crtc_vblank_on(new_crtc_state);
+ intel_initial_watermarks(state, pipe_crtc);
+ }
intel_encoders_enable(state, crtc);
- if (psl_clkgate_wa) {
- intel_crtc_wait_for_next_vblank(crtc);
- glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
- }
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+ enum pipe hsw_workaround_pipe;
- /* If we change the relative order between pipe/planes enabling, we need
- * to change the workaround. */
- hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
- if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
- struct intel_crtc *wa_crtc;
+ if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) {
+ intel_crtc_wait_for_next_vblank(pipe_crtc);
+ glk_pipe_scaler_clock_gating_wa(pipe_crtc, false);
+ }
- wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
+ /*
+ * If we change the relative order between pipe/planes
+ * enabling, we need to change the workaround.
+ */
+ hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
+ if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+ struct intel_crtc *wa_crtc =
+ intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
- intel_crtc_wait_for_next_vblank(wa_crtc);
- intel_crtc_wait_for_next_vblank(wa_crtc);
+ intel_crtc_wait_for_next_vblank(wa_crtc);
+ intel_crtc_wait_for_next_vblank(wa_crtc);
+ }
}
}
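The enable path above applies the GLK scaler clock gating workaround per joined pipe: gating is disabled before the pfit/scaler programming and restored only after the next vblank. Condensed to a single pipe, the pairing from the hunk above looks like this (simplified sketch, not additional code):

	if (glk_need_scaler_clock_gating_wa(crtc_state))
		glk_pipe_scaler_clock_gating_wa(crtc, true);

	/* pfit/scaler programming, LUTs, watermarks, encoder enable ... */

	if (glk_need_scaler_clock_gating_wa(crtc_state)) {
		intel_crtc_wait_for_next_vblank(crtc);
		glk_pipe_scaler_clock_gating_wa(crtc, false);
	}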
@@ -1786,29 +1819,28 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc *pipe_crtc;
/*
* FIXME collapse everything to one hook.
* Need care with mst->ddi interactions.
*/
- if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
- intel_encoders_disable(state, crtc);
- intel_encoders_post_disable(state, crtc);
- }
-
- intel_disable_shared_dpll(old_crtc_state);
+ intel_encoders_disable(state, crtc);
+ intel_encoders_post_disable(state, crtc);
- if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
- struct intel_crtc *slave_crtc;
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
- intel_encoders_post_pll_disable(state, crtc);
+ intel_disable_shared_dpll(old_pipe_crtc_state);
+ }
- intel_dmc_disable_pipe(i915, crtc->pipe);
+ intel_encoders_post_pll_disable(state, crtc);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(old_crtc_state))
- intel_dmc_disable_pipe(i915, slave_crtc->pipe);
- }
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state))
+ intel_dmc_disable_pipe(i915, pipe_crtc->pipe);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -1836,6 +1868,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
+/* Prefer intel_encoder_is_combo() */
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
@@ -1857,6 +1890,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
return false;
}
+/* Prefer intel_encoder_is_tc() */
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
/*
@@ -1877,6 +1911,7 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
return false;
}
+/* Prefer intel_encoder_is_snps() */
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
/*
@@ -1886,6 +1921,7 @@ bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
}
+/* Prefer intel_encoder_to_phy() */
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
@@ -1903,6 +1939,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
return PHY_A + port - PORT_A;
}
+/* Prefer intel_encoder_to_tc() */
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
@@ -1914,6 +1951,41 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
return TC_PORT_1 + port - PORT_C;
}
+enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_port_to_phy(i915, encoder->port);
+}
+
+bool intel_encoder_is_combo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder));
+}
+
+bool intel_encoder_is_snps(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder));
+}
+
+bool intel_encoder_is_tc(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
+}
+
+enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return intel_port_to_tc(i915, encoder->port);
+}
+
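These helpers are thin wrappers so that code which already holds the encoder does not need to open-code the dev_priv + port plumbing; the phy/port based variants above remain for the few callers without an encoder (hence the "Prefer ..." notes). The migration pattern used throughout this patch:

	/* before */
	enum phy phy = intel_port_to_phy(i915, encoder->port);
	bool is_tc = intel_phy_is_tc(i915, phy);

	/* after */
	bool is_tc = intel_encoder_is_tc(encoder);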
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
@@ -2381,7 +2453,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int clock_limit = i915->max_dotclk_freq;
+ int clock_limit = i915->display.cdclk.max_dotclk_freq;
/*
* Start with the adjusted_mode crtc timings, which
@@ -2405,7 +2477,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
*/
if (intel_crtc_supports_double_wide(crtc) &&
pipe_mode->crtc_clock > clock_limit) {
- clock_limit = i915->max_dotclk_freq;
+ clock_limit = i915->display.cdclk.max_dotclk_freq;
crtc_state->double_wide = true;
}
}
@@ -2709,15 +2781,6 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
*/
intel_de_write(dev_priv, PIPESRC(pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
-
- if (!crtc_state->enable_psr2_su_region_et)
- return;
-
- width = drm_rect_width(&crtc_state->psr2_su_area);
- height = drm_rect_height(&crtc_state->psr2_su_area);
-
- intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
- PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@@ -3008,19 +3071,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pfit_config(pipe_config);
+ i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state);
+
if (DISPLAY_VER(dev_priv) >= 4) {
- /* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
- else
- tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
+ tmp = pipe_config->dpll_hw_state.i9xx.dpll_md;
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
- pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
- tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
+ tmp = pipe_config->dpll_hw_state.i9xx.dpll;
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
>> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
@@ -3030,26 +3090,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* function. */
pipe_config->pixel_multiplier = 1;
}
- pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
- DPLL(crtc->pipe));
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
- FP0(crtc->pipe));
- pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
- FP1(crtc->pipe));
- } else {
- /* Mask out read-only status bits. */
- pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
- DPLL_PORTC_READY_MASK |
- DPLL_PORTB_READY_MASK);
- }
if (IS_CHERRYVIEW(dev_priv))
- chv_crtc_clock_get(crtc, pipe_config);
+ chv_crtc_clock_get(pipe_config);
else if (IS_VALLEYVIEW(dev_priv))
- vlv_crtc_clock_get(crtc, pipe_config);
+ vlv_crtc_clock_get(pipe_config);
else
- i9xx_crtc_clock_get(crtc, pipe_config);
+ i9xx_crtc_clock_get(pipe_config);
/*
* Normally the dotclock is filled in by the encoder .get_config()
@@ -3675,8 +3722,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
struct intel_display_power_domain_set *power_domain_set)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder;
enum port port;
u32 tmp;
@@ -3702,11 +3749,11 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
break;
/* XXX: this works for video mode only */
- tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
+ tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port));
if (!(tmp & DPI_ENABLE))
continue;
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ tmp = intel_de_read(display, MIPI_CTRL(display, port));
if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
continue;
@@ -4723,8 +4770,6 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state,
struct drm_connector *connector;
int i;
- intel_bigjoiner_adjust_pipe_src(crtc_state);
-
for_each_new_connector_in_state(&state->base, connector,
conn_state, i) {
struct intel_encoder *encoder =
@@ -4792,42 +4837,92 @@ intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
}
static bool
+intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a,
+ const struct drm_dp_as_sdp *b)
+{
+ return a->vtotal == b->vtotal &&
+ a->target_rr == b->target_rr &&
+ a->duration_incr_ms == b->duration_incr_ms &&
+ a->duration_decr_ms == b->duration_decr_ms &&
+ a->mode == b->mode;
+}
+
+static bool
intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
{
return memcmp(a, b, len) == 0;
}
+static void __printf(5, 6)
+pipe_config_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name, const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ if (fastset)
+ drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
+ else
+ drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
+
+ va_end(args);
+}
+
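For illustration, a hypothetical direct call equivalent to what the PIPE_CONF_CHECK_* macros further down in intel_pipe_config_compare() expand to; the field name, CRTC id/name and values are made up:

	pipe_config_mismatch(&p, fastset, crtc, "pixel_multiplier",
			     "(expected %i, found %i)", 1, 2);

	/* during a fastset check this would print roughly:
	 * [CRTC:80:pipe A] fastset requirement not met in pixel_multiplier (expected 1, found 2)
	 */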
static void
-pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
- bool fastset, const char *name,
+pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
const union hdmi_infoframe *a,
const union hdmi_infoframe *b)
{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const char *loglevel;
+
if (fastset) {
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- drm_dbg_kms(&dev_priv->drm,
- "fastset requirement not met in %s infoframe\n", name);
- drm_dbg_kms(&dev_priv->drm, "expected:\n");
- hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
- drm_dbg_kms(&dev_priv->drm, "found:\n");
- hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ loglevel = KERN_DEBUG;
} else {
- drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
- drm_err(&dev_priv->drm, "expected:\n");
- hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
- drm_err(&dev_priv->drm, "found:\n");
- hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
+ loglevel = KERN_ERR;
}
+
+ pipe_config_mismatch(p, fastset, crtc, name, "infoframe");
+
+ drm_printf(p, "expected:\n");
+ hdmi_infoframe_log(loglevel, i915->drm.dev, a);
+ drm_printf(p, "found:\n");
+ hdmi_infoframe_log(loglevel, i915->drm.dev, b);
}
static void
-pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915,
- bool fastset, const char *name,
+pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
+ pipe_config_mismatch(p, fastset, crtc, name, "dp sdp");
+
+ drm_printf(p, "expected:\n");
+ drm_dp_vsc_sdp_log(p, a);
+ drm_printf(p, "found:\n");
+ drm_dp_vsc_sdp_log(p, b);
+}
+
+static void
+pipe_config_dp_as_sdp_mismatch(struct drm_i915_private *i915,
+ bool fastset, const char *name,
+ const struct drm_dp_as_sdp *a,
+ const struct drm_dp_as_sdp *b)
+{
struct drm_printer p;
if (fastset) {
@@ -4841,9 +4936,9 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915,
}
drm_printf(&p, "expected:\n");
- drm_dp_vsc_sdp_log(&p, a);
+ drm_dp_as_sdp_log(&p, a);
drm_printf(&p, "found:\n");
- drm_dp_vsc_sdp_log(&p, b);
+ drm_dp_as_sdp_log(&p, b);
}
/* Returns the length up to and including the last differing byte */
@@ -4861,64 +4956,35 @@ memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
}
static void
-pipe_config_buffer_mismatch(bool fastset, const struct intel_crtc *crtc,
+pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
const char *name,
const u8 *a, const u8 *b, size_t len)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const char *loglevel;
if (fastset) {
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- /* only dump up to the last difference */
- len = memcmp_diff_len(a, b, len);
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] fastset requirement not met in %s buffer\n",
- crtc->base.base.id, crtc->base.name, name);
- print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
- 16, 0, a, len, false);
- print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
- 16, 0, b, len, false);
+ loglevel = KERN_DEBUG;
} else {
- /* only dump up to the last difference */
- len = memcmp_diff_len(a, b, len);
-
- drm_err(&dev_priv->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
- crtc->base.base.id, crtc->base.name, name);
- print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
- 16, 0, a, len, false);
- print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
- 16, 0, b, len, false);
+ loglevel = KERN_ERR;
}
-}
-static void __printf(4, 5)
-pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
- const char *name, const char *format, ...)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct va_format vaf;
- va_list args;
-
- va_start(args, format);
- vaf.fmt = format;
- vaf.va = &args;
+ pipe_config_mismatch(p, fastset, crtc, name, "buffer");
- if (fastset)
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
- else
- drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
- va_end(args);
+ print_hex_dump(loglevel, "expected: ", DUMP_PREFIX_NONE,
+ 16, 0, a, len, false);
+ print_hex_dump(loglevel, "found: ", DUMP_PREFIX_NONE,
+ 16, 0, b, len, false);
}
static void
-pipe_config_pll_mismatch(bool fastset,
+pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
const struct intel_crtc *crtc,
const char *name,
const struct intel_dpll_hw_state *a,
@@ -4926,25 +4992,12 @@ pipe_config_pll_mismatch(bool fastset,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (fastset) {
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
+ pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */
- drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] fastset requirement not met in %s\n",
- crtc->base.base.id, crtc->base.name, name);
- drm_dbg_kms(&i915->drm, "expected:\n");
- intel_dpll_dump_hw_state(i915, a);
- drm_dbg_kms(&i915->drm, "found:\n");
- intel_dpll_dump_hw_state(i915, b);
- } else {
- drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
- crtc->base.base.id, crtc->base.name, name);
- drm_err(&i915->drm, "expected:\n");
- intel_dpll_dump_hw_state(i915, a);
- drm_err(&i915->drm, "found:\n");
- intel_dpll_dump_hw_state(i915, b);
- }
+ drm_printf(p, "expected:\n");
+ intel_dpll_dump_hw_state(i915, p, a);
+ drm_printf(p, "found:\n");
+ intel_dpll_dump_hw_state(i915, p, b);
}
bool
@@ -4954,13 +5007,19 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
{
struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_printer p;
bool ret = true;
+ if (fastset)
+ p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL);
+ else
+ p = drm_err_printer(&dev_priv->drm, NULL);
+
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
__stringify(name) " is bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name, \
pipe_config->name); \
@@ -4972,7 +5031,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
__stringify(name) " is bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
@@ -4984,7 +5043,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
__stringify(name) " is bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
@@ -4996,7 +5055,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
__stringify(name) " is not bool"); \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %s, found %s)", \
str_yes_no(current_config->name), \
str_yes_no(pipe_config->name)); \
@@ -5006,7 +5065,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %p, found %p)", \
current_config->name, \
pipe_config->name); \
@@ -5017,7 +5076,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_M_N(name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected tu %i data %i/%i link %i/%i, " \
"found tu %i, data %i/%i link %i/%i)", \
current_config->name.tu, \
@@ -5037,7 +5096,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_PLL(name) do { \
if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
&pipe_config->name)) { \
- pipe_config_pll_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->name, \
&pipe_config->name); \
ret = false; \
@@ -5070,7 +5129,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(%x) (expected %i, found %i)", \
(mask), \
current_config->name & (mask), \
@@ -5082,7 +5141,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
if (!intel_compare_infoframe(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
+ pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->infoframes.name, \
&pipe_config->infoframes.name); \
ret = false; \
@@ -5092,7 +5151,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
+ pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
+ if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
+ &pipe_config->infoframes.name)) { \
+ pipe_config_dp_as_sdp_mismatch(dev_priv, fastset, __stringify(name), \
&current_config->infoframes.name, \
&pipe_config->infoframes.name); \
ret = false; \
@@ -5103,7 +5172,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
- pipe_config_buffer_mismatch(fastset, crtc, __stringify(name), \
+ pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \
current_config->name, \
pipe_config->name, \
(len)); \
@@ -5116,7 +5185,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
!intel_color_lut_equal(current_config, \
current_config->lut, pipe_config->lut, \
is_pre_csc_lut)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(lut), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \
"hw_state doesn't match sw_state"); \
ret = false; \
} \
@@ -5245,6 +5314,18 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_CSC(output_csc);
}
+ /*
+ * Panel replay has to be enabled before link training. PSR doesn't have
+ * this requirement, so check these only when using panel replay
+ */
+ if (current_config->has_panel_replay || pipe_config->has_panel_replay) {
+ PIPE_CONF_CHECK_BOOL(has_psr);
+ PIPE_CONF_CHECK_BOOL(has_psr2);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et);
+ PIPE_CONF_CHECK_BOOL(has_panel_replay);
+ }
+
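Roughly what one of these checks expands to, going by the PIPE_CONF_CHECK_BOOL definition earlier in this function (BUILD_BUG_ON type check omitted; shown for has_panel_replay):

	if (current_config->has_panel_replay != pipe_config->has_panel_replay) {
		pipe_config_mismatch(&p, fastset, crtc, "has_panel_replay",
				     "(expected %s, found %s)",
				     str_yes_no(current_config->has_panel_replay),
				     str_yes_no(pipe_config->has_panel_replay));
		ret = false;
	}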
PIPE_CONF_CHECK_BOOL(double_wide);
if (dev_priv->display.dpll.mgr)
@@ -5280,6 +5361,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
+ PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
@@ -5331,6 +5413,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.flipline);
PIPE_CONF_CHECK_I(vrr.pipeline_full);
PIPE_CONF_CHECK_I(vrr.guardband);
+ PIPE_CONF_CHECK_I(vrr.vsync_start);
+ PIPE_CONF_CHECK_I(vrr.vsync_end);
}
#undef PIPE_CONF_CHECK_X
@@ -5576,14 +5660,16 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* only allow LRR when the timings stay within the VRR range */
if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
new_crtc_state->update_lrr = false;
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
- drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n");
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
+ crtc->base.base.id, crtc->base.name);
else
new_crtc_state->uapi.mode_changed = false;
@@ -6237,27 +6323,37 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
continue;
}
- if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
- drm_WARN_ON(&i915->drm, new_crtc_state->uapi.enable);
+ if (drm_WARN_ON(&i915->drm, intel_crtc_is_bigjoiner_slave(new_crtc_state)))
continue;
- }
ret = intel_crtc_prepare_cleared_state(state, crtc);
if (ret)
- break;
+ goto fail;
if (!new_crtc_state->hw.enable)
continue;
ret = intel_modeset_pipe_config(state, crtc, limits);
if (ret)
- break;
+ goto fail;
+ }
- ret = intel_atomic_check_bigjoiner(state, crtc);
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (drm_WARN_ON(&i915->drm, intel_crtc_is_bigjoiner_slave(new_crtc_state)))
+ continue;
+
+ if (!new_crtc_state->hw.enable)
+ continue;
+
+ ret = intel_modeset_pipe_config_late(state, crtc);
if (ret)
- break;
+ goto fail;
}
+fail:
if (ret)
*failed_pipe = crtc->pipe;
@@ -6353,16 +6449,26 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
+ drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
+ continue;
+ }
+
+ ret = intel_atomic_check_bigjoiner(state, crtc);
+ if (ret)
+ goto fail;
+ }
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- if (new_crtc_state->hw.enable) {
- ret = intel_modeset_pipe_config_late(state, crtc);
- if (ret)
- goto fail;
- }
+ intel_bigjoiner_adjust_pipe_src(new_crtc_state);
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
}
@@ -6644,17 +6750,21 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc *pipe_crtc;
if (!intel_crtc_needs_modeset(new_crtc_state))
return;
- /* VRR will be enable later, if required */
- intel_crtc_update_active_timings(new_crtc_state, false);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
- dev_priv->display.funcs.display->crtc_enable(state, crtc);
+ /* VRR will be enabled later, if required */
+ intel_crtc_update_active_timings(pipe_crtc_state, false);
+ }
- if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
- return;
+ dev_priv->display.funcs.display->crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
@@ -6746,31 +6856,42 @@ static void intel_update_crtc(struct intel_atomic_state *state,
}
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc *pipe_crtc;
/*
* We need to disable pipe CRC before disabling the pipe,
* or we race against vblank off.
*/
- intel_crtc_disable_pipe_crc(crtc);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state))
+ intel_crtc_disable_pipe_crc(pipe_crtc);
dev_priv->display.funcs.display->crtc_disable(state, crtc);
- crtc->active = false;
- intel_fbc_disable(crtc);
- if (!new_crtc_state->hw.active)
- intel_initial_watermarks(state, crtc);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *new_pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ pipe_crtc->active = false;
+ intel_fbc_disable(pipe_crtc);
+
+ if (!new_pipe_crtc_state->hw.active)
+ intel_initial_watermarks(state, pipe_crtc);
+ }
}
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
- struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
- u32 handled = 0;
+ u8 disable_pipes = 0;
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -6778,21 +6899,31 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
+ /*
+ * Needs to be done even for pipes
+ * that weren't enabled previously.
+ */
intel_pre_plane_update(state, crtc);
if (!old_crtc_state->hw.active)
continue;
+ disable_pipes |= BIT(crtc->pipe);
+ }
+
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if ((disable_pipes & BIT(crtc->pipe)) == 0)
+ continue;
+
intel_crtc_disable_planes(state, crtc);
}
/* Only disable port sync and MST slaves */
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state))
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if ((disable_pipes & BIT(crtc->pipe)) == 0)
continue;
- if (!old_crtc_state->hw.active)
+ if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
continue;
/* In case of Transcoder port Sync master slave CRTCs can be
@@ -6801,28 +6932,28 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
* Slave vblanks are masked till Master Vblanks.
*/
if (!is_trans_port_sync_slave(old_crtc_state) &&
- !intel_dp_mst_is_slave_trans(old_crtc_state) &&
- !intel_crtc_is_bigjoiner_slave(old_crtc_state))
+ !intel_dp_mst_is_slave_trans(old_crtc_state))
continue;
- intel_old_crtc_state_disables(state, old_crtc_state,
- new_crtc_state, crtc);
- handled |= BIT(crtc->pipe);
+ intel_old_crtc_state_disables(state, crtc);
+
+ disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
}
/* Disable everything else left on */
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state) ||
- (handled & BIT(crtc->pipe)))
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if ((disable_pipes & BIT(crtc->pipe)) == 0)
continue;
- if (!old_crtc_state->hw.active)
+ if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
continue;
- intel_old_crtc_state_disables(state, old_crtc_state,
- new_crtc_state, crtc);
+ intel_old_crtc_state_disables(state, crtc);
+
+ disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
}
+
+ drm_WARN_ON(&i915->drm, disable_pipes);
}
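A sketch of the disable_pipes bookkeeping under an assumed layout where pipes A and B are bigjoined and pipe C is independent:

	u8 disable_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);

	/* disabling the A/B master tears down the slave pipe as well */
	disable_pipes &= ~(BIT(PIPE_A) | BIT(PIPE_B));	/* intel_crtc_joined_pipe_mask() */

	/* only PIPE_C remains for the "disable everything else" loop,
	 * and the final drm_WARN_ON() checks that nothing was missed */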
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
@@ -6889,9 +7020,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
intel_pre_update_crtc(state, crtc);
}
+ intel_dbuf_mbus_pre_ddb_update(state);
+
while (update_pipes) {
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ /*
+ * Commit in reverse order to make bigjoiner master
+ * send the uapi events after slaves are done.
+ */
+ for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
if ((update_pipes & BIT(pipe)) == 0)
@@ -6919,6 +7056,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
}
}
+ intel_dbuf_mbus_post_ddb_update(state);
+
update_pipes = modeset_pipes;
/*
@@ -6931,12 +7070,14 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ continue;
+
if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
- is_trans_port_sync_master(new_crtc_state) ||
- intel_crtc_is_bigjoiner_master(new_crtc_state))
+ is_trans_port_sync_master(new_crtc_state))
continue;
- modeset_pipes &= ~BIT(pipe);
+ modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);
intel_enable_crtc(state, crtc);
}
@@ -6951,7 +7092,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
- modeset_pipes &= ~BIT(pipe);
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ continue;
+
+ modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);
intel_enable_crtc(state, crtc);
}
@@ -6968,7 +7112,11 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
intel_pre_update_crtc(state, crtc);
}
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ /*
+ * Commit in reverse order to make bigjoiner master
+ * send the uapi events after slaves are done.
+ */
+ for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
if ((update_pipes & BIT(pipe)) == 0)
@@ -7165,7 +7313,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_encoders_update_prepare(state);
intel_dbuf_pre_plane_update(state);
- intel_mbus_dbox_update(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->do_async_flip)
@@ -7690,7 +7837,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
static int max_dotclock(struct drm_i915_private *i915)
{
- int max_dotclock = i915->max_dotclk_freq;
+ int max_dotclock = i915->display.cdclk.max_dotclk_freq;
/* icl+ might use bigjoiner */
if (DISPLAY_VER(i915) >= 11)
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f4a0773f0fca..56d1c0e3e62c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -280,6 +280,12 @@ enum phy_fia {
base.head) \
for_each_if((pipe_mask) & BIT(intel_crtc->pipe))
+#define for_each_intel_crtc_in_pipe_mask_reverse(dev, intel_crtc, pipe_mask) \
+ list_for_each_entry_reverse((intel_crtc), \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if((pipe_mask) & BIT((intel_crtc)->pipe))
+
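A sketch of the intended iteration order, assuming pipes A and B are joined (the crtc_state variable is hypothetical); the reverse walk visits the higher-numbered slave pipe(s) before the lower-numbered master:

	for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, pipe_crtc,
						 intel_crtc_joined_pipe_mask(crtc_state)) {
		/* visited as: pipe B first, then pipe A (the master) */
	}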
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
&(dev)->mode_config.encoder_list, \
@@ -344,6 +350,14 @@ enum phy_fia {
(__i)++) \
for_each_if(crtc)
+#define for_each_new_intel_crtc_in_state_reverse(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \
+ (__i) >= 0 && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)--) \
+ for_each_if(crtc)
+
#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -408,6 +422,7 @@ intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
bool is_trans_port_sync_master(const struct intel_crtc_state *state);
+u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state);
bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state);
u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state);
@@ -448,6 +463,13 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
+
+enum phy intel_encoder_to_phy(struct intel_encoder *encoder);
+bool intel_encoder_is_combo(struct intel_encoder *encoder);
+bool intel_encoder_is_snps(struct intel_encoder *encoder);
+bool intel_encoder_is_tc(struct intel_encoder *encoder);
+enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder);
+
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h
new file mode 100644
index 000000000000..ad8545c8055d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+/*
+ * This header is for transitional struct intel_display conversion helpers only.
+ */
+
+#ifndef __INTEL_DISPLAY_CONVERSION__
+#define __INTEL_DISPLAY_CONVERSION__
+
+/*
+ * Transitional macro to optionally convert struct drm_i915_private * to struct
+ * intel_display *, also accepting the latter.
+ */
+#define __to_intel_display(p) \
+ _Generic(p, \
+ const struct drm_i915_private *: (&((const struct drm_i915_private *)(p))->display), \
+ struct drm_i915_private *: (&((struct drm_i915_private *)(p))->display), \
+ const struct intel_display *: (p), \
+ struct intel_display *: (p))
+
+#endif /* __INTEL_DISPLAY_CONVERSION__ */
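A minimal sketch of what the _Generic dispatch buys transitional callers (the example function is hypothetical); this is also what lets DISPLAY_INFO()/DISPLAY_VER() later in this series accept either pointer type:

	static bool example(struct drm_i915_private *i915)
	{
		struct intel_display *display = &i915->display;

		/* both spellings resolve to the same struct intel_display * */
		struct intel_display *a = __to_intel_display(i915);
		struct intel_display *b = __to_intel_display(display);

		return a == b;	/* always true */
	}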
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 2167dbee5eea..7715fc329057 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -26,6 +26,7 @@
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
+#include "intel_dmc_wl.h"
#include "intel_wm_types.h"
struct task_struct;
@@ -282,6 +283,9 @@ struct intel_wm {
};
struct intel_display {
+ /* drm device backpointer */
+ struct drm_device *drm;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -345,6 +349,8 @@ struct intel_display {
struct intel_global_obj obj;
unsigned int max_cdclk_freq;
+ unsigned int max_dotclk_freq;
+ unsigned int skl_preferred_vco_freq;
} cdclk;
struct {
@@ -446,6 +452,16 @@ struct intel_display {
} ips;
struct {
+ bool display_irqs_enabled;
+
+ /* For i915gm/i945gm vblank irq workaround */
+ u8 vblank_enabled;
+
+ u32 de_irq_mask[I915_MAX_PIPES];
+ u32 pipestat_irq_mask[I915_MAX_PIPES];
+ } irq;
+
+ struct {
wait_queue_head_t waitqueue;
/* mutex to protect pmdemand programming sequence */
@@ -534,6 +550,7 @@ struct intel_display {
struct intel_overlay *overlay;
struct intel_display_params params;
struct intel_vbt_data vbt;
+ struct intel_dmc_wl wl;
struct intel_wm wm;
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index b99c024b0934..35f9f86ef70f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -31,6 +31,7 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_wm.h"
@@ -191,7 +192,7 @@ static void intel_hdcp_info(struct seq_file *m,
struct intel_connector *intel_connector,
bool remote_req)
{
- bool hdcp_cap, hdcp2_cap;
+ bool hdcp_cap = false, hdcp2_cap = false;
if (!intel_connector->hdcp.shim) {
seq_puts(m, "No Connector Support");
@@ -252,9 +253,6 @@ static void intel_connector_info(struct seq_file *m,
struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_connector_state *conn_state = connector->state;
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
const struct drm_display_mode *mode;
seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
@@ -271,28 +269,23 @@ static void intel_connector_info(struct seq_file *m,
drm_get_subpixel_order_name(connector->display_info.subpixel_order));
seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
- if (!encoder)
- return;
-
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- if (encoder->type == INTEL_OUTPUT_DP_MST)
+ if (intel_connector->mst_port)
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_HDMIA:
- if (encoder->type == INTEL_OUTPUT_HDMI ||
- encoder->type == INTEL_OUTPUT_DDI)
- intel_hdmi_info(m, intel_connector);
+ intel_hdmi_info(m, intel_connector);
break;
default:
break;
}
seq_puts(m, "\tHDCP version: ");
- if (intel_encoder_is_mst(encoder)) {
+ if (intel_connector->mst_port) {
intel_hdcp_info(m, intel_connector, true);
seq_puts(m, "\tMST Hub HDCP version: ");
}
@@ -645,51 +638,24 @@ static int i915_display_capabilities(struct seq_file *m, void *unused)
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
struct intel_shared_dpll *pll;
int i;
drm_modeset_lock_all(&dev_priv->drm);
- seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
+ drm_printf(&p, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
dev_priv->display.dpll.ref_clks.nssc,
dev_priv->display.dpll.ref_clks.ssc);
for_each_shared_dpll(dev_priv, pll, i) {
- seq_printf(m, "DPLL%i: %s, id: %i\n", pll->index,
+ drm_printf(&p, "DPLL%i: %s, id: %i\n", pll->index,
pll->info->name, pll->info->id);
- seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
+ drm_printf(&p, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
pll->state.pipe_mask, pll->active_mask,
str_yes_no(pll->on));
- seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
- seq_printf(m, " dpll_md: 0x%08x\n",
- pll->state.hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
- seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
- seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
- seq_printf(m, " div0: 0x%08x\n", pll->state.hw_state.div0);
- seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
- pll->state.hw_state.mg_refclkin_ctl);
- seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_coreclkctl1);
- seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_hsclkctl);
- seq_printf(m, " mg_pll_div0: 0x%08x\n",
- pll->state.hw_state.mg_pll_div0);
- seq_printf(m, " mg_pll_div1: 0x%08x\n",
- pll->state.hw_state.mg_pll_div1);
- seq_printf(m, " mg_pll_lf: 0x%08x\n",
- pll->state.hw_state.mg_pll_lf);
- seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
- pll->state.hw_state.mg_pll_frac_lock);
- seq_printf(m, " mg_pll_ssc: 0x%08x\n",
- pll->state.hw_state.mg_pll_ssc);
- seq_printf(m, " mg_pll_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_bias);
- seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_tdc_coldst_bias);
+ drm_printf(&p, " tracked hardware state:\n");
+ intel_dpll_dump_hw_state(dev_priv, &p, &pll->state.hw_state);
}
drm_modeset_unlock_all(&dev_priv->drm);
@@ -1103,27 +1069,6 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_display_debugfs_params(i915);
}
-static int i915_panel_show(struct seq_file *m, void *data)
-{
- struct intel_connector *connector = m->private;
- struct intel_dp *intel_dp = intel_attached_dp(connector);
-
- if (connector->base.status != connector_status_connected)
- return -ENODEV;
-
- seq_printf(m, "Panel power up delay: %d\n",
- intel_dp->pps.panel_power_up_delay);
- seq_printf(m, "Panel power down delay: %d\n",
- intel_dp->pps.panel_power_down_delay);
- seq_printf(m, "Backlight on delay: %d\n",
- intel_dp->pps.backlight_on_delay);
- seq_printf(m, "Backlight off delay: %d\n",
- intel_dp->pps.backlight_off_delay);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_panel);
-
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
@@ -1402,20 +1347,6 @@ out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
-static int i915_bigjoiner_enable_show(struct seq_file *m, void *data)
-{
- struct intel_connector *connector = m->private;
- struct drm_crtc *crtc;
-
- crtc = connector->base.state->crtc;
- if (connector->base.status != connector_status_connected || !crtc)
- return -ENODEV;
-
- seq_printf(m, "Bigjoiner enable: %d\n", connector->force_bigjoiner_enable);
-
- return 0;
-}
-
static ssize_t i915_dsc_output_format_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
@@ -1437,30 +1368,6 @@ static ssize_t i915_dsc_output_format_write(struct file *file,
return len;
}
-static ssize_t i915_bigjoiner_enable_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct intel_connector *connector = m->private;
- struct drm_crtc *crtc;
- bool bigjoiner_en = 0;
- int ret;
-
- crtc = connector->base.state->crtc;
- if (connector->base.status != connector_status_connected || !crtc)
- return -ENODEV;
-
- ret = kstrtobool_from_user(ubuf, len, &bigjoiner_en);
- if (ret < 0)
- return ret;
-
- connector->force_bigjoiner_enable = bigjoiner_en;
- *offp += len;
-
- return len;
-}
-
static int i915_dsc_output_format_open(struct inode *inode,
struct file *file)
{
@@ -1554,8 +1461,6 @@ static const struct file_operations i915_dsc_fractional_bpp_fops = {
.write = i915_dsc_fractional_bpp_write
};
-DEFINE_SHOW_STORE_ATTRIBUTE(i915_bigjoiner_enable);
-
/*
* Returns the Current CRTC's bpc.
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
@@ -1608,12 +1513,9 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
return;
intel_drrs_connector_debugfs_add(connector);
+ intel_pps_connector_debugfs_add(connector);
intel_psr_connector_debugfs_add(connector);
- if (connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_panel_timings", 0444, root,
- connector, &i915_panel_fops);
-
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_HDMIB) {
@@ -1640,8 +1542,8 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
if (DISPLAY_VER(i915) >= 11 &&
(connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP)) {
- debugfs_create_file("i915_bigjoiner_force_enable", 0644, root,
- connector, &i915_bigjoiner_enable_fops);
+ debugfs_create_bool("i915_bigjoiner_force_enable", 0644, root,
+ &connector->force_bigjoiner_enable);
}
if (connector_type == DRM_MODE_CONNECTOR_DSI ||
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index c02d79b50006..120e209ee74a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -17,6 +17,9 @@
#include "intel_display_reg_defs.h"
#include "intel_fbc.h"
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for display info");
+
static const struct intel_display_device_info no_display = {};
#define PIPE_A_OFFSET 0x70000
@@ -768,6 +771,8 @@ static const struct intel_display_device_info xe2_lpd_display = {
BIT(INTEL_FBC_C) | BIT(INTEL_FBC_D),
};
+__diag_pop();
+
/*
* Separate detection for no display cases to keep the display id array simple.
*
@@ -922,6 +927,9 @@ void intel_display_device_probe(struct drm_i915_private *i915)
const struct intel_display_device_info *info;
u16 ver, rel, step;
+ /* Add drm device backpointer as early as possible. */
+ i915->display.drm = &i915->drm;
+
if (HAS_GMD_ID(i915))
info = probe_gmdid_display(i915, &ver, &rel, &step);
else
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index fe4268813786..17ddf82f0b6e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+#include "intel_display_conversion.h"
#include "intel_display_limits.h"
struct drm_i915_private;
@@ -47,6 +48,7 @@ struct drm_printer;
#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
@@ -68,6 +70,7 @@ struct drm_printer;
#define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \
BIT(trans)) != 0)
#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
+#define HAS_AS_SDP(i915) (DISPLAY_VER(i915) >= 13)
#define INTEL_NUM_PIPES(i915) (hweight8(DISPLAY_RUNTIME_INFO(i915)->pipe_mask))
#define I915_HAS_HOTPLUG(i915) (DISPLAY_INFO(i915)->has_hotplug)
#define OVERLAY_NEEDS_PHYSICAL(i915) (DISPLAY_INFO(i915)->overlay_needs_physical)
@@ -98,8 +101,8 @@ struct drm_printer;
(IS_DISPLAY_IP_RANGE((__i915), (ipver), (ipver)) && \
IS_DISPLAY_STEP((__i915), (from), (until)))
-#define DISPLAY_INFO(i915) ((i915)->display.info.__device_info)
-#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->display.info.__runtime_info)
+#define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info)
+#define DISPLAY_RUNTIME_INFO(i915) (&__to_intel_display(i915)->info.__runtime_info)
#define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver)
#define DISPLAY_VER_FULL(i915) IP_VER(DISPLAY_RUNTIME_INFO(i915)->ip.ver, \
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 87dd07e0d138..794b4af38055 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -11,6 +11,7 @@
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
@@ -98,7 +99,6 @@ void intel_display_driver_init_hw(struct drm_i915_private *i915)
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.get_format_info = intel_fb_get_format_info,
- .output_poll_changed = intel_fbdev_output_poll_changed,
.mode_valid = intel_mode_valid,
.atomic_check = intel_atomic_check,
.atomic_commit = intel_atomic_commit,
@@ -198,11 +198,13 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
intel_dpll_init_clock_hook(i915);
intel_init_display_hooks(i915);
intel_fdi_init_hook(i915);
+ intel_dmc_wl_init(&i915->display);
}
/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
int ret;
if (i915_inject_probe_failure(i915))
@@ -261,7 +263,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- intel_init_quirks(i915);
+ intel_init_quirks(display);
intel_fbc_init(i915);
@@ -514,10 +516,6 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
intel_overlay_setup(i915);
- ret = intel_fbdev_init(&i915->drm);
- if (ret)
- return ret;
-
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(i915);
@@ -542,17 +540,9 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_display_driver_enable_user_access(i915);
- intel_display_debugfs_register(i915);
+ intel_audio_register(i915);
- /*
- * Some ports require correctly set-up hpd registers for
- * detection to work properly (leading to ghost connected
- * connector status), e.g. VGA on gm45. Hence we can only set
- * up the initial fbdev config after hpd irqs are fully
- * enabled. We do it last so that the async config cannot run
- * before the connectors are registered.
- */
- intel_fbdev_initial_config_async(i915);
+ intel_display_debugfs_register(i915);
/*
* We need to coordinate the hotplugs with the asynchronous
@@ -562,6 +552,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
drm_kms_helper_poll_init(&i915->drm);
intel_hpd_poll_disable(i915);
+ intel_fbdev_setup(i915);
+
intel_display_device_info_print(DISPLAY_INFO(i915),
DISPLAY_RUNTIME_INFO(i915), &p);
}
@@ -597,9 +589,6 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
*/
intel_hpd_poll_fini(i915);
- /* poll work can call into fbdev, hence clean that up afterwards */
- intel_fbdev_fini(i915);
-
intel_unregister_dsm_handler();
/* flush any delayed tasks or pending work */
@@ -638,7 +627,8 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- intel_fbdev_unregister(i915);
+ drm_client_dev_unregister(&i915->drm);
+
/*
* After flushing the fbdev (incl. a late async config which
* will have delayed queuing of a hotplug event), then flush
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index f846c5b108b5..c337e0597541 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -117,13 +117,14 @@ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = dev_priv->de_irq_mask[pipe];
+ new_val = dev_priv->display.irq.de_irq_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->de_irq_mask[pipe]) {
- dev_priv->de_irq_mask[pipe] = new_val;
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
+ dev_priv->display.irq.de_irq_mask[pipe] = new_val;
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
+ dev_priv->display.irq.de_irq_mask[pipe]);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -179,7 +180,7 @@ void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
+ u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe];
u32 enable_mask = status_mask << 16;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -233,10 +234,10 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
- dev_priv->pipestat_irq_mask[pipe] |= status_mask;
+ dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
@@ -256,10 +257,10 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
+ if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
return;
- dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
+ dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
@@ -401,7 +402,7 @@ void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS);
- dev_priv->pipestat_irq_mask[pipe] = 0;
+ dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
}
}
@@ -412,7 +413,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
spin_lock(&dev_priv->irq_lock);
- if (!dev_priv->display_irqs_enabled) {
+ if (!dev_priv->display.irq.display_irqs_enabled) {
spin_unlock(&dev_priv->irq_lock);
return;
}
@@ -445,7 +446,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- status_mask |= dev_priv->pipestat_irq_mask[pipe];
+ status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
if (!status_mask)
continue;
@@ -1203,7 +1204,7 @@ int i8xx_enable_vblank(struct drm_crtc *crtc)
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc->dev);
/*
* Vblank interrupts fail to wake the device up from C2+.
@@ -1211,8 +1212,8 @@ int i915gm_enable_vblank(struct drm_crtc *crtc)
* the problem. There is a small power cost so we do this
* only when vblank interrupts are actually enabled.
*/
- if (dev_priv->vblank_enabled++ == 0)
- intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ if (i915->display.irq.vblank_enabled++ == 0)
+ intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
return i8xx_enable_vblank(crtc);
}
@@ -1315,12 +1316,12 @@ void i8xx_disable_vblank(struct drm_crtc *crtc)
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc->dev);
i8xx_disable_vblank(crtc);
- if (--dev_priv->vblank_enabled == 0)
- intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ if (--i915->display.irq.vblank_enabled == 0)
+ intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i965_disable_vblank(struct drm_crtc *crtc)
@@ -1497,8 +1498,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
- dev_priv->de_irq_mask[pipe],
- ~dev_priv->de_irq_mask[pipe] | extra_ier);
+ dev_priv->display.irq.de_irq_mask[pipe],
+ ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -1558,10 +1559,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
return;
- dev_priv->display_irqs_enabled = true;
+ dev_priv->display.irq.display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
vlv_display_irq_reset(dev_priv);
@@ -1573,10 +1574,10 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (!dev_priv->display_irqs_enabled)
+ if (!dev_priv->display.irq.display_irqs_enabled)
return;
- dev_priv->display_irqs_enabled = false;
+ dev_priv->display.irq.display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
vlv_display_irq_reset(dev_priv);
@@ -1694,12 +1695,12 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
for_each_pipe(dev_priv, pipe) {
- dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
+ dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
- dev_priv->de_irq_mask[pipe],
+ dev_priv->display.irq.de_irq_mask[pipe],
de_pipe_enables);
}
@@ -1770,9 +1771,9 @@ void intel_display_irq_init(struct drm_i915_private *i915)
* domain. We defer setting up the display irqs in this case to the
* runtime pm.
*/
- i915->display_irqs_enabled = true;
+ i915->display.irq.display_irqs_enabled = true;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display_irqs_enabled = false;
+ i915->display.irq.display_irqs_enabled = false;
intel_hotplug_irq_init(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index 11e03cfb774d..1799a6643128 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -27,6 +27,10 @@ static struct intel_display_params intel_display_modparams __read_mostly = {
* debugfs mode to 0.
*/
+intel_display_param_named_unsafe(dmc_firmware_path, charp, 0400,
+ "DMC firmware path to use instead of the default one. "
+ "Use /dev/null to disable DMC and runtime PM.");
+
intel_display_param_named_unsafe(vbt_firmware, charp, 0400,
"Load VBT from specified file under /lib/firmware");
@@ -116,6 +120,11 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
"(0=disabled, 1=enabled) "
"Default: 1");
+intel_display_param_named_unsafe(enable_dmc_wl, bool, 0400,
+ "Enable DMC wakelock "
+ "(0=disabled, 1=enabled) "
+ "Default: 0");
+
__maybe_unused
static void _param_print_bool(struct drm_printer *p, const char *driver_name,
const char *name, bool val)
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 6206cc51df04..1208a62c16d2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -24,6 +24,7 @@ struct drm_i915_private;
* debugfs file
*/
#define INTEL_DISPLAY_PARAMS_FOR_EACH(param) \
+ param(char *, dmc_firmware_path, NULL, 0400) \
param(char *, vbt_firmware, NULL, 0400) \
param(int, lvds_channel_mode, 0, 0400) \
param(int, panel_use_ssc, -1, 0600) \
@@ -46,6 +47,7 @@ struct drm_i915_private;
param(int, enable_psr, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, true, 0400) \
+ param(bool, enable_dmc_wl, false, 0400) \
#define MEMBER(T, member, ...) T member;
struct intel_display_params {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 6fd4fa52253a..03dc7edcc443 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -640,13 +640,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
- /*
- * The caller must hold already raw wakeref, upgrade that to a proper
- * wakeref to make the state checker happy about the HW access during
- * power well disabling.
- */
- assert_rpm_raw_wakeref_held(rpm);
- wakeref = intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get_noresume(rpm);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 06900ff307b2..83f616097a29 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -17,6 +17,7 @@
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dmc.h"
+#include "intel_dmc_wl.h"
#include "intel_dp_aux_regs.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
@@ -26,6 +27,7 @@
#include "intel_tc.h"
#include "intel_vga.h"
#include "skl_watermark.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
#include "vlv_sideband_reg.h"
@@ -199,6 +201,9 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
+#define ICL_AUX_PW_TO_PHY(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A)
+
#define ICL_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
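A worked example of the index arithmetic, assuming the usual contiguous A/B/C... enum layouts:

	/*
	 * pw_idx == ICL_PW_CTL_IDX_AUX_B (== ICL_PW_CTL_IDX_AUX_A + 1):
	 *   ICL_AUX_PW_TO_PHY(pw_idx) == PHY_A + 1    == PHY_B
	 *   ICL_AUX_PW_TO_CH(pw_idx)  == AUX_CH_A + 1 == AUX_CH_B
	 */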
@@ -217,27 +222,22 @@ static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
enum aux_ch aux_ch)
{
- struct intel_digital_port *dig_port = NULL;
struct intel_encoder *encoder;
for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_digital_port *dig_port;
+
/* We'll check the MST primary port */
if (encoder->type == INTEL_OUTPUT_DP_MST)
continue;
dig_port = enc_to_dig_port(encoder);
- if (!dig_port)
- continue;
-
- if (dig_port->aux_ch != aux_ch) {
- dig_port = NULL;
- continue;
- }
- break;
+ if (dig_port && dig_port->aux_ch == aux_ch)
+ return dig_port;
}
- return dig_port;
+ return NULL;
}
static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
@@ -253,7 +253,7 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
* as HDMI-only and routed to a combo PHY, the encoder either won't be
* present at all or it will not have an aux_ch assigned.
*/
- return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
+ return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
}
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
@@ -396,17 +396,11 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port)
+static bool intel_aux_ch_is_edp(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- struct intel_encoder *encoder;
-
- for_each_intel_encoder(&i915->drm, encoder) {
- if (encoder->type == INTEL_OUTPUT_EDP &&
- encoder->port == port)
- return true;
- }
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
- return false;
+ return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
}
static void
@@ -415,24 +409,25 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- /* FIXME this is a mess */
- if (phy != PHY_NONE)
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
- 0, ICL_LANE_ENABLE_AUX);
+ /*
+ * FIXME not sure if we should derive the PHY from the pw_idx, or
+ * from the VBT defined AUX_CH->DDI->PHY mapping.
+ */
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ 0, ICL_LANE_ENABLE_AUX);
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_port_is_edp(dev_priv, (enum port)phy))
- intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx),
- 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS);
+ !intel_aux_ch_is_edp(dev_priv, ICL_AUX_PW_TO_CH(pw_idx)))
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
+ 0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
}
static void
@@ -441,14 +436,15 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- /* FIXME this is a mess */
- if (phy != PHY_NONE)
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
- ICL_LANE_ENABLE_AUX, 0);
+ /*
+ * FIXME not sure if we should derive the PHY from the pw_idx, or
+ * from the VBT defined AUX_CH->DDI->PHY mapping.
+ */
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ ICL_LANE_ENABLE_AUX, 0);
intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
@@ -827,6 +823,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_dmc_wl_enable(&dev_priv->display);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -856,6 +854,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_dmc_wl_enable(&dev_priv->display);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
@@ -906,39 +906,39 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ return bxt_dpio_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
-static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv)
{
struct i915_power_well *power_well;
power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
if (IS_GEMINILAKE(dev_priv)) {
power_well = lookup_power_well(dev_priv,
GLK_DISP_PW_DPIO_CMN_C);
if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(dev_priv,
+ i915_power_well_instance(power_well)->bxt.phy);
}
}
@@ -976,16 +976,18 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
+ intel_dmc_wl_disable(&dev_priv->display);
+
intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
drm_WARN_ON(&dev_priv->drm,
- intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw,
+ intel_cdclk_clock_changed(&dev_priv->display.cdclk.hw,
&cdclk_config));
gen9_assert_dbuf_enabled(dev_priv);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_verify_ddi_phy_power_wells(dev_priv);
+ bxt_verify_dpio_phy_power_wells(dev_priv);
if (DISPLAY_VER(dev_priv) >= 11)
/*
@@ -1396,8 +1398,8 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
- phy_status_mask, phy_status, 10))
+ if (intel_de_wait(dev_priv, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10))
drm_err(&dev_priv->drm,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
@@ -1441,9 +1443,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
@@ -1519,9 +1521,9 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
return;
if (ch == DPIO_CH0)
- reg = _CHV_CMN_DW0_CH0;
+ reg = CHV_CMN_DW0_CH0;
else
- reg = _CHV_CMN_DW6_CH1;
+ reg = CHV_CMN_DW6_CH1;
vlv_dpio_get(dev_priv);
val = vlv_dpio_read(dev_priv, phy, reg);
@@ -1552,10 +1554,11 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
}
if (ch == DPIO_CH0)
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+ actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 |
+ DPIO_ALLDL_POWERDOWN_CH0, val);
else
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
- actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+ actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
+ DPIO_ALLDL_POWERDOWN_CH1, val);
drm_WARN(&dev_priv->drm, actual != expected,
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
index 2f07b7afa3bf..b83ad06f2ea7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
@@ -29,21 +29,21 @@
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b))
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
+#define _MMIO_BASE_PIPE3(base, pipe, a, b, c) _MMIO((base) + _PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
+#define _MMIO_BASE_PORT3(base, pipe, a, b, c) _MMIO((base) + _PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
/*
* Device info offset array based helpers for groups of registers with unevenly
* spaced base offsets.
*/
-#define _MMIO_PIPE2(pipe, reg) _MMIO(DISPLAY_INFO(dev_priv)->pipe_offsets[(pipe)] - \
- DISPLAY_INFO(dev_priv)->pipe_offsets[PIPE_A] + \
- DISPLAY_MMIO_BASE(dev_priv) + (reg))
-#define _MMIO_TRANS2(tran, reg) _MMIO(DISPLAY_INFO(dev_priv)->trans_offsets[(tran)] - \
- DISPLAY_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + \
- DISPLAY_MMIO_BASE(dev_priv) + (reg))
-#define _MMIO_CURSOR2(pipe, reg) _MMIO(DISPLAY_INFO(dev_priv)->cursor_offsets[(pipe)] - \
- DISPLAY_INFO(dev_priv)->cursor_offsets[PIPE_A] + \
- DISPLAY_MMIO_BASE(dev_priv) + (reg))
+#define _MMIO_PIPE2(display, pipe, reg) _MMIO(DISPLAY_INFO(display)->pipe_offsets[(pipe)] - \
+ DISPLAY_INFO(display)->pipe_offsets[PIPE_A] + \
+ DISPLAY_MMIO_BASE(display) + (reg))
+#define _MMIO_TRANS2(display, tran, reg) _MMIO(DISPLAY_INFO(display)->trans_offsets[(tran)] - \
+ DISPLAY_INFO(display)->trans_offsets[TRANSCODER_A] + \
+ DISPLAY_MMIO_BASE(display) + (reg))
+#define _MMIO_CURSOR2(display, pipe, reg) _MMIO(DISPLAY_INFO(display)->cursor_offsets[(pipe)] - \
+ DISPLAY_INFO(display)->cursor_offsets[PIPE_A] + \
+ DISPLAY_MMIO_BASE(display) + (reg))
#endif /* __INTEL_DISPLAY_REG_DEFS_H__ */
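As an aside (not part of the diff), with _MMIO_PIPE2()/_MMIO_TRANS2()/_MMIO_CURSOR2() now taking an explicit display argument, register definitions built on top of them have to thread the device pointer through instead of relying on an implicit dev_priv in scope. A minimal sketch of the new calling convention, using an invented register name and offset:

/*
 * Illustrative only: FOO_PIPE_REG and the 0x1234 offset are made up to
 * show how a pipe-indexed register would pass the device through the
 * reworked _MMIO_PIPE2() helper.
 */
#define FOO_PIPE_REG(display, pipe)	_MMIO_PIPE2(display, pipe, 0x1234)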
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 7862e7cefe02..49a5e6d9dc0d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -34,7 +34,7 @@ TRACE_EVENT(intel_pipe_enable,
TP_fast_assign(
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc *it__;
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
for_each_intel_crtc(&dev_priv->drm, it__) {
__entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
__entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
@@ -63,7 +63,7 @@ TRACE_EVENT(intel_pipe_disable,
TP_fast_assign(
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc *it__;
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
for_each_intel_crtc(&dev_priv->drm, it__) {
__entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
__entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
@@ -91,7 +91,7 @@ TRACE_EVENT(intel_pipe_crc,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -119,7 +119,7 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
TP_fast_assign(
struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -144,7 +144,7 @@ TRACE_EVENT(intel_pch_fifo_underrun,
TP_fast_assign(
enum pipe pipe = pch_transcoder;
struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
- __assign_str(dev, __dev_name_i915(dev_priv));
+ __assign_str(dev);
__entry->pipe = pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -169,7 +169,7 @@ TRACE_EVENT(intel_memory_cxsr,
TP_fast_assign(
struct intel_crtc *crtc;
- __assign_str(dev, __dev_name_i915(dev_priv));
+ __assign_str(dev);
for_each_intel_crtc(&dev_priv->drm, crtc) {
__entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
__entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
@@ -209,7 +209,7 @@ TRACE_EVENT(g4x_wm,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -256,7 +256,7 @@ TRACE_EVENT(vlv_wm,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -293,7 +293,7 @@ TRACE_EVENT(vlv_fifo_size,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -323,8 +323,8 @@ TRACE_EVENT(intel_plane_update_noarm,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -354,8 +354,8 @@ TRACE_EVENT(intel_plane_update_arm,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -383,8 +383,8 @@ TRACE_EVENT(intel_plane_disable_arm,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -410,8 +410,8 @@ TRACE_EVENT(intel_fbc_activate,
TP_fast_assign(
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -437,8 +437,8 @@ TRACE_EVENT(intel_fbc_deactivate,
TP_fast_assign(
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -464,8 +464,8 @@ TRACE_EVENT(intel_fbc_nuke,
TP_fast_assign(
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -488,7 +488,7 @@ TRACE_EVENT(intel_crtc_vblank_work_start,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -511,7 +511,7 @@ TRACE_EVENT(intel_crtc_vblank_work_end,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -536,7 +536,7 @@ TRACE_EVENT(intel_pipe_update_start,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -564,7 +564,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = crtc->debug.start_vbl_count;
__entry->scanline = crtc->debug.scanline_start;
@@ -590,7 +590,7 @@ TRACE_EVENT(intel_pipe_update_end,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = frame;
__entry->scanline = scanline_end;
@@ -613,7 +613,7 @@ TRACE_EVENT(intel_frontbuffer_invalidate,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_i915(i915));
+ __assign_str(dev);
__entry->frontbuffer_bits = frontbuffer_bits;
__entry->origin = origin;
),
@@ -634,7 +634,7 @@ TRACE_EVENT(intel_frontbuffer_flush,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_i915(i915));
+ __assign_str(dev);
__entry->frontbuffer_bits = frontbuffer_bits;
__entry->origin = origin;
),
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index e67cd5b02e84..62f7a30c37dc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -661,7 +661,8 @@ struct intel_digital_connector_state {
int broadcast_rgb;
};
-#define to_intel_digital_connector_state(x) container_of(x, struct intel_digital_connector_state, base)
+#define to_intel_digital_connector_state(conn_state) \
+ container_of_const((conn_state), struct intel_digital_connector_state, base)
struct dpll {
/* given values */
@@ -727,6 +728,7 @@ struct intel_plane_state {
#define PLANE_HAS_FENCE BIT(0)
struct intel_fb_view view;
+ u32 phys_dma_addr; /* for cursor_needs_physical */
/* Plane pxp decryption state */
bool decrypt;
@@ -1002,18 +1004,6 @@ enum intel_output_format {
INTEL_OUTPUT_FORMAT_YCBCR444,
};
-struct intel_mpllb_state {
- u32 clock; /* in KHz */
- u32 ref_control;
- u32 mpllb_cp;
- u32 mpllb_div;
- u32 mpllb_div2;
- u32 mpllb_fracn1;
- u32 mpllb_fracn2;
- u32 mpllb_sscen;
- u32 mpllb_sscstep;
-};
-
/* Used by dp and fdi links */
struct intel_link_m_n {
u32 tu;
@@ -1029,31 +1019,6 @@ struct intel_csc_matrix {
u16 postoff[3];
};
-struct intel_c10pll_state {
- u32 clock; /* in KHz */
- u8 tx;
- u8 cmn;
- u8 pll[20];
-};
-
-struct intel_c20pll_state {
- u32 clock; /* in kHz */
- u16 tx[3];
- u16 cmn[4];
- union {
- u16 mplla[10];
- u16 mpllb[11];
- };
-};
-
-struct intel_cx0pll_state {
- union {
- struct intel_c10pll_state c10;
- struct intel_c20pll_state c20;
- };
- bool ssc_enabled;
-};
-
struct intel_crtc_state {
/*
* uapi (drm) state. This is the software state shown to userspace.
@@ -1198,11 +1163,7 @@ struct intel_crtc_state {
struct intel_shared_dpll *shared_dpll;
/* Actual register state of the dpll, for shared dpll cross-checking. */
- union {
- struct intel_dpll_hw_state dpll_hw_state;
- struct intel_mpllb_state mpllb_state;
- struct intel_cx0pll_state cx0pll_state;
- };
+ struct intel_dpll_hw_state dpll_hw_state;
/*
* ICL reserved DPLLs for the CRTC/port. The active PLL is selected by
@@ -1345,6 +1306,7 @@ struct intel_crtc_state {
union hdmi_infoframe hdmi;
union hdmi_infoframe drm;
struct drm_dp_vsc_sdp vsc;
+ struct drm_dp_as_sdp as_sdp;
} infoframes;
u8 eld[MAX_ELD_BYTES];
@@ -1422,6 +1384,8 @@ struct intel_crtc_state {
u32 psr2_man_track_ctl;
+ u32 pipe_srcsz_early_tpt;
+
struct drm_rect psr2_su_area;
/* Variable Refresh Rate state */
@@ -1429,6 +1393,7 @@ struct intel_crtc_state {
bool enable, in_range;
u8 pipeline_full;
u16 flipline, vmin, vmax, guardband;
+ u32 vsync_end, vsync_start;
} vrr;
/* Stream Splitter for eDP MSO */
@@ -1617,12 +1582,17 @@ struct intel_watermark_params {
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
-#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
-#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi)
+
+#define to_intel_crtc_state(crtc_state) \
+ container_of_const((crtc_state), struct intel_crtc_state, uapi)
+#define to_intel_plane_state(plane_state) \
+ container_of_const((plane_state), struct intel_plane_state, uapi)
+#define to_intel_framebuffer(fb) \
+ container_of_const((fb), struct intel_framebuffer, base)
+
#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi {
@@ -1737,6 +1707,8 @@ struct intel_psr {
/* LNL and beyond */
u8 check_entry_lines;
+ u8 silence_period_sym_clocks;
+ u8 lfps_half_cycle_num_of_syms;
} alpm_parameters;
ktime_t last_entry_attempt;
@@ -1798,6 +1770,7 @@ struct intel_dp {
bool is_mst;
int active_mst_links;
+ enum drm_dp_mst_mode mst_detect;
/* connector directly attached - won't be use for modeset in mst world */
struct intel_connector *attached_connector;
@@ -2183,4 +2156,41 @@ static inline int to_bpp_x16(int bpp)
return bpp << 4;
}
+/*
+ * Conversion functions/macros from various pointer types to struct
+ * intel_display pointer.
+ */
+#define __drm_device_to_intel_display(p) \
+ (&to_i915(p)->display)
+#define __intel_connector_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
+#define __intel_crtc_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
+#define __intel_crtc_state_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->uapi.crtc->dev)
+#define __intel_digital_port_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.base.dev)
+#define __intel_dp_to_intel_display(p) \
+ __drm_device_to_intel_display(dp_to_dig_port(p)->base.base.dev)
+#define __intel_encoder_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
+#define __intel_hdmi_to_intel_display(p) \
+ __drm_device_to_intel_display(hdmi_to_dig_port(p)->base.base.dev)
+
+/* Helper for generic association. Map types to conversion functions/macros. */
+#define __assoc(type, p) \
+ struct type: __##type##_to_intel_display((struct type *)(p))
+
+/* Convert various pointer types to struct intel_display pointer. */
+#define to_intel_display(p) \
+ _Generic(*p, \
+ __assoc(drm_device, p), \
+ __assoc(intel_connector, p), \
+ __assoc(intel_crtc, p), \
+ __assoc(intel_crtc_state, p), \
+ __assoc(intel_digital_port, p), \
+ __assoc(intel_dp, p), \
+ __assoc(intel_encoder, p), \
+ __assoc(intel_hdmi, p))
+
#endif /* __INTEL_DISPLAY_TYPES_H__ */
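As an aside (not part of the diff), a minimal usage sketch of the new to_intel_display() helper: the _Generic() association selects the conversion at compile time from the pointer type, so one macro covers all of the listed types. example_usage() is an invented name.

/* illustrative only */
static void example_usage(struct intel_encoder *encoder, struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(encoder);

	/* the same macro accepts any of the other associated pointer types */
	display = to_intel_display(intel_dp);
}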
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index ac136fd992ba..e5a8022db664 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -10,20 +10,12 @@
static void gen11_display_wa_apply(struct drm_i915_private *i915)
{
- /* Wa_1409120013 */
- intel_de_write(i915, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
/* Wa_14010594013 */
intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
}
static void xe_d_display_wa_apply(struct drm_i915_private *i915)
{
- /* Wa_1409120013 */
- intel_de_write(i915, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
/* Wa_14013723622 */
intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 835781624482..cbd2ac5671b1 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/debugfs.h>
#include <linux/firmware.h>
#include "i915_drv.h"
@@ -38,6 +39,8 @@
* low-power state and comes back to normal.
*/
+#define INTEL_DMC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
+
enum intel_dmc_id {
DMC_FW_MAIN = 0,
DMC_FW_PIPEA,
@@ -71,6 +74,21 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
return i915->display.dmc.dmc;
}
+static const char *dmc_firmware_param(struct drm_i915_private *i915)
+{
+ const char *p = i915->display.params.dmc_firmware_path;
+
+ return p && *p ? p : NULL;
+}
+
+static bool dmc_firmware_param_disabled(struct drm_i915_private *i915)
+{
+ const char *p = dmc_firmware_param(i915);
+
+ /* Magic path to indicate disabled */
+ return p && !strcmp(p, "/dev/null");
+}
+
#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version) ((version) >> 16)
#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
@@ -89,10 +107,14 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
__stringify(major) "_" \
__stringify(minor) ".bin"
+#define XE2LPD_DMC_MAX_FW_SIZE 0x8000
#define XELPDP_DMC_MAX_FW_SIZE 0x7000
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define XE2LPD_DMC_PATH DMC_PATH(xe2lpd)
+MODULE_FIRMWARE(XE2LPD_DMC_PATH);
+
#define MTL_DMC_PATH DMC_PATH(mtl)
MODULE_FIRMWARE(MTL_DMC_PATH);
@@ -136,6 +158,59 @@ MODULE_FIRMWARE(SKL_DMC_PATH);
#define BXT_DMC_MAX_FW_SIZE 0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);
+static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size)
+{
+ const char *fw_path = NULL;
+ u32 max_fw_size = 0;
+
+ if (DISPLAY_VER_FULL(i915) == IP_VER(20, 0)) {
+ fw_path = XE2LPD_DMC_PATH;
+ max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
+ fw_path = MTL_DMC_PATH;
+ max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
+ } else if (IS_DG2(i915)) {
+ fw_path = DG2_DMC_PATH;
+ max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_P(i915)) {
+ fw_path = ADLP_DMC_PATH;
+ max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_S(i915)) {
+ fw_path = ADLS_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_DG1(i915)) {
+ fw_path = DG1_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_ROCKETLAKE(i915)) {
+ fw_path = RKL_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_TIGERLAKE(i915)) {
+ fw_path = TGL_DMC_PATH;
+ max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VER(i915) == 11) {
+ fw_path = ICL_DMC_PATH;
+ max_fw_size = ICL_DMC_MAX_FW_SIZE;
+ } else if (IS_GEMINILAKE(i915)) {
+ fw_path = GLK_DMC_PATH;
+ max_fw_size = GLK_DMC_MAX_FW_SIZE;
+ } else if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
+ fw_path = KBL_DMC_PATH;
+ max_fw_size = KBL_DMC_MAX_FW_SIZE;
+ } else if (IS_SKYLAKE(i915)) {
+ fw_path = SKL_DMC_PATH;
+ max_fw_size = SKL_DMC_MAX_FW_SIZE;
+ } else if (IS_BROXTON(i915)) {
+ fw_path = BXT_DMC_PATH;
+ max_fw_size = BXT_DMC_MAX_FW_SIZE;
+ }
+
+ *size = max_fw_size;
+
+ return fw_path;
+}
+
#define DMC_DEFAULT_FW_OFFSET 0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES 20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32
@@ -546,6 +621,8 @@ void intel_dmc_disable_program(struct drm_i915_private *i915)
pipedmc_clock_gating_wa(i915, true);
disable_all_event_handlers(i915);
pipedmc_clock_gating_wa(i915, false);
+
+ intel_dmc_wl_disable(&i915->display);
}
void assert_dmc_loaded(struct drm_i915_private *i915)
@@ -845,7 +922,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
return sizeof(struct intel_css_header);
}
-static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
+static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
struct drm_i915_private *i915 = dmc->i915;
struct intel_css_header *css_header;
@@ -858,13 +935,13 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
u32 r, offset;
if (!fw)
- return;
+ return -EINVAL;
/* Extract CSS Header information */
css_header = (struct intel_css_header *)fw->data;
r = parse_dmc_fw_css(dmc, css_header, fw->size);
if (!r)
- return;
+ return -EINVAL;
readcount += r;
@@ -872,7 +949,7 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
package_header = (struct intel_package_header *)&fw->data[readcount];
r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
if (!r)
- return;
+ return -EINVAL;
readcount += r;
@@ -889,6 +966,13 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
}
+
+ if (!intel_dmc_has_payload(i915)) {
+ drm_err(&i915->drm, "DMC firmware main program not found\n");
+ return -ENOENT;
+ }
+
+ return 0;
}
static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
@@ -923,7 +1007,7 @@ static void dmc_load_work_fn(struct work_struct *work)
err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);
- if (err == -ENOENT && !i915->params.dmc_firmware_path) {
+ if (err == -ENOENT && !dmc_firmware_param(i915)) {
fallback_path = dmc_fallback_path(i915);
if (fallback_path) {
drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
@@ -934,24 +1018,31 @@ static void dmc_load_work_fn(struct work_struct *work)
}
}
- parse_dmc_fw(dmc, fw);
-
- if (intel_dmc_has_payload(i915)) {
- intel_dmc_load_program(i915);
- intel_dmc_runtime_pm_put(i915);
-
- drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
- dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
- DMC_VERSION_MINOR(dmc->version));
- } else {
+ if (err) {
drm_notice(&i915->drm,
- "Failed to load DMC firmware %s."
- " Disabling runtime power management.\n",
- dmc->fw_path);
+ "Failed to load DMC firmware %s (%pe). Disabling runtime power management.\n",
+ dmc->fw_path, ERR_PTR(err));
drm_notice(&i915->drm, "DMC firmware homepage: %s",
- INTEL_UC_FIRMWARE_URL);
+ INTEL_DMC_FIRMWARE_URL);
+ return;
}
+ err = parse_dmc_fw(dmc, fw);
+ if (err) {
+ drm_notice(&i915->drm,
+ "Failed to parse DMC firmware %s (%pe). Disabling runtime power management.\n",
+ dmc->fw_path, ERR_PTR(err));
+ goto out;
+ }
+
+ intel_dmc_load_program(i915);
+ intel_dmc_runtime_pm_put(i915);
+
+ drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
+ dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
+
+out:
release_firmware(fw);
}
@@ -987,56 +1078,16 @@ void intel_dmc_init(struct drm_i915_private *i915)
INIT_WORK(&dmc->work, dmc_load_work_fn);
- if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
- dmc->fw_path = MTL_DMC_PATH;
- dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
- } else if (IS_DG2(i915)) {
- dmc->fw_path = DG2_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(i915)) {
- dmc->fw_path = ADLP_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(i915)) {
- dmc->fw_path = ADLS_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(i915)) {
- dmc->fw_path = DG1_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(i915)) {
- dmc->fw_path = RKL_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(i915)) {
- dmc->fw_path = TGL_DMC_PATH;
- dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER(i915) == 11) {
- dmc->fw_path = ICL_DMC_PATH;
- dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(i915)) {
- dmc->fw_path = GLK_DMC_PATH;
- dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915)) {
- dmc->fw_path = KBL_DMC_PATH;
- dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(i915)) {
- dmc->fw_path = SKL_DMC_PATH;
- dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(i915)) {
- dmc->fw_path = BXT_DMC_PATH;
- dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
- }
-
- if (i915->params.dmc_firmware_path) {
- if (strlen(i915->params.dmc_firmware_path) == 0) {
- drm_info(&i915->drm,
- "Disabling DMC firmware and runtime PM\n");
- goto out;
- }
+ dmc->fw_path = dmc_firmware_default(i915, &dmc->max_fw_size);
- dmc->fw_path = i915->params.dmc_firmware_path;
+ if (dmc_firmware_param_disabled(i915)) {
+ drm_info(&i915->drm, "Disabling DMC firmware and runtime PM\n");
+ goto out;
}
+ if (dmc_firmware_param(i915))
+ dmc->fw_path = dmc_firmware_param(i915);
+
if (!dmc->fw_path) {
drm_dbg_kms(&i915->drm,
"No known DMC firmware for platform, disabling runtime PM\n");
@@ -1072,6 +1123,8 @@ void intel_dmc_suspend(struct drm_i915_private *i915)
if (dmc)
flush_work(&dmc->work);
+ intel_dmc_wl_disable(&i915->display);
+
/* Drop the reference held in case DMC isn't loaded. */
if (!intel_dmc_has_payload(i915))
intel_dmc_runtime_pm_put(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 90d0dbb41cfe..1bf446f96a10 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -97,4 +97,10 @@
#define TGL_DMC_DEBUG3 _MMIO(0x101090)
#define DG1_DMC_DEBUG3 _MMIO(0x13415c)
+#define DMC_WAKELOCK_CFG _MMIO(0x8F1B0)
+#define DMC_WAKELOCK_CFG_ENABLE REG_BIT(31)
+#define DMC_WAKELOCK1_CTL _MMIO(0x8F140)
+#define DMC_WAKELOCK_CTL_REQ REG_BIT(31)
+#define DMC_WAKELOCK_CTL_ACK REG_BIT(15)
+
#endif /* __INTEL_DMC_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
new file mode 100644
index 000000000000..d9864b9cc429
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include "intel_de.h"
+#include "intel_dmc.h"
+#include "intel_dmc_regs.h"
+#include "intel_dmc_wl.h"
+
+/**
+ * DOC: DMC wakelock support
+ *
+ * Wake lock is the mechanism that causes the display engine to exit DC
+ * states to allow programming of registers that are powered down in
+ * those states. Previous platforms exited DC states automatically when
+ * they detected register programming. Now software controls the exit by
+ * programming the wake lock. This improves system performance and
+ * system interactions and better fits the flip queue style of
+ * programming. Wake lock is only required when DC5, DC6, or DC6v have
+ * been enabled in DC_STATE_EN and the wake lock mode of operation has
+ * been enabled.
+ *
+ * The wakelock mechanism in DMC allows the display engine to exit DC
+ * states explicitly before programming registers that may be powered
+ * down. In earlier hardware, this was done automatically and
+ * implicitly when the display engine accessed a register. With the
+ * wakelock implementation, the driver asserts a wakelock in DMC,
+ * which forces it to exit the DC state until the wakelock is
+ * deasserted.
+ *
+ * The mechanism can be enabled and disabled by writing to the
+ * DMC_WAKELOCK_CFG register. There are also 13 control registers
+ * that can be used to hold and release different wakelocks. In the
+ * current implementation, we only need one wakelock, so only
+ * DMC_WAKELOCK1_CTL is used. The other definitions are here for
+ * potential future use.
+ */
+
+#define DMC_WAKELOCK_CTL_TIMEOUT 5	/* request/ack timeout, in ms */
+#define DMC_WAKELOCK_HOLD_TIME 50	/* release delay after last put, in ms */
+
+struct intel_dmc_wl_range {
+ u32 start;
+ u32 end;
+};
+
+static struct intel_dmc_wl_range lnl_wl_range[] = {
+ { .start = 0x60000, .end = 0x7ffff },
+};
+
+static void __intel_dmc_wl_release(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_dmc_wl *wl = &display->wl;
+
+ WARN_ON(refcount_read(&wl->refcount));
+
+ queue_delayed_work(i915->unordered_wq, &wl->work,
+ msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
+}
+
+static void intel_dmc_wl_work(struct work_struct *work)
+{
+ struct intel_dmc_wl *wl =
+ container_of(work, struct intel_dmc_wl, work.work);
+ struct intel_display *display =
+ container_of(wl, struct intel_display, wl);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ /* Bail out if refcount reached zero while waiting for the spinlock */
+ if (!refcount_read(&wl->refcount))
+ goto out_unlock;
+
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+
+ if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK, 0,
+ DMC_WAKELOCK_CTL_TIMEOUT)) {
+ WARN_RATELIMIT(1, "DMC wakelock release timed out");
+ goto out_unlock;
+ }
+
+ wl->taken = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+static bool intel_dmc_wl_check_range(u32 address)
+{
+ int i;
+ bool wl_needed = false;
+
+ for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
+ if (address >= lnl_wl_range[i].start &&
+ address <= lnl_wl_range[i].end) {
+ wl_needed = true;
+ break;
+ }
+ }
+
+ return wl_needed;
+}
+
+static bool __intel_dmc_wl_supported(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) < 20 ||
+ !intel_dmc_has_payload(i915) ||
+ !display->params.enable_dmc_wl)
+ return false;
+
+ return true;
+}
+
+void intel_dmc_wl_init(struct intel_display *display)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+
+ /* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
+ if (DISPLAY_VER(display) < 20 || !display->params.enable_dmc_wl)
+ return;
+
+ INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
+ spin_lock_init(&wl->lock);
+ refcount_set(&wl->refcount, 0);
+}
+
+void intel_dmc_wl_enable(struct intel_display *display)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (wl->enabled)
+ goto out_unlock;
+
+ /*
+ * Enable wakelock in DMC. We shouldn't try to take the
+ * wakelock, because we're just enabling it, so call the
+ * non-locking version directly here.
+ */
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
+
+ wl->enabled = true;
+ wl->taken = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+void intel_dmc_wl_disable(struct intel_display *display)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ flush_delayed_work(&wl->work);
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (!wl->enabled)
+ goto out_unlock;
+
+ /* Disable wakelock in DMC */
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
+
+ refcount_set(&wl->refcount, 0);
+ wl->enabled = false;
+ wl->taken = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ if (!intel_dmc_wl_check_range(reg.reg))
+ return;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (!wl->enabled)
+ goto out_unlock;
+
+ cancel_delayed_work(&wl->work);
+
+ if (refcount_inc_not_zero(&wl->refcount))
+ goto out_unlock;
+
+ refcount_set(&wl->refcount, 1);
+
+ /*
+ * Only try to take the wakelock if it's not marked as taken
+ * yet. It may be already taken at this point if we have
+ * already released the last reference, but the work has not
+ * run yet.
+ */
+ if (!wl->taken) {
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
+ DMC_WAKELOCK_CTL_REQ);
+
+ if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_TIMEOUT)) {
+ WARN_RATELIMIT(1, "DMC wakelock ack timed out");
+ goto out_unlock;
+ }
+
+ wl->taken = true;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
+
+void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
+ return;
+
+ if (!intel_dmc_wl_check_range(reg.reg))
+ return;
+
+ spin_lock_irqsave(&wl->lock, flags);
+
+ if (!wl->enabled)
+ goto out_unlock;
+
+ if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
+ "Tried to put wakelock with refcount zero\n"))
+ goto out_unlock;
+
+ if (refcount_dec_and_test(&wl->refcount)) {
+ __intel_dmc_wl_release(display);
+
+ goto out_unlock;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&wl->lock, flags);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.h b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
new file mode 100644
index 000000000000..adab51208d0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_WAKELOCK_H__
+#define __INTEL_WAKELOCK_H__
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/refcount.h>
+
+#include "i915_reg_defs.h"
+
+struct intel_display;
+
+struct intel_dmc_wl {
+ spinlock_t lock; /* protects enabled, taken and refcount */
+ bool enabled;
+ bool taken;
+ refcount_t refcount;
+ struct delayed_work work;
+};
+
+void intel_dmc_wl_init(struct intel_display *display);
+void intel_dmc_wl_enable(struct intel_display *display);
+void intel_dmc_wl_disable(struct intel_display *display);
+void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg);
+void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg);
+
+#endif /* __INTEL_WAKELOCK_H__ */
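As an aside (not part of the diff), a minimal sketch of how a register accessor can bracket an MMIO access with the new wakelock API; raw_mmio_read() is a placeholder, and the real integration belongs in the intel_de.h helpers (hence the _nowl variants used in intel_dmc_wl.c itself to avoid recursion). Both calls are cheap no-ops unless the offset falls in a protected range and enable_dmc_wl is set.

/* illustrative only */
static u32 example_protected_read(struct intel_display *display, i915_reg_t reg)
{
	u32 val;

	intel_dmc_wl_get(display, reg);		/* hold DMC out of DC states */
	val = raw_mmio_read(display, reg);	/* placeholder for the raw MMIO access */
	intel_dmc_wl_put(display, reg);		/* release; DC re-entry after the hold time */

	return val;
}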
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f0c3ed37b350..e05e25cd4a94 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -67,6 +67,7 @@
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
+#include "intel_drrs.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -88,6 +89,9 @@
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
+/* Max DSC line buffer depth supported by HW. */
+#define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH 13
+
/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530
@@ -122,6 +126,14 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return dig_port->base.type == INTEL_OUTPUT_EDP;
}
+bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return HAS_AS_SDP(i915) &&
+ drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
+}
+
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* Is link rate UHBR and thus 128b/132b? */
@@ -213,7 +225,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
* Sink rates for 128b/132b. If set, sink should support all 8b/10b
* rates and 10 Gbps.
*/
- if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
+ if (drm_dp_128b132b_supported(intel_dp->dpcd)) {
u8 uhbr_rates = 0;
BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);
@@ -424,7 +436,7 @@ int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
return max_rate;
}
-bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
+bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
@@ -442,11 +454,9 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
+ if (intel_encoder_is_combo(encoder) && !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -462,11 +472,9 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp)
static int mtl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- if (intel_is_c10phy(i915, phy))
+ if (intel_encoder_is_c10phy(encoder))
return 810000;
return 2000000;
@@ -498,7 +506,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
/* The values must be in increasing order */
static const int mtl_rates[] = {
162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
- 810000, 1000000, 1350000, 2000000,
+ 810000, 1000000, 2000000,
};
static const int icl_rates[] = {
162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
@@ -1197,15 +1205,15 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
}
bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
int hdisplay, int clock)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct intel_connector *connector = intel_dp->attached_connector;
- if (!intel_dp_can_bigjoiner(intel_dp))
+ if (!intel_dp_has_bigjoiner(intel_dp))
return false;
- return clock > i915->max_dotclk_freq || hdisplay > 5120 ||
+ return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 ||
connector->force_bigjoiner_enable;
}
@@ -1219,7 +1227,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
- int max_dotclk = dev_priv->max_dotclk_freq;
+ int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
u16 dsc_max_compressed_bpp = 0;
u8 dsc_slice_count = 0;
enum drm_mode_status status;
@@ -1232,6 +1240,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
fixed_mode = intel_panel_fixed_mode(connector, mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
status = intel_panel_mode_valid(connector, mode);
@@ -1241,10 +1252,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
target_clock = fixed_mode->clock;
}
- if (mode->clock < 10000)
- return MODE_CLOCK_LOW;
-
- if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
+ if (intel_dp_need_bigjoiner(intel_dp, connector,
+ mode->hdisplay, target_clock)) {
bigjoiner = true;
max_dotclk *= 2;
}
@@ -1305,11 +1314,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- /*
- * Big joiner configuration needs DSC for TGL which is not true for
- * XE_LPD where uncompressed joiner is supported.
- */
- if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
+ if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc)
return MODE_CLOCK_HIGH;
if (mode_rate > max_rate && !dsc)
@@ -1421,7 +1426,8 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) >= 12)
return true;
- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
+ if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+ !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
return false;
@@ -1702,7 +1708,6 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
- u8 line_buf_depth;
int ret;
/*
@@ -1731,20 +1736,14 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
DP_DSC_RGB;
- line_buf_depth = drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd);
- if (!line_buf_depth) {
+ vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH,
+ drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd));
+ if (!vdsc_cfg->line_buf_depth) {
drm_dbg_kms(&i915->drm,
"DSC Sink Line Buffer Depth invalid\n");
return -EINVAL;
}
- if (vdsc_cfg->dsc_version_minor == 2)
- vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
- else
- vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
-
vdsc_cfg->block_pred_enable =
connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
@@ -1916,8 +1915,9 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp ||
- valid_dsc_bpp[i] > dsc_max_bpp)
+ if (valid_dsc_bpp[i] < dsc_min_bpp)
+ continue;
+ if (valid_dsc_bpp[i] > dsc_max_bpp)
break;
ret = dsc_compute_link_config(intel_dp,
@@ -2398,6 +2398,16 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
}
+bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner)
+{
+ /*
+ * The pipe joiner needs compression up to display version 12 due to
+ * bandwidth limitations. From DG2 onwards the pipe joiner can be
+ * enabled without compression.
+ */
+ return DISPLAY_VER(i915) < 13 && use_joiner;
+}
+
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2406,30 +2416,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- const struct intel_connector *connector =
+ struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
- bool joiner_needs_dsc = false;
- bool dsc_needed;
+ bool dsc_needed, joiner_needs_dsc;
int ret = 0;
if (pipe_config->fec_enable &&
!intel_dp_supports_fec(intel_dp, connector, pipe_config))
return -EINVAL;
- if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
+ if (intel_dp_need_bigjoiner(intel_dp, connector,
+ adjusted_mode->crtc_hdisplay,
adjusted_mode->crtc_clock))
pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
- /*
- * Pipe joiner needs compression up to display 12 due to bandwidth
- * limitation. DG2 onwards pipe joiner can be enabled without
- * compression.
- */
- joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes;
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->bigjoiner_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
!intel_dp_compute_config_limits(intel_dp, pipe_config,
@@ -2612,6 +2617,29 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
+static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_dp_as_sdp *as_sdp = &crtc_state->infoframes.as_sdp;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!crtc_state->vrr.enable ||
+ !intel_dp_as_sdp_supported(intel_dp))
+ return;
+
+ crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
+
+ /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
+ as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
+ as_sdp->length = 0x9;
+ as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
+ as_sdp->vtotal = adjusted_mode->vtotal;
+ as_sdp->target_rr = 0;
+ as_sdp->duration_incr_ms = 0;
+ as_sdp->duration_decr_ms = 0;
+}
+
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -2683,15 +2711,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}
-static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder)
-{
- if (HAS_DOUBLE_BUFFERED_M_N(i915))
- return true;
-
- return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
-}
-
static bool can_enable_drrs(struct intel_connector *connector,
const struct intel_crtc_state *pipe_config,
const struct drm_display_mode *downclock_mode)
@@ -2714,7 +2733,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
if (pipe_config->has_pch_encoder)
return false;
- if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
+ if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
return false;
return downclock_mode &&
@@ -2731,7 +2750,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
- if (has_seamless_m_n(connector))
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that when updating M/N live.
+ */
+ if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
@@ -2972,6 +2995,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
g4x_dp_set_clock(encoder, pipe_config);
intel_vrr_compute_config(pipe_config, conn_state);
+ intel_dp_compute_as_sdp(intel_dp, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
@@ -3364,6 +3388,14 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
fastset = false;
}
+ if (CAN_PANEL_REPLAY(intel_dp)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
+ encoder->base.base.id, encoder->base.name);
+ crtc_state->uapi.mode_changed = true;
+ fastset = false;
+ }
+
return fastset;
}
@@ -4047,39 +4079,84 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_dp->downstream_ports) == 0;
}
-static bool
-intel_dp_can_mst(struct intel_dp *intel_dp)
+static const char *intel_dp_mst_mode_str(enum drm_dp_mst_mode mst_mode)
+{
+ if (mst_mode == DRM_DP_MST)
+ return "MST";
+ else if (mst_mode == DRM_DP_SST_SIDEBAND_MSG)
+ return "SST w/ sideband messaging";
+ else
+ return "SST";
+}
+
+static enum drm_dp_mst_mode
+intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
+ enum drm_dp_mst_mode sink_mst_mode)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- return i915->display.params.enable_dp_mst &&
- intel_dp_mst_source_support(intel_dp) &&
- drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+ if (!i915->display.params.enable_dp_mst)
+ return DRM_DP_SST;
+
+ if (!intel_dp_mst_source_support(intel_dp))
+ return DRM_DP_SST;
+
+ if (sink_mst_mode == DRM_DP_SST_SIDEBAND_MSG &&
+ !(intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B))
+ return DRM_DP_SST;
+
+ return sink_mst_mode;
}
-static void
-intel_dp_configure_mst(struct intel_dp *intel_dp)
+static enum drm_dp_mst_mode
+intel_dp_mst_detect(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct intel_encoder *encoder =
- &dp_to_dig_port(intel_dp)->base;
- bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum drm_dp_mst_mode sink_mst_mode;
+ enum drm_dp_mst_mode mst_detect;
+
+ sink_mst_mode = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+
+ mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode);
drm_dbg_kms(&i915->drm,
- "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n",
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp_mst_source_support(intel_dp)),
- str_yes_no(sink_can_mst),
- str_yes_no(i915->display.params.enable_dp_mst));
+ intel_dp_mst_mode_str(sink_mst_mode),
+ str_yes_no(i915->display.params.enable_dp_mst),
+ intel_dp_mst_mode_str(mst_detect));
+
+ return mst_detect;
+}
+static void
+intel_dp_mst_configure(struct intel_dp *intel_dp)
+{
if (!intel_dp_mst_source_support(intel_dp))
return;
- intel_dp->is_mst = sink_can_mst &&
- i915->display.params.enable_dp_mst;
+ intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+
+ /* Avoid stale info on the next detect cycle. */
+ intel_dp->mst_detect = DRM_DP_SST;
+}
+
+static void
+intel_dp_mst_disconnect(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!intel_dp->is_mst)
+ return;
+
+ drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
}
static bool
@@ -4127,6 +4204,32 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
return false;
}
+static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp,
+ struct dp_sdp *sdp, size_t size)
+{
+ size_t length = sizeof(struct dp_sdp);
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
+
+ /* Prepare AS (Adaptive Sync) SDP Header */
+ sdp->sdp_header.HB0 = 0;
+ sdp->sdp_header.HB1 = as_sdp->sdp_type;
+ sdp->sdp_header.HB2 = 0x02;
+ sdp->sdp_header.HB3 = as_sdp->length;
+
+ /* Fill AS (Adaptive Sync) SDP Payload */
+ sdp->db[0] = as_sdp->mode;
+ sdp->db[1] = as_sdp->vtotal & 0xFF;
+ sdp->db[2] = (as_sdp->vtotal >> 8) & 0xFF;
+ sdp->db[3] = as_sdp->target_rr & 0xFF;
+ sdp->db[4] = (as_sdp->target_rr >> 8) & 0x3;
+
+ return length;
+}
+
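
A minimal standalone sketch of the payload byte layout used by the pack/unpack helpers in this hunk (plain C, hypothetical helper names, no kernel dependencies), showing how the 16-bit vtotal and the 10-bit target refresh rate are split across DB1-DB4:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration of the AS SDP payload layout used above. */
static void as_payload_pack(uint8_t db[5], uint8_t mode,
			    uint16_t vtotal, uint16_t target_rr)
{
	db[0] = mode;				/* operation mode */
	db[1] = vtotal & 0xff;			/* vtotal bits 7:0 */
	db[2] = (vtotal >> 8) & 0xff;		/* vtotal bits 15:8 */
	db[3] = target_rr & 0xff;		/* target refresh rate bits 7:0 */
	db[4] = (target_rr >> 8) & 0x3;		/* target refresh rate bits 9:8 */
}

static void as_payload_unpack(const uint8_t db[5], uint8_t *mode,
			      uint16_t *vtotal, uint16_t *target_rr)
{
	*mode = db[0];
	*vtotal = (uint16_t)(db[2] << 8) | db[1];
	*target_rr = (uint16_t)((db[4] & 0x3) << 8) | db[3];
}

int main(void)
{
	uint8_t db[5], mode;
	uint16_t vtotal, rr;

	as_payload_pack(db, 1, 2222, 600);	/* example values only */
	as_payload_unpack(db, &mode, &vtotal, &rr);
	printf("mode=%u vtotal=%u target_rr=%u\n", mode, vtotal, rr);
	return 0;
}
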
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
const struct hdmi_drm_infoframe *drm_infoframe,
@@ -4226,6 +4329,10 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
&crtc_state->infoframes.drm.drm,
&sdp, sizeof(sdp));
break;
+ case DP_SDP_ADAPTIVE_SYNC:
+ len = intel_dp_as_sdp_pack(&crtc_state->infoframes.as_sdp, &sdp,
+ sizeof(sdp));
+ break;
default:
MISSING_CASE(type);
return;
@@ -4247,6 +4354,10 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
+
+ if (HAS_AS_SDP(dev_priv))
+ dip_enable |= VIDEO_DIP_ENABLE_AS_ADL;
+
u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
@@ -4264,10 +4375,42 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
return;
intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
+ intel_write_dp_sdp(encoder, crtc_state, DP_SDP_ADAPTIVE_SYNC);
intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
+static
+int intel_dp_as_sdp_unpack(struct drm_dp_as_sdp *as_sdp,
+ const void *buffer, size_t size)
+{
+ const struct dp_sdp *sdp = buffer;
+
+ if (size < sizeof(struct dp_sdp))
+ return -EINVAL;
+
+ memset(as_sdp, 0, sizeof(*as_sdp));
+
+ if (sdp->sdp_header.HB0 != 0)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB1 != DP_SDP_ADAPTIVE_SYNC)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB2 != 0x02)
+ return -EINVAL;
+
+ if ((sdp->sdp_header.HB3 & 0x3F) != 9)
+ return -EINVAL;
+
+ as_sdp->length = sdp->sdp_header.HB3 & DP_ADAPTIVE_SYNC_SDP_LENGTH;
+ as_sdp->mode = sdp->db[0] & DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE;
+ as_sdp->vtotal = (sdp->db[2] << 8) | sdp->db[1];
+	as_sdp->target_rr = (u64)sdp->db[3] | (((u64)sdp->db[4] & 0x3) << 8);
+
+ return 0;
+}
+
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
const void *buffer, size_t size)
{
@@ -4338,6 +4481,29 @@ static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
return 0;
}
+static void
+intel_read_dp_as_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_dp_as_sdp *as_sdp)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ unsigned int type = DP_SDP_ADAPTIVE_SYNC;
+ struct dp_sdp sdp = {};
+ int ret;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+ sizeof(sdp));
+
+ ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp));
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n");
+}
+
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
const void *buffer, size_t size)
@@ -4444,6 +4610,10 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
&crtc_state->infoframes.drm.drm);
break;
+ case DP_SDP_ADAPTIVE_SYNC:
+ intel_read_dp_as_sdp(encoder, crtc_state,
+ &crtc_state->infoframes.as_sdp);
+ break;
default:
MISSING_CASE(type);
break;
@@ -5371,6 +5541,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
+ intel_dp->mst_detect = intel_dp_mst_detect(intel_dp);
+
/* if there's no downstream port, we're done */
if (!drm_dp_is_branch(dpcd))
return connector_status_connected;
@@ -5382,7 +5554,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
connector_status_connected : connector_status_disconnected;
}
- if (intel_dp_can_mst(intel_dp))
+ if (intel_dp->mst_detect == DRM_DP_MST)
return connector_status_connected;
/* If no HPD, poke DDC gently */
@@ -5687,15 +5859,7 @@ intel_dp_detect(struct drm_connector *connector,
memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
intel_dp->psr.sink_panel_replay_support = false;
- if (intel_dp->is_mst) {
- drm_dbg_kms(&dev_priv->drm,
- "MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst,
- intel_dp->mst_mgr.mst_state);
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
- }
+ intel_dp_mst_disconnect(intel_dp);
intel_dp_tunnel_disconnect(intel_dp);
@@ -5714,7 +5878,7 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
- intel_dp_configure_mst(intel_dp);
+ intel_dp_mst_configure(intel_dp);
/*
* TODO: Reset link params when switching to MST mode, until MST
@@ -6497,7 +6661,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
int type;
/* Initialize the work for modeset in case of link train failure */
@@ -6522,7 +6685,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
- drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
+ drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder));
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
@@ -6565,6 +6728,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->sync_state = intel_dp_connector_sync_state;
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index c540d3a73fe7..106ecfde36d9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -88,6 +88,7 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
int intel_dp_link_symbol_size(int rate);
int intel_dp_link_symbol_clock(int rate);
@@ -119,7 +120,8 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
int bw_overhead);
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
int max_dprx_rate, int max_dprx_lanes);
-bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
+bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner);
+bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
@@ -149,6 +151,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
bool bigjoiner);
bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
int hdisplay, int clock);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 4f4a0e3b3114..b8a53bb174da 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -61,9 +61,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
u32 status;
int ret;
- ret = __intel_de_wait_for_register(i915, ch_ctl,
- DP_AUX_CH_CTL_SEND_BUSY, 0,
- 2, timeout_ms, &status);
+ ret = intel_de_wait_custom(i915, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, 0,
+ 2, timeout_ms, &status);
if (ret == -ETIMEDOUT)
drm_err(&i915->drm,
@@ -143,9 +142,15 @@ static int intel_dp_aux_sync_len(void)
return precharge + preamble;
}
-static int intel_dp_aux_fw_sync_len(void)
+int intel_dp_aux_fw_sync_len(void)
{
- int precharge = 10; /* 10-16 */
+ /*
+	 * We faced some glitches on a Dell Precision 5490 MTL laptop with the
+	 * panel "Manufacturer: AUO, Model: 63898" when using the HW default of
+	 * 18. Using 20 fixes these problems with the panel, and is still within
+	 * the range mentioned in the eDP specification.
+ */
+ int precharge = 12; /* 10-16 */
int preamble = 8;
return precharge + preamble;
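
The resulting fast-wake sync length is just the sum of the two counts, so the change lands on the 20 pulses the comment refers to (a trivial arithmetic sketch, nothing beyond the values shown above):

#include <stdio.h>

int main(void)
{
	int precharge = 12;	/* was 10; valid range 10-16 */
	int preamble = 8;

	/* 12 + 8 = 20 pulses, vs. the 18 that caused glitches on the panel above. */
	printf("fw sync len: %d\n", precharge + preamble);
	return 0;
}
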
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 8447f3e601fe..76d1f2ed7c2f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -20,5 +20,6 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
+int intel_dp_aux_fw_sync_len(void);
#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index b98a87883fef..92b03073acdd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -691,12 +691,15 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
u8 bcaps;
int ret;
+ *hdcp_capable = false;
+ *hdcp2_capable = false;
if (!intel_encoder_is_mst(connector->encoder))
return -EINVAL;
ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
if (ret)
- return ret;
+ drm_dbg_kms(&i915->drm,
+ "HDCP2 DPCD capability read failed err: %d\n", ret);
ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
if (ret)
@@ -766,11 +769,9 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
return -EINVAL;
/* Wait for encryption confirmation */
- if (intel_de_wait_for_register(i915,
- HDCP_STATUS(i915, cpu_transcoder, port),
- stream_enc_status,
- enable ? stream_enc_status : 0,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait(i915, HDCP_STATUS(i915, cpu_transcoder, port),
+ stream_enc_status, enable ? stream_enc_status : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
return -ETIMEDOUT;
@@ -801,11 +802,10 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return ret;
/* Wait for encryption confirmation */
- if (intel_de_wait_for_register(i915,
- HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
- STREAM_ENCRYPTION_STATUS,
- enable ? STREAM_ENCRYPTION_STATUS : 0,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait(i915, HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
+ STREAM_ENCRYPTION_STATUS,
+ enable ? STREAM_ENCRYPTION_STATUS : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
return -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index fb84ca98bb7a..947575140059 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -334,7 +334,7 @@ static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
- DISPLAY_VER(i915) >= 11;
+ DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915);
}
/* 128b/132b */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 53aec023ce92..715d2f59f565 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -51,25 +51,39 @@
#include "intel_vdsc.h"
#include "skl_scaler.h"
-static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp,
- const struct drm_display_mode *adjusted_mode,
- struct intel_crtc_state *crtc_state,
- bool dsc)
+static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
+ bool dsc)
{
- if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) < 14 && dsc) {
- int output_bpp = bpp;
- /* DisplayPort 2 128b/132b, bits per lane is always 32 */
- int symbol_clock = crtc_state->port_clock / 32;
-
- if (output_bpp * adjusted_mode->crtc_clock >=
- symbol_clock * 72) {
- drm_dbg_kms(&i915->drm, "UHBR check failed(required bw %d available %d)\n",
- output_bpp * adjusted_mode->crtc_clock, symbol_clock * 72);
- return -EINVAL;
- }
- }
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
- return 0;
+ if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(i915) >= 20 || !dsc)
+ return INT_MAX;
+
+ /*
+ * DSC->DPT interface width:
+ * ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used)
+ * LNL+: 144 bits (not a bottleneck in any config)
+ *
+ * Bspec/49259 suggests that the FEC overhead needs to be
+	 * applied here, though HW people claim that neither this FEC
+	 * nor any other overhead is applicable here (that is, the actual
+	 * available_bw is just symbol_clock * 72). However, based on
+	 * testing on MTL-P with the
+	 * - DELL U3224KBA display
+	 * - Unigraf UCD-500 CTS test sink
+	 * devices, for the
+	 * - 5120x2880/995.59MHz
+	 * - 6016x3384/1357.23MHz
+	 * - 6144x3456/1413.39MHz
+	 * modes (all the ones having a DPT limit on the above devices),
+	 * both the channel coding efficiency and an additional 3%
+	 * overhead need to be accounted for.
+ */
+ return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72,
+ drm_dp_bw_channel_coding_efficiency(true)),
+ mul_u32_u32(adjusted_mode->crtc_clock, 1030000));
}
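
As a rough worked example of the limit computed above (assumed numbers only: a UHBR10 link with rate 1000000 and 32-bit 128b/132b symbols, the 5120x2880/995.59MHz mode cited in the comment, 967100 ppm taken as the channel coding efficiency and 1030000 folding in the extra 3% overhead):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed UHBR10 link: rate 1000000, 32-bit 128b/132b symbols. */
	uint64_t symbol_clock = 1000000ULL * 10 / 32;	/* kHz, 312500 */
	uint64_t crtc_clock = 995590;			/* kHz, 5120x2880 mode */

	/* max DPT bpp = symbol_clock * 72 * efficiency / (crtc_clock * 1.03) */
	uint64_t max_dpt_bpp = symbol_clock * 72 * 967100 /
			       (crtc_clock * 1030000);

	printf("max DPT bpp ~ %" PRIu64 "\n", max_dpt_bpp);	/* ~21 */
	return 0;
}
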
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
@@ -88,11 +102,10 @@ static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
if (dsc) {
flags |= DRM_DP_BW_OVERHEAD_DSC;
- /* TODO: add support for bigjoiner */
dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
adjusted_mode->clock,
adjusted_mode->hdisplay,
- false);
+ crtc_state->bigjoiner_pipes);
}
overhead = drm_dp_bw_overhead(crtc_state->lane_count,
@@ -158,6 +171,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp, slots = -EINVAL;
+ int max_dpt_bpp;
int ret = 0;
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
@@ -178,6 +192,13 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
crtc_state->port_clock,
crtc_state->lane_count);
+ max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
+ if (max_bpp > max_dpt_bpp) {
+ drm_dbg_kms(&i915->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
+ max_bpp, max_dpt_bpp);
+ max_bpp = max_dpt_bpp;
+ }
+
drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
min_bpp, max_bpp);
@@ -189,10 +210,6 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
- ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc);
- if (ret)
- continue;
-
link_bpp_x16 = to_bpp_x16(dsc ? bpp :
intel_dp_output_bpp(crtc_state->output_format, bpp));
@@ -404,15 +421,22 @@ static int mode_hblank_period_ns(const struct drm_display_mode *mode)
static bool
hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct link_config_limits *limits)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ bool is_uhbr_sink = connector->mst_port &&
+ drm_dp_128b132b_supported(connector->mst_port->dpcd);
+ int hblank_limit = is_uhbr_sink ? 500 : 300;
if (!connector->dp.dsc_hblank_expansion_quirk)
return false;
- if (mode_hblank_period_ns(adjusted_mode) > 300)
+ if (is_uhbr_sink && !drm_dp_is_uhbr_rate(limits->max_rate))
+ return false;
+
+ if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
return false;
return true;
@@ -428,7 +452,7 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int min_bpp_x16 = limits->link.min_bpp_x16;
- if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state))
+ if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits))
return true;
if (!dsc) {
@@ -525,14 +549,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
- const struct intel_connector *connector =
+ struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
- bool dsc_needed;
+ bool dsc_needed, joiner_needs_dsc;
int ret = 0;
if (pipe_config->fec_enable &&
@@ -542,11 +567,18 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
+ if (intel_dp_need_bigjoiner(intel_dp, connector,
+ adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_clock))
+ pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
+
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- dsc_needed = intel_dp->force_dsc_en ||
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, pipe_config->bigjoiner_pipes);
+
+ dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
!intel_dp_mst_compute_config_limits(intel_dp,
connector,
pipe_config,
@@ -566,8 +598,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
/* enable compression if the mode doesn't fit available BW */
if (dsc_needed) {
- drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, force=%s)\n",
- str_yes_no(ret),
+ drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_mst_dsc_source_support(pipe_config))
@@ -613,7 +645,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
@@ -954,6 +986,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
struct drm_dp_mst_atomic_payload *new_payload =
drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_crtc *pipe_crtc;
bool last_mst_stream;
intel_dp->active_mst_links--;
@@ -962,7 +995,13 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
- intel_crtc_vblank_off(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_off(old_pipe_crtc_state);
+ }
intel_disable_transcoder(old_crtc_state);
@@ -980,12 +1019,18 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_transcoder_func(old_crtc_state);
- intel_dsc_disable(old_crtc_state);
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ const struct intel_crtc_state *old_pipe_crtc_state =
+ intel_atomic_get_old_crtc_state(state, pipe_crtc);
- if (DISPLAY_VER(dev_priv) >= 9)
- skl_scaler_disable(old_crtc_state);
- else
- ilk_pfit_disable(old_crtc_state);
+ intel_dsc_disable(old_pipe_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_scaler_disable(old_pipe_crtc_state);
+ else
+ ilk_pfit_disable(old_pipe_crtc_state);
+ }
/*
* Power down mst path before disabling the port, otherwise we end
@@ -1117,6 +1162,39 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
+static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ u32 clear = 0;
+ u32 set = 0;
+
+ if (!IS_ALDERLAKE_P(i915))
+ return;
+
+ if (!IS_DISPLAY_STEP(i915, STEP_D0, STEP_FOREVER))
+ return;
+
+ /* Wa_14013163432:adlp */
+ if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
+ set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
+
+ /* Wa_14014143976:adlp */
+ if (IS_DISPLAY_STEP(i915, STEP_E0, STEP_FOREVER)) {
+ if (intel_dp_is_uhbr(crtc_state))
+ set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
+ else if (crtc_state->fec_enable)
+ clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
+
+ if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
+ set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder);
+ }
+
+ if (!clear && !set)
+ return;
+
+ intel_de_rmw(i915, CHICKEN_MISC_3, clear, set);
+}
+
static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -1131,6 +1209,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
bool first_mst_stream = intel_dp->active_mst_links == 1;
+ struct intel_crtc *pipe_crtc;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
@@ -1145,6 +1224,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
+ enable_bs_jitter_was(pipe_config);
+
intel_ddi_enable_transcoder_func(encoder, pipe_config);
clear_act_sent(encoder, pipe_config);
@@ -1160,7 +1241,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
- drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
+ drm_dp_add_payload_part2(&intel_dp->mst_mgr,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
if (DISPLAY_VER(dev_priv) >= 12)
@@ -1172,7 +1253,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_enable_transcoder(pipe_config);
- intel_crtc_vblank_on(pipe_config);
+ for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ intel_crtc_joined_pipe_mask(pipe_config)) {
+ const struct intel_crtc_state *pipe_crtc_state =
+ intel_atomic_get_new_crtc_state(state, pipe_crtc);
+
+ intel_crtc_vblank_on(pipe_crtc_state);
+ }
intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
@@ -1285,7 +1372,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_port *port = intel_connector->port;
const int min_bpp = 18;
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
int ret;
bool dsc = false, bigjoiner = false;
@@ -1302,8 +1389,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (*status != MODE_OK)
return 0;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
- *status = MODE_NO_DBLESCAN;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ *status = MODE_H_ILLEGAL;
+ return 0;
+ }
+
+ if (mode->clock < 10000) {
+ *status = MODE_CLOCK_LOW;
return 0;
}
@@ -1314,10 +1406,6 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(mode->clock, min_bpp);
- ret = drm_modeset_lock(&mgr->base.lock, ctx);
- if (ret)
- return ret;
-
/*
* TODO:
* - Also check if compression would allow for the mode
@@ -1330,32 +1418,23 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
* corresponding link capabilities of the sink) in case the
* stream is uncompressed for it by the last branch device.
*/
- if (mode_rate > max_rate || mode->clock > max_dotclk ||
- drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
- *status = MODE_CLOCK_HIGH;
- return 0;
- }
-
- if (mode->clock < 10000) {
- *status = MODE_CLOCK_LOW;
- return 0;
- }
-
- if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
- *status = MODE_H_ILLEGAL;
- return 0;
- }
-
- if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
+ if (intel_dp_need_bigjoiner(intel_dp, intel_connector,
+ mode->hdisplay, target_clock)) {
bigjoiner = true;
max_dotclk *= 2;
+ }
+
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ return ret;
- /* TODO: add support for bigjoiner */
+ if (mode_rate > max_rate || mode->clock > max_dotclk ||
+ drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
*status = MODE_CLOCK_HIGH;
return 0;
}
- if (DISPLAY_VER(dev_priv) >= 10 &&
+ if (HAS_DSC_MST(dev_priv) &&
drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) {
/*
* TBD pass the connector BPC,
@@ -1383,11 +1462,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- /*
- * Big joiner configuration needs DSC for TGL which is not true for
- * XE_LPD where uncompressed joiner is supported.
- */
- if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) {
+ if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc) {
*status = MODE_CLOCK_HIGH;
return 0;
}
@@ -1397,7 +1472,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- *status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
+ *status = intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
return 0;
}
@@ -1509,24 +1584,41 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux;
struct drm_dp_desc desc;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
- if (!connector->dp.dsc_decompression_aux)
+ if (!aux)
return false;
- if (drm_dp_read_desc(connector->dp.dsc_decompression_aux,
- &desc, true) < 0)
+ /*
+	 * A logical port's OUI (at least for affected sinks) is all 0, so
+	 * the parent port's OUI is used for identification instead.
+ */
+ if (drm_dp_mst_port_is_logical(connector->port)) {
+ aux = drm_dp_mst_aux_for_parent(connector->port);
+ if (!aux)
+ aux = &connector->mst_port->aux;
+ }
+
+ if (drm_dp_read_dpcd_caps(aux, dpcd) < 0)
return false;
- if (!drm_dp_has_quirk(&desc,
- DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
+ if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
return false;
- if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0)
+ if (!drm_dp_has_quirk(&desc,
+ DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
return false;
- if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
+ /*
+	 * UHBR (MST sink) devices requiring this quirk don't advertise
+	 * HBLANK expansion support. Presumably they either perform HBLANK
+	 * expansion internally, or are affected by this issue on modes with
+	 * a short HBLANK for other reasons.
+ */
+ if (!drm_dp_128b132b_supported(dpcd) &&
+ !(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
return false;
drm_dbg_kms(&i915->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 75d76f91ecbd..6503abdc2b98 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -348,7 +348,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
out_err:
drm_dbg_kms(&i915->drm,
- "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redect it (err %pe)\n",
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 4ca910874a4f..d20e4e9cf7f7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "bxt_dpio_phy_regs.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
@@ -29,6 +30,7 @@
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
/**
@@ -123,9 +125,9 @@
*/
/**
- * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
+ * struct bxt_dpio_phy_info - Hold info for a broxton DDI phy
*/
-struct bxt_ddi_phy_info {
+struct bxt_dpio_phy_info {
/**
* @dual_channel: true if this phy has a second channel.
*/
@@ -161,7 +163,7 @@ struct bxt_ddi_phy_info {
} channel[2];
};
-static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
+static const struct bxt_dpio_phy_info bxt_dpio_phy_info[] = {
[DPIO_PHY0] = {
.dual_channel = true,
.rcomp_phy = DPIO_PHY1,
@@ -183,7 +185,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
},
};
-static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
+static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
[DPIO_PHY0] = {
.dual_channel = false,
.rcomp_phy = DPIO_PHY1,
@@ -216,23 +218,23 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
},
};
-static const struct bxt_ddi_phy_info *
+static const struct bxt_dpio_phy_info *
bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
{
if (IS_GEMINILAKE(dev_priv)) {
- *count = ARRAY_SIZE(glk_ddi_phy_info);
- return glk_ddi_phy_info;
+ *count = ARRAY_SIZE(glk_dpio_phy_info);
+ return glk_dpio_phy_info;
} else {
- *count = ARRAY_SIZE(bxt_ddi_phy_info);
- return bxt_ddi_phy_info;
+ *count = ARRAY_SIZE(bxt_dpio_phy_info);
+ return bxt_dpio_phy_info;
}
}
-static const struct bxt_ddi_phy_info *
+static const struct bxt_dpio_phy_info *
bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
int count;
- const struct bxt_ddi_phy_info *phy_list =
+ const struct bxt_dpio_phy_info *phy_list =
bxt_get_phy_list(dev_priv, &count);
return &phy_list[phy];
@@ -241,7 +243,7 @@ bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch)
{
- const struct bxt_ddi_phy_info *phy_info, *phys;
+ const struct bxt_dpio_phy_info *phy_info, *phys;
int i, count;
phys = bxt_get_phy_list(dev_priv, &count);
@@ -269,16 +271,32 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
*ch = DPIO_CH0;
}
-void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+/*
+ * Like intel_de_rmw() but reads from a single per-lane register and
+ * writes to the group register to write the same value to all the lanes.
+ */
+static u32 bxt_dpio_phy_rmw_grp(struct drm_i915_private *i915,
+ i915_reg_t reg_single,
+ i915_reg_t reg_group,
+ u32 clear, u32 set)
+{
+ u32 old, val;
+
+ old = intel_de_read(i915, reg_single);
+ val = (old & ~clear) | set;
+ intel_de_write(i915, reg_group, val);
+
+ return old;
+}
+
+void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int level = intel_ddi_level(encoder, crtc_state, 0);
const struct intel_ddi_buf_trans *trans;
enum dpio_channel ch;
enum dpio_phy phy;
- int n_entries;
- u32 val;
+ int lane, n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
@@ -290,41 +308,51 @@ void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
- val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
- intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
- val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
- val |= trans->entries[level].bxt.margin << MARGIN_000_SHIFT |
- trans->entries[level].bxt.scale << UNIQ_TRANS_SCALE_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
- val &= ~SCALE_DCOMP_METHOD;
- if (trans->entries[level].bxt.enable)
- val |= SCALE_DCOMP_METHOD;
-
- if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- drm_err(&dev_priv->drm,
- "Disabled scaling while ouniqetrangenmethod was set");
-
- intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
- val &= ~DE_EMPHASIS;
- val |= trans->entries[level].bxt.deemphasis << DEEMPH_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
-
- val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
- val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
- intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+ bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch),
+ BXT_PORT_PCS_DW10_GRP(phy, ch),
+ TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0);
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ int level = intel_ddi_level(encoder, crtc_state, lane);
+
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW2_LN(phy, ch, lane),
+ MARGIN_000_MASK | UNIQ_TRANS_SCALE_MASK,
+ MARGIN_000(trans->entries[level].bxt.margin) |
+ UNIQ_TRANS_SCALE(trans->entries[level].bxt.scale));
+ }
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ int level = intel_ddi_level(encoder, crtc_state, lane);
+ u32 val;
+
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane),
+ SCALE_DCOMP_METHOD,
+ trans->entries[level].bxt.enable ?
+ SCALE_DCOMP_METHOD : 0);
+
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane));
+ if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+ drm_err(&dev_priv->drm,
+ "Disabled scaling while ouniqetrangenmethod was set");
+ }
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ int level = intel_ddi_level(encoder, crtc_state, lane);
+
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW4_LN(phy, ch, lane),
+ DE_EMPHASIS_MASK,
+ DE_EMPHASIS(trans->entries[level].bxt.deemphasis));
+ }
+
+ bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch),
+ BXT_PORT_PCS_DW10_GRP(phy, ch),
+ 0, TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
}
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -353,7 +381,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy));
- return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+ return REG_FIELD_GET(GRC_CODE_MASK, val);
}
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
@@ -365,20 +393,20 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
phy);
}
-static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static void _bxt_dpio_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
u32 val;
phy_info = bxt_get_phy_info(dev_priv, phy);
- if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+ if (bxt_dpio_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy_info->rcomp_phy != -1)
dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy);
- if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+ if (bxt_dpio_phy_verify_state(dev_priv, phy)) {
drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
return;
@@ -399,20 +427,17 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* The flag should get set in 100us according to the HW team, but
* use 1ms due to occasional timeouts observed with that.
*/
- if (intel_wait_for_register_fw(&dev_priv->uncore,
- BXT_PORT_CL1CM_DW0(phy),
- PHY_RESERVED | PHY_POWER_GOOD,
- PHY_POWER_GOOD,
- 1))
+ if (intel_de_wait_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
+ PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
phy);
/* Program PLL Rcomp code offset */
- intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK,
- 0xE4 << IREF0RC_OFFSET_SHIFT);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy),
+ IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xE4));
- intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK,
- 0xE4 << IREF1RC_OFFSET_SHIFT);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy),
+ IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xE4));
/* Program power gating */
intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0,
@@ -435,9 +460,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
val = bxt_get_grc(dev_priv, phy_info->rcomp_phy);
dev_priv->display.state.bxt_phy_grc = val;
- grc_code = val << GRC_CODE_FAST_SHIFT |
- val << GRC_CODE_SLOW_SHIFT |
- val;
+ grc_code = GRC_CODE_FAST(val) |
+ GRC_CODE_SLOW(val) |
+ GRC_CODE_NOM(val);
intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy),
0, GRC_DIS | GRC_RDY_OVRD);
@@ -449,9 +474,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
}
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -460,9 +485,9 @@ void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
}
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info =
+ const struct bxt_dpio_phy_info *phy_info =
bxt_get_phy_info(dev_priv, phy);
enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
bool was_enabled;
@@ -471,19 +496,19 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
was_enabled = true;
if (rcomp_phy != -1)
- was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+ was_enabled = bxt_dpio_phy_is_enabled(dev_priv, rcomp_phy);
/*
* We need to copy the GRC calibration value from rcomp_phy,
* so make sure it's powered up.
*/
if (!was_enabled)
- _bxt_ddi_phy_init(dev_priv, rcomp_phy);
+ _bxt_dpio_phy_init(dev_priv, rcomp_phy);
- _bxt_ddi_phy_init(dev_priv, phy);
+ _bxt_dpio_phy_init(dev_priv, phy);
if (!was_enabled)
- bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
+ bxt_dpio_phy_uninit(dev_priv, rcomp_phy);
}
static bool __printf(6, 7)
@@ -513,10 +538,10 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
return false;
}
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info;
+ const struct bxt_dpio_phy_info *phy_info;
u32 mask;
bool ok;
@@ -526,23 +551,23 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
- if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+ if (!bxt_dpio_phy_is_enabled(dev_priv, phy))
return false;
ok = true;
/* PLL Rcomp code offset */
ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
- IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW9(%d)", phy);
+ IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xe4),
+ "BXT_PORT_CL1CM_DW9(%d)", phy);
ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
- IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW10(%d)", phy);
+ IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xe4),
+ "BXT_PORT_CL1CM_DW10(%d)", phy);
/* Power gating */
mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
- "BXT_PORT_CL1CM_DW28(%d)", phy);
+ "BXT_PORT_CL1CM_DW28(%d)", phy);
if (phy_info->dual_channel)
ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
@@ -552,9 +577,9 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
if (phy_info->rcomp_phy != -1) {
u32 grc_code = dev_priv->display.state.bxt_phy_grc;
- grc_code = grc_code << GRC_CODE_FAST_SHIFT |
- grc_code << GRC_CODE_SLOW_SHIFT |
- grc_code;
+ grc_code = GRC_CODE_FAST(grc_code) |
+ GRC_CODE_SLOW(grc_code) |
+ GRC_CODE_NOM(grc_code);
mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
GRC_CODE_NOM_MASK;
ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
@@ -562,7 +587,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
mask = GRC_DIS | GRC_RDY_OVRD;
ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
- "BXT_PORT_REF_DW8(%d)", phy);
+ "BXT_PORT_REF_DW8(%d)", phy);
}
return ok;
@@ -570,7 +595,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
}
u8
-bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
+bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
switch (lane_count) {
case 1:
@@ -586,8 +611,8 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
}
}
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask)
+void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -598,24 +623,18 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
- u32 val = intel_de_read(dev_priv,
- BXT_PORT_TX_DW14_LN(phy, ch, lane));
-
/*
* Note that on CHV this flag is called UPAR, but has
* the same function.
*/
- val &= ~LATENCY_OPTIM;
- if (lane_lat_optim_mask & BIT(lane))
- val |= LATENCY_OPTIM;
-
- intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
- val);
+ intel_de_rmw(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
+ LATENCY_OPTIM,
+ lane_lat_optim_mask & BIT(lane) ? LATENCY_OPTIM : 0);
}
}
u8
-bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -701,9 +720,8 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
int i;
@@ -740,7 +758,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
for (i = 0; i < crtc_state->lane_count; i++) {
val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
- val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+ val |= DPIO_SWING_DEEMPH9P5(deemph_reg_value);
vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
}
@@ -749,15 +767,15 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
- val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+ val |= DPIO_SWING_MARGIN000(margin_reg_value);
/*
* Supposedly this value shouldn't matter when unique transition
* scale is disabled, but in fact it does matter. Let's just
* always program the same value and hope it's OK.
*/
- val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+ val &= ~DPIO_UNIQ_TRANS_SCALE_MASK;
+ val |= DPIO_UNIQ_TRANS_SCALE(0x9a);
vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
}
@@ -796,9 +814,9 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
u32 val;
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
@@ -843,7 +861,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(crtc_state->lane_count);
@@ -866,39 +884,39 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
/* program left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
+ val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe == PIPE_B)
+ val |= DPIO_PCS_USEDCLKCHANNEL;
else
- val |= CHV_PCS_USEDCLKCHANNEL;
+ val &= ~DPIO_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
+ val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe == PIPE_B)
+ val |= DPIO_PCS_USEDCLKCHANNEL;
else
- val |= CHV_PCS_USEDCLKCHANNEL;
+ val &= ~DPIO_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
}
@@ -908,10 +926,10 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* pick the CL based on the port.
*/
val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
- if (pipe != PIPE_B)
- val &= ~CHV_CMN_USEDCLKCHANNEL;
- else
+ if (pipe == PIPE_B)
val |= CHV_CMN_USEDCLKCHANNEL;
+ else
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
vlv_dpio_put(dev_priv);
@@ -923,9 +941,8 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
int data, i, stagger;
u32 val;
@@ -946,11 +963,10 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
for (i = 0; i < crtc_state->lane_count; i++) {
/* Set the upar bit */
if (crtc_state->lane_count == 1)
- data = 0x0;
+ data = 0;
else
- data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i),
- data << DPIO_UPAR_SHIFT);
+ data = (i == 1) ? 0 : DPIO_UPAR;
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data);
}
/* Data lane stagger programming */
@@ -1012,21 +1028,21 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
- enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
}
vlv_dpio_put(dev_priv);
@@ -1050,24 +1066,23 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW2(port),
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW2_GRP(ch),
uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
if (tx3_demph)
- vlv_dpio_write(dev_priv, phy, VLV_TX3_DW4(port), tx3_demph);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(ch, 3), tx3_demph);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
vlv_dpio_put(dev_priv);
}
@@ -1077,26 +1092,25 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
/* Program Tx lane resets to default */
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port),
- DPIO_PCS_TX_LANE2_RESET |
- DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port),
- DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
- DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
- (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
- DPIO_PCS_CLK_SOFT_RESET);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ DPIO_PCS_CLK_DATAWIDTH_8_10 |
+ DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, phy, VLV_TX_DW14(port), 0x40400000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW14_GRP(ch), 0x40400000);
vlv_dpio_put(dev_priv);
}
@@ -1108,26 +1122,23 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
enum pipe pipe = crtc->pipe;
- enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* Enable clock channels for this port */
- val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8(port), val);
+ val = DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe == PIPE_B)
+ val |= DPIO_PCS_USEDCLKCHANNEL;
+ val |= 0xc4;
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8_GRP(ch), val);
/* Program lane clock */
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23(port), 0x00400888);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);
vlv_dpio_put(dev_priv);
}
@@ -1137,12 +1148,11 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 0x00000000);
- vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 0x00e00060);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
vlv_dpio_put(dev_priv);
}
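
[editor's note] The vlv_phy_* conversions above consistently derive the DPIO channel and PHY from the digital port (vlv_dig_port_to_channel()/vlv_dig_port_to_phy()) instead of going via the pipe. As a rough orientation aid, such lookups boil down to a small switch on the port. The sketch below is illustrative only, with local types; the exact port-to-channel/PHY assignments are assumptions for the example, not taken from this patch.

	/*
	 * Illustrative sketch of a digital-port -> DPIO channel/PHY lookup.
	 * Types and the port assignments are local to this example and only
	 * approximate what the real helpers do.
	 */
	enum example_port { EXAMPLE_PORT_B, EXAMPLE_PORT_C, EXAMPLE_PORT_D };
	enum example_dpio_channel { EXAMPLE_DPIO_CH0, EXAMPLE_DPIO_CH1 };
	enum example_dpio_phy { EXAMPLE_DPIO_PHY0, EXAMPLE_DPIO_PHY1 };

	static enum example_dpio_channel example_port_to_channel(enum example_port port)
	{
		/* Assumed: ports B and D sit on channel 0, port C on channel 1. */
		return port == EXAMPLE_PORT_C ? EXAMPLE_DPIO_CH1 : EXAMPLE_DPIO_CH0;
	}

	static enum example_dpio_phy example_port_to_phy(enum example_port port)
	{
		/* Assumed: port D lives on the second, single-channel PHY. */
		return port == EXAMPLE_PORT_D ? EXAMPLE_DPIO_PHY1 : EXAMPLE_DPIO_PHY0;
	}
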
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index 9adc4e8c1738..226994dcb89b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -29,18 +29,18 @@ enum dpio_phy {
#ifdef I915
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch);
-void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask);
-u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count);
+void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask);
+u8 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port);
enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port);
@@ -77,35 +77,35 @@ static inline void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, en
enum dpio_phy *phy, enum dpio_channel *ch)
{
}
-static inline void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static inline void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
}
-static inline void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static inline void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
}
-static inline void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static inline void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
}
-static inline bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static inline bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
return false;
}
-static inline bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static inline bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
return true;
}
-static inline u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
+static inline u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
return 0;
}
-static inline void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask)
+static inline void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask)
{
}
-static inline u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+static inline u8 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
return 0;
}
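
[editor's note] The header keeps its existing pattern of pairing the real declarations under #ifdef I915 with no-op static inline stubs that return benign defaults for configurations built without the i915 implementation; the renamed bxt_dpio_phy_*() entry points follow that pattern unchanged. A minimal self-contained sketch of the same idea, with hypothetical names:

	/* example_phy.h - illustrative only; all names here are hypothetical */
	struct example_device;

	#ifdef EXAMPLE_HAVE_PHY
	void example_phy_init(struct example_device *dev);
	bool example_phy_is_enabled(struct example_device *dev);
	#else
	static inline void example_phy_init(struct example_device *dev)
	{
	}
	static inline bool example_phy_is_enabled(struct example_device *dev)
	{
		return false;	/* benign default when the PHY code is not built */
	}
	#endif
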
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 3038655377ea..a981f45facb3 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -20,6 +20,7 @@
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
+#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
struct intel_dpll_funcs {
@@ -369,38 +370,68 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
-static int i9xx_pll_refclk(struct drm_device *dev,
- const struct intel_crtc_state *pipe_config)
+static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->display.vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(dev_priv))
+ if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+ return i915->display.vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(i915))
return 120000;
- else if (DISPLAY_VER(dev_priv) != 2)
+ else if (DISPLAY_VER(i915) != 2)
return 96000;
else
return 48000;
}
+void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
+ struct intel_dpll_hw_state *dpll_hw_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ u32 tmp;
+
+ /* No way to read it out on pipes B and C */
+ if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
+ tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
+ else
+ tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
+
+ hw_state->dpll_md = tmp;
+ }
+
+ hw_state->dpll = intel_de_read(dev_priv, DPLL(crtc->pipe));
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+ hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
+ hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
+ } else {
+ /* Mask out read-only status bits. */
+ hw_state->dpll &= ~(DPLL_LOCK_VLV |
+ DPLL_PORTC_READY_MASK |
+ DPLL_PORTB_READY_MASK);
+ }
+}
+
/* Returns the clock of the currently programmed mode of the given pipe. */
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+ u32 dpll = hw_state->dpll;
u32 fp;
struct dpll clock;
int port_clock;
- int refclk = i9xx_pll_refclk(dev, pipe_config);
+ int refclk = i9xx_pll_refclk(crtc_state);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = pipe_config->dpll_hw_state.fp0;
+ fp = hw_state->fp0;
else
- fp = pipe_config->dpll_hw_state.fp1;
+ fp = hw_state->fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev_priv)) {
@@ -475,68 +506,69 @@ void i9xx_crtc_clock_get(struct intel_crtc *crtc,
* port_clock to compute adjusted_mode.crtc_clock in the
* encoder's get_config() function.
*/
- pipe_config->port_clock = port_clock;
+ crtc_state->port_clock = port_clock;
}
-void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
- struct dpll clock;
- u32 mdiv;
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
int refclk = 100000;
+ struct dpll clock;
+ u32 tmp;
/* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
vlv_dpio_get(dev_priv);
- mdiv = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(crtc->pipe));
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch));
vlv_dpio_put(dev_priv);
- clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
- clock.m2 = mdiv & DPIO_M2DIV_MASK;
- clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
- clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
- clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+ clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp);
+ clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp);
+ clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp);
+ clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp);
+ clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp);
- pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
+ crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
-void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum dpio_channel port = vlv_pipe_to_channel(crtc->pipe);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
struct dpll clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
/* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0)
return;
vlv_dpio_get(dev_priv);
- cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(port));
- pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(port));
- pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(port));
- pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(port));
- pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
+ cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch));
+ pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch));
+ pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch));
+ pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch));
+ pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
vlv_dpio_put(dev_priv);
- clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
- clock.m2 = (pll_dw0 & 0xff) << 22;
+ clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+ clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22;
if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
- clock.m2 |= pll_dw2 & 0x3fffff;
- clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
- clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
- clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+ clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2);
+ clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1);
+ clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13);
+ clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13);
- pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
+ crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock);
}
/*
@@ -958,37 +990,20 @@ static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
return (1 << dpll->n) << 16 | dpll->m2;
}
-static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2;
-
- if (IS_PINEVIEW(dev_priv)) {
- fp = pnv_dpll_compute_fp(clock);
- fp2 = pnv_dpll_compute_fp(reduced_clock);
- } else {
- fp = i9xx_dpll_compute_fp(clock);
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
- }
-
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
+ return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
- i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
+ dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
@@ -1047,27 +1062,40 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-
- if (DISPLAY_VER(dev_priv) >= 4) {
- u32 dpll_md = (crtc_state->pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc_state->dpll_hw_state.dpll_md = dpll_md;
- }
+ return dpll;
}
-static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
+static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll;
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
+ if (IS_PINEVIEW(dev_priv)) {
+ hw_state->fp0 = pnv_dpll_compute_fp(clock);
+ hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
+ } else {
+ hw_state->fp0 = i9xx_dpll_compute_fp(clock);
+ hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
+ }
+
+ hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ hw_state->dpll_md = i965_dpll_md(crtc_state);
+}
- dpll = DPLL_VGA_MODE_DIS;
+static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+
+ dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -1104,8 +1132,19 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
+ return dpll;
+}
+
+static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+
+ hw_state->fp0 = i9xx_dpll_compute_fp(clock);
+ hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock);
+
+ hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock);
}
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1185,62 +1224,54 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
return ret;
/* TODO: Do the readback via intel_compute_shared_dplls() */
- crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
return 0;
}
+static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ ((intel_panel_use_ssc(i915) && i915->display.vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
+ return 25;
+
+ if (crtc_state->sdvo_tv_clock)
+ return 20;
+
+ return 21;
+}
+
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
{
return dpll->m < factor * dpll->n;
}
-static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2;
- int factor;
-
- /* Enable autotuning of the PLL clock (if permissible) */
- factor = 21;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->display.vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev_priv) &&
- intel_is_dual_link_lvds(dev_priv)))
- factor = 25;
- } else if (crtc_state->sdvo_tv_clock) {
- factor = 20;
- }
+ u32 fp;
fp = i9xx_dpll_compute_fp(clock);
if (ilk_needs_fb_cb_tune(clock, factor))
fp |= FP_CB_TUNE;
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
- if (ilk_needs_fb_cb_tune(reduced_clock, factor))
- fp2 |= FP_CB_TUNE;
-
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
+ return fp;
}
-static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
- const struct dpll *clock,
- const struct dpll *reduced_clock)
+static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
- ilk_update_pll_dividers(crtc_state, clock, reduced_clock);
-
- dpll = 0;
+ dpll = DPLL_VCO_ENABLE;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
@@ -1302,9 +1333,20 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- dpll |= DPLL_VCO_ENABLE;
+ return dpll;
+}
+
+static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+ int factor = ilk_fb_cb_factor(crtc_state);
+
+ hw_state->fp0 = ilk_dpll_compute_fp(clock, factor);
+ hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor);
- crtc_state->dpll_hw_state.dpll = dpll;
+ hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock);
}
static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1377,39 +1419,56 @@ static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
return intel_reserve_shared_dplls(state, crtc, NULL);
}
-void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
+static u32 vlv_dpll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 dpll;
- crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ dpll = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+
if (crtc->pipe != PIPE_A)
- crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
- DPLL_EXT_BUFFER_ENABLE_VLV;
+ dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
- crtc_state->dpll_hw_state.dpll_md =
- (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ return dpll;
}
-void chv_compute_dpll(struct intel_crtc_state *crtc_state)
+void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+
+ hw_state->dpll = vlv_dpll(crtc_state);
+ hw_state->dpll_md = i965_dpll_md(crtc_state);
+}
+
+static u32 chv_dpll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 dpll;
- crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ dpll = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+
if (crtc->pipe != PIPE_A)
- crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll_md =
- (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ return dpll;
+}
+
+void chv_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+
+ hw_state->dpll = chv_dpll(crtc_state);
+ hw_state->dpll_md = i965_dpll_md(crtc_state);
}
static int chv_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1765,7 +1824,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll = crtc_state->dpll_hw_state.dpll;
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
int i;
@@ -1775,157 +1834,152 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
if (i9xx_has_pps(dev_priv))
assert_pps_unlocked(dev_priv, pipe);
- intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0);
- intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1);
+ intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
+ intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
/* Wait for the clocks to stabilize. */
intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
- intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150); /* wait for warmup */
}
}
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+ enum dpio_phy phy, enum dpio_channel ch)
{
- u32 reg_val;
+ u32 tmp;
/*
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
- reg_val &= 0xffffff00;
- reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
- reg_val &= 0x00ffffff;
- reg_val |= 0x8c000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
- reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
- reg_val &= 0x00ffffff;
- reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp &= 0xffffff00;
+ tmp |= 0x00000030;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp &= 0x00ffffff;
+ tmp |= 0x8c000000;
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
+
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch));
+ tmp &= 0xffffff00;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp);
+
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11);
+ tmp &= 0x00ffffff;
+ tmp |= 0xb0000000;
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp);
}
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct dpll *clock = &crtc_state->dpll;
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
- u32 mdiv;
- u32 bestn, bestm1, bestm2, bestp1, bestp2;
- u32 coreclk, reg_val;
+ u32 tmp, coreclk;
vlv_dpio_get(dev_priv);
- bestn = crtc_state->dpll.n;
- bestm1 = crtc_state->dpll.m1;
- bestm2 = crtc_state->dpll.m2;
- bestp1 = crtc_state->dpll.p1;
- bestp2 = crtc_state->dpll.p2;
-
/* See eDP HDMI DPIO driver vbios notes doc */
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, phy);
+ vlv_pllb_recal_opamp(dev_priv, phy, ch);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9_BCAST, 0x0100000f);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW8(pipe));
- reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW8(pipe), reg_val);
+ tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch));
+ tmp &= 0x00ffffff;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp);
/* Disable fast lock */
vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
- mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
- mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
- mdiv |= ((bestn << DPIO_N_SHIFT));
- mdiv |= (1 << DPIO_K_SHIFT);
+ tmp = DPIO_M1_DIV(clock->m1) |
+ DPIO_M2_DIV(clock->m2) |
+ DPIO_P1_DIV(clock->p1) |
+ DPIO_P2_DIV(clock->p2) |
+ DPIO_N_DIV(clock->n) |
+ DPIO_K_DIV(1);
/*
* Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
* but we don't support that).
* Note: don't use the DAC post divider as it seems unstable.
*/
- mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
+ tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
- mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
+ tmp |= DPIO_ENABLE_CALIBRATION;
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp);
/* Set HBR and RBR LPF coefficients */
if (crtc_state->port_clock == 162000 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
0x009f0003);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch),
0x00d0000f);
if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df40000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df70000);
else
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch),
0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(pipe));
+ coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(pipe), coreclk);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk);
- vlv_dpio_write(dev_priv, phy, VLV_PLL_DW11(pipe), 0x87871000);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000);
vlv_dpio_put(dev_priv);
}
@@ -1934,9 +1988,10 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
@@ -1948,6 +2003,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
@@ -1957,16 +2013,14 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
/* Enable Refclk */
intel_de_write(dev_priv, DPLL(pipe),
- crtc_state->dpll_hw_state.dpll &
- ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+ hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
- if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ if (hw_state->dpll & DPLL_VCO_ENABLE) {
vlv_prepare_pll(crtc_state);
_vlv_enable_pll(crtc_state);
}
- intel_de_write(dev_priv, DPLL_MD(pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md);
intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
@@ -1974,93 +2028,87 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ const struct dpll *clock = &crtc_state->dpll;
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
- u32 loopfilter, tribuf_calcntr;
- u32 bestm2, bestp1, bestp2, bestm2_frac;
- u32 dpio_val;
- int vco;
-
- bestm2_frac = crtc_state->dpll.m2 & 0x3fffff;
- bestm2 = crtc_state->dpll.m2 >> 22;
- bestp1 = crtc_state->dpll.p1;
- bestp2 = crtc_state->dpll.p2;
- vco = crtc_state->dpll.vco;
- dpio_val = 0;
- loopfilter = 0;
+ u32 tmp, loopfilter, tribuf_calcntr;
+ u32 m2_frac;
+
+ m2_frac = clock->m2 & 0x3fffff;
vlv_dpio_get(dev_priv);
/* p1 and p2 divider */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(port),
- 5 << DPIO_CHV_S1_DIV_SHIFT |
- bestp1 << DPIO_CHV_P1_DIV_SHIFT |
- bestp2 << DPIO_CHV_P2_DIV_SHIFT |
- 1 << DPIO_CHV_K_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch),
+ DPIO_CHV_S1_DIV(5) |
+ DPIO_CHV_P1_DIV(clock->p1) |
+ DPIO_CHV_P2_DIV(clock->p2) |
+ DPIO_CHV_K_DIV(1));
/* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(port), bestm2);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch),
+ DPIO_CHV_M2_DIV(clock->m2 >> 22));
/* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(port),
- DPIO_CHV_M1_DIV_BY_2 |
- 1 << DPIO_CHV_N_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch),
+ DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) |
+ DPIO_CHV_N_DIV(1));
/* M2 fraction division */
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(port), bestm2_frac);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch),
+ DPIO_CHV_M2_FRAC_DIV(m2_frac));
/* M2 fraction division enable */
- dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
- dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
- dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
- if (bestm2_frac)
- dpio_val |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(port), dpio_val);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch));
+ tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+ tmp |= DPIO_CHV_FEEDFWD_GAIN(2);
+ if (m2_frac)
+ tmp |= DPIO_CHV_FRAC_DIV_EN;
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp);
/* Program digital lock detect threshold */
- dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(port));
- dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
- DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
- dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
- if (!bestm2_frac)
- dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(port), dpio_val);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch));
+ tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+ DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+ tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5);
+ if (!m2_frac)
+ tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp);
/* Loop filter */
- if (vco == 5400000) {
- loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ if (clock->vco == 5400000) {
+ loopfilter = DPIO_CHV_PROP_COEFF(0x3) |
+ DPIO_CHV_INT_COEFF(0x8) |
+ DPIO_CHV_GAIN_CTRL(0x1);
tribuf_calcntr = 0x9;
- } else if (vco <= 6200000) {
- loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ } else if (clock->vco <= 6200000) {
+ loopfilter = DPIO_CHV_PROP_COEFF(0x5) |
+ DPIO_CHV_INT_COEFF(0xB) |
+ DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0x9;
- } else if (vco <= 6480000) {
- loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ } else if (clock->vco <= 6480000) {
+ loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
+ DPIO_CHV_INT_COEFF(0x9) |
+ DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0x8;
} else {
/* Not supported. Apply the same limits as in the max case */
- loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ loopfilter = DPIO_CHV_PROP_COEFF(0x4) |
+ DPIO_CHV_INT_COEFF(0x9) |
+ DPIO_CHV_GAIN_CTRL(0x3);
tribuf_calcntr = 0;
}
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(port), loopfilter);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter);
- dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(port));
- dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
- dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
- vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(port), dpio_val);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch));
+ tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+ tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp);
/* AFC Recal */
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port),
- vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)) |
- DPIO_AFC_RECAL);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch),
+ vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) |
+ DPIO_AFC_RECAL);
vlv_dpio_put(dev_priv);
}
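
[editor's note] Worth spelling out from chv_prepare_pll() above: the CHV M2 feedback divider is treated as a 10.22 fixed-point value, with the integer part written via DPIO_CHV_M2_DIV(clock->m2 >> 22), the fraction via DPIO_CHV_M2_FRAC_DIV(clock->m2 & 0x3fffff), and fractional mode only enabled when the fraction is non-zero; chv_crtc_clock_get() earlier in this file does the reverse combination. A small self-contained sketch of that split/recombine step:

	#include <stdint.h>

	/* Split a 10.22 fixed-point m2 divider into integer and fractional parts. */
	static void example_m2_split(uint32_t m2, uint32_t *m2_int, uint32_t *m2_frac)
	{
		*m2_int = m2 >> 22;		/* upper bits: integer part */
		*m2_frac = m2 & 0x3fffff;	/* lower 22 bits: fraction */
	}

	/* Recombine as the clock readout does (fraction only if it was enabled). */
	static uint32_t example_m2_combine(uint32_t m2_int, uint32_t m2_frac, int frac_en)
	{
		uint32_t m2 = m2_int << 22;

		if (frac_en)
			m2 |= m2_frac & 0x3fffff;
		return m2;
	}
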
@@ -2069,17 +2117,18 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
+ enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe);
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ enum pipe pipe = crtc->pipe;
u32 tmp;
vlv_dpio_get(dev_priv);
/* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp);
vlv_dpio_put(dev_priv);
@@ -2089,7 +2138,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
udelay(1);
/* Enable PLL */
- intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+ intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll);
/* Check PLL is locked */
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
@@ -2100,6 +2149,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
@@ -2109,9 +2159,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
/* Enable Refclk and SSC */
intel_de_write(dev_priv, DPLL(pipe),
- crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+ hw_state->dpll & ~DPLL_VCO_ENABLE);
- if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ if (hw_state->dpll & DPLL_VCO_ENABLE) {
chv_prepare_pll(crtc_state);
_chv_enable_pll(crtc_state);
}
@@ -2124,10 +2174,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* the value from DPLLBMD to either pipe B or C.
*/
intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(PIPE_B),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B), hw_state->dpll_md);
intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->display.state.chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
+ dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
@@ -2137,8 +2186,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
(intel_de_read(dev_priv, DPLL(PIPE_B)) &
DPLL_VGA_MODE_DIS) == 0);
} else {
- intel_de_write(dev_priv, DPLL_MD(pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md);
intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
}
@@ -2199,7 +2247,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
@@ -2217,9 +2265,9 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_dpio_get(dev_priv);
/* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch));
val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val);
vlv_dpio_put(dev_priv);
}
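
[editor's note] The divider readout in vlv_crtc_clock_get()/chv_crtc_clock_get() above drops the open-coded shift-and-mask arithmetic in favour of REG_FIELD_GET() against mask macros from the newly included vlv_dpio_phy_regs.h. REG_FIELD_GET() is, to my understanding, a thin i915 wrapper around the generic bitfield helpers, so the underlying idea can be shown with plain <linux/bitfield.h>; the mask position below is made up for the example.

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define EXAMPLE_N_DIV_MASK	GENMASK(15, 12)	/* illustrative field position */

	static u32 example_pack_n(u32 n)
	{
		return FIELD_PREP(EXAMPLE_N_DIV_MASK, n);	/* (n << 12) masked to the field */
	}

	static u32 example_unpack_n(u32 reg)
	{
		return FIELD_GET(EXAMPLE_N_DIV_MASK, reg);	/* (reg & mask) >> 12 */
	}

The mask macro carries both the field position and width, so pack/unpack stay in sync by construction instead of relying on matching *_SHIFT constants.
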
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index ac01bb19cc6c..a86a79408af0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -13,6 +13,7 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dpll_hw_state;
enum pipe;
void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
@@ -22,6 +23,8 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
+void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
+ struct intel_dpll_hw_state *dpll_hw_state);
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
void chv_compute_dpll(struct intel_crtc_state *crtc_state);
@@ -39,12 +42,9 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
+void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state);
+void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state);
+void chv_crtc_clock_get(struct intel_crtc_state *crtc_state);
void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
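
[editor's note] Throughout this series the flat dpll_hw_state members give way to per-platform sub-states selected by name: &crtc_state->dpll_hw_state.i9xx in intel_dpll.c above, .hsw/.skl/.cx0pll in the files that follow. The container's actual layout is not visible in these hunks; the access pattern suggests something along the lines of the sketch below, so treat the member set and the choice of a bare union as assumptions.

	#include <linux/types.h>

	/* Illustrative only: one container, one sub-state per platform family. */
	struct example_i9xx_dpll_hw_state {
		u32 dpll, dpll_md, fp0, fp1;
	};

	struct example_hsw_dpll_hw_state {
		u32 wrpll, spll;
	};

	union example_dpll_hw_state {
		struct example_i9xx_dpll_hw_state i9xx;
		struct example_hsw_dpll_hw_state hsw;
		/* ... one member per platform family ... */
	};
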
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index ff480f171f75..90998b037349 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -24,6 +24,7 @@
#include <linux/math.h>
#include <linux/string_helpers.h>
+#include "bxt_dpio_phy_regs.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -64,7 +65,8 @@ struct intel_shared_dpll_funcs {
* the pll is not already enabled.
*/
void (*enable)(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll);
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state);
/*
* Hook for disabling the pll, called from intel_disable_shared_dpll()
@@ -81,7 +83,7 @@ struct intel_shared_dpll_funcs {
*/
bool (*get_hw_state)(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state);
+ struct intel_dpll_hw_state *dpll_hw_state);
/*
* Hook for calculating the pll's output frequency based on its passed
@@ -89,7 +91,7 @@ struct intel_shared_dpll_funcs {
*/
int (*get_freq)(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state);
+ const struct intel_dpll_hw_state *dpll_hw_state);
};
struct intel_dpll_mgr {
@@ -107,8 +109,8 @@ struct intel_dpll_mgr {
struct intel_crtc *crtc,
struct intel_encoder *encoder);
void (*update_ref_clks)(struct drm_i915_private *i915);
- void (*dump_hw_state)(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state);
+ void (*dump_hw_state)(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state);
bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b);
};
@@ -227,7 +229,7 @@ static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
if (pll->info->power_domain)
pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
- pll->info->funcs->enable(i915, pll);
+ pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
pll->on = true;
}
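
[editor's note] Note the calling-convention change visible in _intel_enable_shared_dpll() above: the ->enable() hook no longer digs the programming values out of pll->state.hw_state itself; the caller now passes them in explicitly. A self-contained sketch of that ops-table shape, with hypothetical names:

	struct example_state { unsigned int ctl; };

	struct example_pll;

	struct example_pll_funcs {
		/* State is handed in by the caller instead of read from the pll. */
		void (*enable)(struct example_pll *pll,
			       const struct example_state *state);
	};

	struct example_pll {
		const struct example_pll_funcs *funcs;
		struct example_state committed;
	};

	static void example_enable(struct example_pll *pll)
	{
		pll->funcs->enable(pll, &pll->committed);
	}

Passing the state explicitly makes it obvious which snapshot a hook programs, and lets the same hook be driven from other state sources without touching the pll object.
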
@@ -352,7 +354,7 @@ intel_dpll_mask_all(struct drm_i915_private *i915)
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
const struct intel_crtc *crtc,
- const struct intel_dpll_hw_state *pll_state,
+ const struct intel_dpll_hw_state *dpll_hw_state,
unsigned long dpll_mask)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
@@ -379,9 +381,9 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
continue;
}
- if (memcmp(pll_state,
+ if (memcmp(dpll_hw_state,
&shared_dpll[pll->index].hw_state,
- sizeof(*pll_state)) == 0) {
+ sizeof(*dpll_hw_state)) == 0) {
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
crtc->base.base.id, crtc->base.name,
@@ -430,14 +432,14 @@ static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
const struct intel_crtc *crtc,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
struct intel_shared_dpll_state *shared_dpll;
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
if (shared_dpll[pll->index].pipe_mask == 0)
- shared_dpll[pll->index].hw_state = *pll_state;
+ shared_dpll[pll->index].hw_state = *dpll_hw_state;
intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
}
@@ -519,8 +521,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
u32 val;
@@ -553,17 +556,19 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
}
static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
const enum intel_dpll_id id = pll->info->id;
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(i915);
- intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
- intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);
+ intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
+ intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
- intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
/* Wait for the clocks to stabilize. */
intel_de_posting_read(i915, PCH_DPLL(id));
@@ -574,7 +579,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
*
* So write it again.
*/
- intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
intel_de_posting_read(i915, PCH_DPLL(id));
udelay(200);
}
@@ -634,21 +639,25 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
return 0;
}
-static void ibx_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void ibx_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm,
- "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- hw_state->dpll,
- hw_state->dpll_md,
- hw_state->fp0,
- hw_state->fp1);
+ const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
+
+ drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
}
-static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct i9xx_dpll_hw_state *a = &_a->i9xx;
+ const struct i9xx_dpll_hw_state *b = &_b->i9xx;
+
return a->dpll == b->dpll &&
a->dpll_md == b->dpll_md &&
a->fp0 == b->fp0 &&
@@ -677,19 +686,24 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
};
static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
const enum intel_dpll_id id = pll->info->id;
- intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
+ intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
intel_de_posting_read(i915, WRPLL_CTL(id));
udelay(20);
}
static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
+
+ intel_de_write(i915, SPLL_CTL, hw_state->spll);
intel_de_posting_read(i915, SPLL_CTL);
udelay(20);
}
@@ -728,8 +742,9 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
u32 val;
@@ -749,8 +764,9 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
intel_wakeref_t wakeref;
u32 val;
@@ -975,11 +991,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
int refclk;
int n, p, r;
- u32 wrpll = pll_state->wrpll;
+ u32 wrpll = hw_state->wrpll;
switch (wrpll & WRPLL_REF_MASK) {
case WRPLL_REF_SPECIAL_HSW:
@@ -1020,11 +1037,12 @@ hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
unsigned int p, n2, r2;
hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
- crtc_state->dpll_hw_state.wrpll =
+ hw_state->wrpll =
WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
@@ -1099,7 +1117,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
int link_clock = 0;
@@ -1127,11 +1145,12 @@ hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
return -EINVAL;
- crtc_state->dpll_hw_state.spll =
+ hw_state->spll =
SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
return 0;
@@ -1150,11 +1169,12 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
int link_clock = 0;
- switch (pll_state->spll & SPLL_FREQ_MASK) {
+ switch (hw_state->spll & SPLL_FREQ_MASK) {
case SPLL_FREQ_810MHz:
link_clock = 81000;
break;
@@ -1225,16 +1245,21 @@ static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
i915->display.dpll.ref_clks.nssc = 135000;
}
-static void hsw_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void hsw_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
- hw_state->wrpll, hw_state->spll);
+ const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
+
+ drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ hw_state->wrpll, hw_state->spll);
}
-static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct hsw_dpll_hw_state *a = &_a->hsw;
+ const struct hsw_dpll_hw_state *b = &_b->hsw;
+
return a->wrpll == b->wrpll &&
a->spll == b->spll;
}
@@ -1254,7 +1279,8 @@ static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
};
static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *hw_state)
{
}
@@ -1265,7 +1291,7 @@ static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
return true;
}
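
[editor's note] The ->dump_hw_state() conversions above (ibx_dump_hw_state(), hsw_dump_hw_state()) swap drm_dbg_kms() for a caller-supplied struct drm_printer, so one dump routine can be pointed at different sinks. A rough usage sketch follows, assuming a dump helper with this signature; the printer constructors shown are the generic drm_print.h ones and not necessarily what the i915 callers end up using.

	#include <drm/drm_print.h>
	#include <linux/seq_file.h>
	#include <linux/types.h>

	static void example_dump(struct drm_printer *p, u32 dpll, u32 dpll_md)
	{
		drm_printf(p, "dpll: 0x%x, dpll_md: 0x%x\n", dpll, dpll_md);
	}

	static void example_dump_to_log(struct device *dev, u32 dpll, u32 dpll_md)
	{
		struct drm_printer p = drm_info_printer(dev);	/* kernel log, info level */

		example_dump(&p, dpll, dpll_md);
	}

	static void example_dump_to_debugfs(struct seq_file *m, u32 dpll, u32 dpll_md)
	{
		struct drm_printer p = drm_seq_file_printer(m);	/* debugfs/seq_file sink */

		example_dump(&p, dpll, dpll_md);
	}
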
@@ -1332,26 +1358,31 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = {
};
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct skl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
intel_de_rmw(i915, DPLL_CTRL1,
- DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
- pll->state.hw_state.ctrl1 << (id * 6));
+ DPLL_CTRL1_HDMI_MODE(id) |
+ DPLL_CTRL1_SSC(id) |
+ DPLL_CTRL1_LINK_RATE_MASK(id),
+ hw_state->ctrl1 << (id * 6));
intel_de_posting_read(i915, DPLL_CTRL1);
}
static void skl_ddi_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
- skl_ddi_pll_write_ctrl1(i915, pll);
+ skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
- intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
- intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+ intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
+ intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
intel_de_posting_read(i915, regs[id].cfgcr1);
intel_de_posting_read(i915, regs[id].cfgcr2);
@@ -1363,9 +1394,12 @@ static void skl_ddi_pll_enable(struct drm_i915_private *i915,
}
static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- skl_ddi_pll_write_ctrl1(i915, pll);
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
+
+ skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
}
static void skl_ddi_pll_disable(struct drm_i915_private *i915,
@@ -1386,13 +1420,14 @@ static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
- u32 val;
+ struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
bool ret;
+ u32 val;
wakeref = intel_display_power_get_if_enabled(i915,
POWER_DOMAIN_DISPLAY_CORE);
@@ -1423,8 +1458,9 @@ out:
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
@@ -1695,16 +1731,17 @@ skip_remaining_dividers:
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
int ref_clock = i915->display.dpll.ref_clks.nssc;
u32 p0, p1, p2, dco_freq;
- p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
- p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
+ p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
- if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ if (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
else
p1 = 1;
@@ -1752,10 +1789,10 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
return 0;
}
- dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
+ dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
ref_clock;
- dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
+ dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
ref_clock / 0x8000;
if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
@@ -1767,37 +1804,35 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
struct skl_wrpll_params wrpll_params = {};
- u32 ctrl1, cfgcr1, cfgcr2;
int ret;
- /*
- * See comment in intel_dpll_hw_state to understand why we always use 0
- * as the DPLL id in this function.
- */
- ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-
- ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
-
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
i915->display.dpll.ref_clks.nssc, &wrpll_params);
if (ret)
return ret;
- cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+ hw_state->ctrl1 =
+ DPLL_CTRL1_OVERRIDE(0) |
+ DPLL_CTRL1_HDMI_MODE(0);
+
+ hw_state->cfgcr1 =
+ DPLL_CFGCR1_FREQ_ENABLE |
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
wrpll_params.dco_integer;
- cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ hw_state->cfgcr2 =
+ DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
&crtc_state->dpll_hw_state);
@@ -1807,6 +1842,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
+ struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
u32 ctrl1;
/*
@@ -1836,18 +1872,19 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
break;
}
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ hw_state->ctrl1 = ctrl1;
return 0;
}
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
int link_clock = 0;
- switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
+ switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
case DPLL_CTRL1_LINK_RATE_810:
link_clock = 81000;
@@ -1921,16 +1958,18 @@ static int skl_get_dpll(struct intel_atomic_state *state,
static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
+
/*
* ctrl1 register is already shifted for each pll, just use 0 to get
* the internal shift for each field
*/
- if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
- return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
+ if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
+ return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
else
- return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
+ return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
}
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
@@ -1939,19 +1978,21 @@ static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
-static void skl_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void skl_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm, "dpll_hw_state: "
- "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
- hw_state->ctrl1,
- hw_state->cfgcr1,
- hw_state->cfgcr2);
+ const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
+
+ drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
+ hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
}
-static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct skl_dpll_hw_state *a = &_a->skl;
+ const struct skl_dpll_hw_state *b = &_b->skl;
+
return a->ctrl1 == b->ctrl1 &&
a->cfgcr1 == b->cfgcr1 &&
a->cfgcr2 == b->cfgcr2;
@@ -1991,12 +2032,14 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
};
static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- u32 temp;
+ const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
enum dpio_phy phy;
enum dpio_channel ch;
+ u32 temp;
bxt_port_to_phy_channel(i915, port, &phy, &ch);
@@ -2019,43 +2062,43 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
/* Write P1 & P2 */
intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
- PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
+ PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
/* Write M2 integer */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
- PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
+ PORT_PLL_M2_INT_MASK, hw_state->pll0);
/* Write N */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
- PORT_PLL_N_MASK, pll->state.hw_state.pll1);
+ PORT_PLL_N_MASK, hw_state->pll1);
/* Write M2 fraction */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
- PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
+ PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
/* Write M2 fraction enable */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
- PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
+ PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
/* Write coeff */
temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
- temp |= pll->state.hw_state.pll6;
+ temp |= hw_state->pll6;
intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
- PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
+ PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
- PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
+ PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
- temp |= pll->state.hw_state.pll10;
+ temp |= hw_state->pll10;
intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
@@ -2063,7 +2106,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
temp |= PORT_PLL_RECALIBRATE;
intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- temp |= pll->state.hw_state.ebb4;
+ temp |= hw_state->ebb4;
intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
@@ -2075,7 +2118,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
drm_err(&i915->drm, "PLL %d not locked\n", port);
if (IS_GEMINILAKE(i915)) {
- temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
+ temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
temp |= DCC_DELAY_RANGE_2;
intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
}
@@ -2087,7 +2130,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
- temp |= pll->state.hw_state.pcsdw12;
+ temp |= hw_state->pcsdw12;
intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
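For reference, a rough sketch of what the intel_de_rmw() helper used throughout this hunk does (reconstructed from its use here, not taken from the patch): it is a read-modify-write, so passing 0 as the clear mask simply ORs the set bits into the register.

	/* roughly equivalent open-coded form of intel_de_rmw(i915, reg, clear, set) */
	u32 tmp = intel_de_read(i915, reg);
	intel_de_write(i915, reg, (tmp & ~clear) | set);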
@@ -2112,8 +2155,9 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
intel_wakeref_t wakeref;
enum dpio_phy phy;
@@ -2245,7 +2289,7 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
const struct dpll *clk_div)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
+ struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
int clock = crtc_state->port_clock;
int vco = clk_div->vco;
u32 prop_coef, int_coef, gain_ctl, targ_cnt;
@@ -2283,45 +2327,47 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
else
lanestagger = 0x02;
- dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
- dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
- dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
- dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
+ hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
+ hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
+ hw_state->pll1 = PORT_PLL_N(clk_div->n);
+ hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
if (clk_div->m2 & 0x3fffff)
- dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
+ hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
- dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
+ hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
PORT_PLL_INT_COEFF(int_coef) |
PORT_PLL_GAIN_CTL(gain_ctl);
- dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
+ hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
- dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
+ hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
- dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
+ hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
PORT_PLL_DCO_AMP_OVR_EN_H;
- dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+ hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
- dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
+ hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
return 0;
}
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
struct dpll clock;
clock.m1 = 2;
- clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
- clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
- clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
- clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
+ clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
+ if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
+ hw_state->pll2);
+ clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
+ clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
+ clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
}
@@ -2402,28 +2448,26 @@ static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
/* DSI non-SSC ref 19.2MHz */
}
-static void bxt_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void bxt_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
- "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
- "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
- hw_state->ebb0,
- hw_state->ebb4,
- hw_state->pll0,
- hw_state->pll1,
- hw_state->pll2,
- hw_state->pll3,
- hw_state->pll6,
- hw_state->pll8,
- hw_state->pll9,
- hw_state->pll10,
- hw_state->pcsdw12);
+ const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
+
+ drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+ "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+ hw_state->ebb0, hw_state->ebb4,
+ hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
+ hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
+ hw_state->pcsdw12);
}
-static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct bxt_dpll_hw_state *a = &_a->bxt;
+ const struct bxt_dpll_hw_state *b = &_b->bxt;
+
return a->ebb0 == b->ebb0 &&
a->ebb4 == b->ebb4 &&
a->pll0 == b->pll0 &&
@@ -2554,7 +2598,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
static bool
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
- return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+ return ((IS_ELKHARTLAKE(i915) &&
IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
i915->display.dpll.ref_clks.nssc == 38400;
@@ -2706,7 +2750,7 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
/*
* The PLL outputs multiple frequencies at the same time, selection is
@@ -2777,17 +2821,18 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
int ref_clock = icl_wrpll_ref_clock(i915);
u32 dco_fraction;
u32 p0, p1, p2, dco_freq;
- p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+ p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
- if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
DPLL_CFGCR1_QDIV_RATIO_SHIFT;
else
p1 = 1;
@@ -2819,10 +2864,10 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
break;
}
- dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+ dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
ref_clock;
- dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT;
if (ehl_combo_pll_div_frac_wa_needed(i915))
@@ -2838,33 +2883,34 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
static void icl_calc_dpll_state(struct drm_i915_private *i915,
const struct skl_wrpll_params *pll_params,
- struct intel_dpll_hw_state *pll_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
u32 dco_fraction = pll_params->dco_fraction;
if (ehl_combo_pll_div_frac_wa_needed(i915))
dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
- pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
+ hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
pll_params->dco_integer;
- pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
+ hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
DPLL_CFGCR1_KDIV(pll_params->kdiv) |
DPLL_CFGCR1_PDIV(pll_params->pdiv);
if (DISPLAY_VER(i915) >= 12)
- pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
+ hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
else
- pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
+ hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
if (i915->display.vbt.override_afc_startup)
- pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
+ hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
- struct intel_dpll_hw_state *state,
+ struct icl_dpll_hw_state *hw_state,
bool is_dkl)
{
static const u8 div1_vals[] = { 7, 5, 3, 2 };
@@ -2920,12 +2966,12 @@ static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
*target_dco_khz = dco;
- state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
+ hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
- state->mg_clktop2_coreclkctl1 =
+ hw_state->mg_clktop2_coreclkctl1 =
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
- state->mg_clktop2_hsclkctl =
+ hw_state->mg_clktop2_hsclkctl =
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
hsdiv |
@@ -2943,9 +2989,10 @@ static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
* adapted to integer-only calculation, that's why it looks so different.
*/
static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
- struct intel_dpll_hw_state *pll_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
int refclk_khz = i915->display.dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
@@ -2960,7 +3007,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
int ret;
ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
- pll_state, is_dkl);
+ hw_state, is_dkl);
if (ret)
return ret;
@@ -3050,61 +3097,61 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
/* write pll_state calculations */
if (is_dkl) {
- pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
+ hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
DKL_PLL_DIV0_FBPREDIV(m1div) |
DKL_PLL_DIV0_FBDIV_INT(m2div_int);
if (i915->display.vbt.override_afc_startup) {
u8 val = i915->display.vbt.override_afc_startup_val;
- pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
+ hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
}
- pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
+ hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
- pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
+ hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
(use_ssc ? DKL_PLL_SSC_EN : 0);
- pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
+ hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
- pll_state->mg_pll_tdc_coldst_bias =
+ hw_state->mg_pll_tdc_coldst_bias =
DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
} else {
- pll_state->mg_pll_div0 =
+ hw_state->mg_pll_div0 =
(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
MG_PLL_DIV0_FBDIV_INT(m2div_int);
- pll_state->mg_pll_div1 =
+ hw_state->mg_pll_div1 =
MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
MG_PLL_DIV1_DITHER_DIV_2 |
MG_PLL_DIV1_NDIVRATIO(1) |
MG_PLL_DIV1_FBPREDIV(m1div);
- pll_state->mg_pll_lf =
+ hw_state->mg_pll_lf =
MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
MG_PLL_LF_AFCCNTSEL_512 |
MG_PLL_LF_GAINCTRL(1) |
MG_PLL_LF_INT_COEFF(int_coeff) |
MG_PLL_LF_PROP_COEFF(prop_coeff);
- pll_state->mg_pll_frac_lock =
+ hw_state->mg_pll_frac_lock =
MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
MG_PLL_FRAC_LOCK_DCODITHEREN |
MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
if (use_ssc || m2div_rem > 0)
- pll_state->mg_pll_frac_lock |=
+ hw_state->mg_pll_frac_lock |=
MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
- pll_state->mg_pll_ssc =
+ hw_state->mg_pll_ssc =
(use_ssc ? MG_PLL_SSC_EN : 0) |
MG_PLL_SSC_TYPE(2) |
MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
@@ -3112,14 +3159,14 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
MG_PLL_SSC_FLLEN |
MG_PLL_SSC_STEPSIZE(ssc_stepsize);
- pll_state->mg_pll_tdc_coldst_bias =
+ hw_state->mg_pll_tdc_coldst_bias =
MG_PLL_TDC_COLDST_COLDSTART |
MG_PLL_TDC_COLDST_IREFINT_EN |
MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
MG_PLL_TDC_TDCOVCCORR_EN |
MG_PLL_TDC_TDCSEL(3);
- pll_state->mg_pll_bias =
+ hw_state->mg_pll_bias =
MG_PLL_BIAS_BIAS_GB_SEL(3) |
MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
MG_PLL_BIAS_BIAS_BONUS(10) |
@@ -3129,17 +3176,17 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
MG_PLL_BIAS_IREFTRIM(iref_trim);
if (refclk_khz == 38400) {
- pll_state->mg_pll_tdc_coldst_bias_mask =
+ hw_state->mg_pll_tdc_coldst_bias_mask =
MG_PLL_TDC_COLDST_COLDSTART;
- pll_state->mg_pll_bias_mask = 0;
+ hw_state->mg_pll_bias_mask = 0;
} else {
- pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
- pll_state->mg_pll_bias_mask = -1U;
+ hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ hw_state->mg_pll_bias_mask = -1U;
}
- pll_state->mg_pll_tdc_coldst_bias &=
- pll_state->mg_pll_tdc_coldst_bias_mask;
- pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ hw_state->mg_pll_tdc_coldst_bias &=
+ hw_state->mg_pll_tdc_coldst_bias_mask;
+ hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
}
return 0;
@@ -3147,31 +3194,32 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
ref_clock = i915->display.dpll.ref_clks.nssc;
if (DISPLAY_VER(i915) >= 12) {
- m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
- m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+ m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
- if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
- m2_frac = pll_state->mg_pll_bias &
+ if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = hw_state->mg_pll_bias &
DKL_PLL_BIAS_FBDIV_FRAC_MASK;
m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
} else {
m2_frac = 0;
}
} else {
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+ m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
- if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
- m2_frac = pll_state->mg_pll_div0 &
+ if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = hw_state->mg_pll_div0 &
MG_PLL_DIV0_FBDIV_FRAC_MASK;
m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
} else {
@@ -3179,7 +3227,7 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
}
}
- switch (pll_state->mg_clktop2_hsclkctl &
+ switch (hw_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
div1 = 2;
@@ -3194,11 +3242,11 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
div1 = 7;
break;
default:
- MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
+ MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
return 0;
}
- div2 = (pll_state->mg_clktop2_hsclkctl &
+ div2 = (hw_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
@@ -3389,7 +3437,6 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
@@ -3408,8 +3455,7 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
- dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
- encoder->port));
+ dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
BIT(dpll_id));
@@ -3435,15 +3481,12 @@ static int icl_compute_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
return icl_compute_combo_phy_dpll(state, crtc);
- else if (intel_phy_is_tc(i915, phy))
+ else if (intel_encoder_is_tc(encoder))
return icl_compute_tc_phy_dplls(state, crtc);
- MISSING_CASE(phy);
+ MISSING_CASE(encoder->port);
return 0;
}
@@ -3452,15 +3495,12 @@ static int icl_get_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
-
- if (intel_phy_is_combo(i915, phy))
+ if (intel_encoder_is_combo(encoder))
return icl_get_combo_phy_dpll(state, crtc, encoder);
- else if (intel_phy_is_tc(i915, phy))
+ else if (intel_encoder_is_tc(encoder))
return icl_get_tc_phy_dplls(state, crtc, encoder);
- MISSING_CASE(phy);
+ MISSING_CASE(encoder->port);
return -EINVAL;
}
@@ -3493,8 +3533,9 @@ static void icl_put_dplls(struct intel_atomic_state *state,
static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
intel_wakeref_t wakeref;
@@ -3559,8 +3600,9 @@ out:
static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
intel_wakeref_t wakeref;
@@ -3630,9 +3672,10 @@ out:
static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state,
+ struct intel_dpll_hw_state *dpll_hw_state,
i915_reg_t enable_reg)
{
+ struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
bool ret = false;
@@ -3690,24 +3733,24 @@ out:
static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
- return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
+ return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
}
static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
- return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
+ return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
}
static void icl_dpll_write(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct icl_dpll_hw_state *hw_state)
{
- struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
const enum intel_dpll_id id = pll->info->id;
i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
@@ -3747,9 +3790,9 @@ static void icl_dpll_write(struct drm_i915_private *i915,
}
static void icl_mg_pll_write(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct icl_dpll_hw_state *hw_state)
{
- struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
/*
@@ -3790,9 +3833,9 @@ static void icl_mg_pll_write(struct drm_i915_private *i915,
}
static void dkl_pll_write(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct icl_dpll_hw_state *hw_state)
{
- struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
u32 val;
@@ -3905,13 +3948,15 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
}
static void combo_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
icl_pll_power_enable(i915, pll, enable_reg);
- icl_dpll_write(i915, pll);
+ icl_dpll_write(i915, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3927,11 +3972,14 @@ static void combo_pll_enable(struct drm_i915_private *i915,
}
static void tbt_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
+
icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
- icl_dpll_write(i915, pll);
+ icl_dpll_write(i915, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3945,16 +3993,18 @@ static void tbt_pll_enable(struct drm_i915_private *i915,
}
static void mg_pll_enable(struct drm_i915_private *i915,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
icl_pll_power_enable(i915, pll, enable_reg);
if (DISPLAY_VER(i915) >= 12)
- dkl_pll_write(i915, pll);
+ dkl_pll_write(i915, pll, hw_state);
else
- icl_mg_pll_write(i915, pll);
+ icl_mg_pll_write(i915, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -4026,33 +4076,36 @@ static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
-static void icl_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+static void icl_dump_hw_state(struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
- drm_dbg_kms(&i915->drm,
- "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
- "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
- "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
- "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
- "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
- "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
- hw_state->cfgcr0, hw_state->cfgcr1,
- hw_state->div0,
- hw_state->mg_refclkin_ctl,
- hw_state->mg_clktop2_coreclkctl1,
- hw_state->mg_clktop2_hsclkctl,
- hw_state->mg_pll_div0,
- hw_state->mg_pll_div1,
- hw_state->mg_pll_lf,
- hw_state->mg_pll_frac_lock,
- hw_state->mg_pll_ssc,
- hw_state->mg_pll_bias,
- hw_state->mg_pll_tdc_coldst_bias);
-}
-
-static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a,
- const struct intel_dpll_hw_state *b)
+ const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
+
+ drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
+ "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
+ "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+ "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
+ "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+ "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+ hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
+ hw_state->mg_refclkin_ctl,
+ hw_state->mg_clktop2_coreclkctl1,
+ hw_state->mg_clktop2_hsclkctl,
+ hw_state->mg_pll_div0,
+ hw_state->mg_pll_div1,
+ hw_state->mg_pll_lf,
+ hw_state->mg_pll_frac_lock,
+ hw_state->mg_pll_ssc,
+ hw_state->mg_pll_bias,
+ hw_state->mg_pll_tdc_coldst_bias);
+}
+
+static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
+ const struct intel_dpll_hw_state *_b)
{
+ const struct icl_dpll_hw_state *a = &_a->icl;
+ const struct icl_dpll_hw_state *b = &_b->icl;
+
/* FIXME split combo vs. mg more thoroughly */
return a->cfgcr0 == b->cfgcr0 &&
a->cfgcr1 == b->cfgcr1 &&
@@ -4417,33 +4470,33 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
* intel_dpll_get_freq - calculate the DPLL's output frequency
* @i915: i915 device
* @pll: DPLL for which to calculate the output frequency
- * @pll_state: DPLL state from which to calculate the output frequency
+ * @dpll_hw_state: DPLL state from which to calculate the output frequency
*
- * Return the output frequency corresponding to @pll's passed in @pll_state.
+ * Return the output frequency corresponding to @pll's passed-in @dpll_hw_state.
*/
int intel_dpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
return 0;
- return pll->info->funcs->get_freq(i915, pll, pll_state);
+ return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
}
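A minimal usage sketch (the surrounding "pll" and "crtc_state" names are assumptions, not taken from this hunk): callers hand over the whole wrapper and the platform ->get_freq() hook unwraps its own union member, as the skl/bxt/icl hooks above do.

	int port_clock = intel_dpll_get_freq(i915, pll, &crtc_state->dpll_hw_state);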
/**
* intel_dpll_get_hw_state - readout the DPLL's hardware state
* @i915: i915 device
 * @pll: DPLL for which to read out the hardware state
- * @hw_state: DPLL's hardware state
+ * @dpll_hw_state: DPLL's hardware state
*
- * Read out @pll's hardware state into @hw_state.
+ * Read out @pll's hardware state into @dpll_hw_state.
*/
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *dpll_hw_state)
{
- return pll->info->funcs->get_hw_state(i915, pll, hw_state);
+ return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
}
static void readout_dpll_hw_state(struct drm_i915_private *i915,
@@ -4514,22 +4567,24 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915)
}
/**
- * intel_dpll_dump_hw_state - write hw_state to dmesg
+ * intel_dpll_dump_hw_state - dump hw_state
* @i915: i915 drm device
- * @hw_state: hw state to be written to the log
+ * @p: where to print the state to
+ * @dpll_hw_state: hw state to be dumped
*
- * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
+ * Dump out the relevant values in @dpll_hw_state.
*/
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state)
+ struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state)
{
if (i915->display.dpll.mgr) {
- i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
+ i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
*/
- ibx_dump_hw_state(i915, hw_state);
+ ibx_dump_hw_state(p, dpll_hw_state);
}
}
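A hedged sketch of driving the new printer-based dump from a debug path (drm_dbg_printer() is assumed to be available in this kernel; the call-site names are illustrative only):

	struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "dpll");

	intel_dpll_dump_hw_state(i915, &p, &crtc_state->dpll_hw_state);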
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index cc0e1386309d..f09e513ce05b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -36,6 +36,7 @@
enum tc_port;
struct drm_i915_private;
+struct drm_printer;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -180,18 +181,19 @@ enum icl_port_dpll_id {
ICL_PORT_DPLL_COUNT,
};
-struct intel_dpll_hw_state {
- /* i9xx, pch plls */
+struct i9xx_dpll_hw_state {
u32 dpll;
u32 dpll_md;
u32 fp0;
u32 fp1;
+};
- /* hsw, bdw */
+struct hsw_dpll_hw_state {
u32 wrpll;
u32 spll;
+};
- /* skl */
+struct skl_dpll_hw_state {
/*
 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in
* lower part of ctrl1 and they get shifted into position when writing
@@ -201,20 +203,18 @@ struct intel_dpll_hw_state {
u32 ctrl1;
/* HDMI only, 0 when used for DP */
u32 cfgcr1, cfgcr2;
+};
- /* icl */
- u32 cfgcr0;
+struct bxt_dpll_hw_state {
+ u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
+};
+
+struct icl_dpll_hw_state {
+ u32 cfgcr0, cfgcr1;
/* tgl */
u32 div0;
- /* bxt */
- u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
-
- /*
- * ICL uses the following, already defined:
- * u32 cfgcr0, cfgcr1;
- */
u32 mg_refclkin_ctl;
u32 mg_clktop2_coreclkctl1;
u32 mg_clktop2_hsclkctl;
@@ -229,6 +229,55 @@ struct intel_dpll_hw_state {
u32 mg_pll_tdc_coldst_bias_mask;
};
+struct intel_mpllb_state {
+ u32 clock; /* in KHz */
+ u32 ref_control;
+ u32 mpllb_cp;
+ u32 mpllb_div;
+ u32 mpllb_div2;
+ u32 mpllb_fracn1;
+ u32 mpllb_fracn2;
+ u32 mpllb_sscen;
+ u32 mpllb_sscstep;
+};
+
+struct intel_c10pll_state {
+ u32 clock; /* in KHz */
+ u8 tx;
+ u8 cmn;
+ u8 pll[20];
+};
+
+struct intel_c20pll_state {
+ u32 clock; /* in kHz */
+ u16 tx[3];
+ u16 cmn[4];
+ union {
+ u16 mplla[10];
+ u16 mpllb[11];
+ };
+};
+
+struct intel_cx0pll_state {
+ union {
+ struct intel_c10pll_state c10;
+ struct intel_c20pll_state c20;
+ };
+ bool ssc_enabled;
+};
+
+struct intel_dpll_hw_state {
+ union {
+ struct i9xx_dpll_hw_state i9xx;
+ struct hsw_dpll_hw_state hsw;
+ struct skl_dpll_hw_state skl;
+ struct bxt_dpll_hw_state bxt;
+ struct icl_dpll_hw_state icl;
+ struct intel_mpllb_state mpllb;
+ struct intel_cx0pll_state cx0pll;
+ };
+};
+
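A sketch of the access pattern the union enables (the helper below is hypothetical, mirroring the platform hooks earlier in this patch): only one member is valid for a given PLL, so each platform family touches its own view while generic code passes the wrapper around opaquely.

	static void example_fill_skl(struct intel_dpll_hw_state *dpll_hw_state)
	{
		struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;

		hw_state->ctrl1 = DPLL_CTRL1_OVERRIDE(0) | DPLL_CTRL1_HDMI_MODE(0);
	}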
/**
* struct intel_shared_dpll_state - hold the DPLL atomic state
*
@@ -364,10 +413,10 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder);
int intel_dpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state);
+ const struct intel_dpll_hw_state *dpll_hw_state);
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state);
+ struct intel_dpll_hw_state *dpll_hw_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
@@ -377,7 +426,8 @@ void intel_dpll_readout_hw_state(struct drm_i915_private *i915);
void intel_dpll_sanitize_state(struct drm_i915_private *i915);
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
- const struct intel_dpll_hw_state *hw_state);
+ struct drm_printer *p,
+ const struct intel_dpll_hw_state *dpll_hw_state);
bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b);
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 169ef38ff188..597f8bd6aa1a 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -63,6 +63,15 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
return str[drrs_type];
}
+bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder)
+{
+ if (HAS_DOUBLE_BUFFERED_M_N(i915))
+ return true;
+
+ return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
+}
+
static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
enum drrs_refresh_rate refresh_rate)
@@ -312,9 +321,8 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
mutex_lock(&crtc->drrs.mutex);
seq_printf(m, "DRRS capable: %s\n",
- str_yes_no(crtc_state->has_drrs ||
- HAS_DOUBLE_BUFFERED_M_N(i915) ||
- intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)));
+ str_yes_no(intel_cpu_transcoder_has_drrs(i915,
+ crtc_state->cpu_transcoder)));
seq_printf(m, "DRRS enabled: %s\n",
str_yes_no(crtc_state->has_drrs));
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
index 8ef5f93a80ff..0982f95eab72 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.h
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -9,12 +9,15 @@
#include <linux/types.h>
enum drrs_type;
+enum transcoder;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_connector;
+bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder);
const char *intel_drrs_type_str(enum drrs_type drrs_type);
bool intel_drrs_is_active(struct intel_crtc *crtc);
void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index d62e050185e7..4baaa92ceaec 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -340,6 +340,18 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency));
}
+static u32 dsb_chicken(struct intel_crtc *crtc)
+{
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR)
+ return DSB_SKIP_WAITS_EN |
+ DSB_CTRL_WAIT_SAFE_WINDOW |
+ DSB_CTRL_NO_WAIT_VBLANK |
+ DSB_INST_WAIT_SAFE_WINDOW |
+ DSB_INST_NO_WAIT_VBLANK;
+ else
+ return DSB_SKIP_WAITS_EN;
+}
+
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
int dewake_scanline)
{
@@ -361,6 +373,9 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
ctrl | DSB_ENABLE);
+ intel_de_write_fw(dev_priv, DSB_CHICKEN(pipe, dsb->id),
+ dsb_chicken(crtc));
+
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index d3cf6a652221..bd5888ce4852 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -64,14 +64,11 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(intel_connector, mode);
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
drm_dbg_kms(&dev_priv->drm, "\n");
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
status = intel_panel_mode_valid(intel_connector, mode);
if (status != MODE_OK)
return status;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index c076da75b066..1840f5b59229 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -223,7 +223,7 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
- int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq;
int target_clock = mode->clock;
enum drm_mode_status status;
@@ -231,9 +231,6 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
/* XXX: Validate clock range */
if (fixed_mode) {
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 3ea6470d6d92..86b443433e8b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1106,7 +1106,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
{
struct drm_i915_private *i915 = to_i915(fb->dev);
unsigned int height;
- u32 alignment;
+ u32 alignment, unused;
if (DISPLAY_VER(i915) >= 12 &&
!intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) &&
@@ -1128,8 +1128,8 @@ static int intel_fb_offset_to_xy(int *x, int *y,
height = ALIGN(height, intel_tile_height(fb, color_plane));
/* Catch potential overflows early */
- if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
- fb->offsets[color_plane])) {
+ if (check_add_overflow(mul_u32_u32(height, fb->pitches[color_plane]),
+ fb->offsets[color_plane], &unused)) {
drm_dbg_kms(&i915->drm,
"Bad offset 0x%08x or pitch %d for color plane %d\n",
fb->offsets[color_plane], fb->pitches[color_plane],
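For context, a sketch of the <linux/overflow.h> helper adopted above: check_add_overflow() stores the sum in its third argument and returns true on overflow, which is why the otherwise-unused "unused" variable is introduced in this function.

	static bool u32_add_would_overflow(u32 a, u32 b)
	{
		u32 sum;

		return check_add_overflow(a, b, &sum);	/* sum discarded, as above */
	}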
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 7b42aef37d2f..b6df9baf481b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -255,6 +255,16 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return PTR_ERR(vma);
plane_state->ggtt_vma = vma;
+
+ /*
+ * Pre-populate the dma address before we enter the vblank
+ * evade critical section as i915_gem_object_get_dma_address()
+ * will trigger might_sleep() even if it won't actually sleep,
+ * which is the case when the fb has already been pinned.
+ */
+ if (phys_cursor)
+ plane_state->phys_dma_addr =
+ i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);
} else {
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b453fcbd67da..151dcd0c45b6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -54,6 +54,7 @@
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
+#include "intel_fbc_regs.h"
#include "intel_frontbuffer.h"
#define for_each_fbc_id(__dev_priv, __fbc_id) \
@@ -826,10 +827,36 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) {
+ /*
+ * WaFbcHighMemBwCorruptionAvoidance:skl,bxt
+ * Display WA #0883: skl,bxt
+ */
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_DISABLE_DUMMY0);
+ }
+
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ /*
+ * WaFbcNukeOnHostModify:skl,kbl,cfl
+ * Display WA #0873: skl,kbl,cfl
+ */
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_NUKE_ON_ANY_MODIFICATION);
+ }
+
+ /* Wa_1409120013:icl,jsl,tgl,dg1 */
+ if (IS_DISPLAY_VER(i915, 11, 12))
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
- if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
- intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
- DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+ if (DISPLAY_VER(i915) >= 11 && !IS_DG2(i915))
+ intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ 0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
diff --git a/drivers/gpu/drm/i915/display/intel_fbc_regs.h b/drivers/gpu/drm/i915/display/intel_fbc_regs.h
new file mode 100644
index 000000000000..ae0699c3c2fe
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbc_regs.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __INTEL_FBC_REGS__
+#define __INTEL_FBC_REGS__
+
+#include "intel_display_reg_defs.h"
+
+#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
+#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
+#define FBC_CONTROL _MMIO(0x3208)
+#define FBC_CTL_EN REG_BIT(31)
+#define FBC_CTL_PERIODIC REG_BIT(30)
+#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
+#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
+#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
+#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
+#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */
+#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
+#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
+#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
+#define FBC_COMMAND _MMIO(0x320c)
+#define FBC_CMD_COMPRESS REG_BIT(0)
+#define FBC_STATUS _MMIO(0x3210)
+#define FBC_STAT_COMPRESSING REG_BIT(31)
+#define FBC_STAT_COMPRESSED REG_BIT(30)
+#define FBC_STAT_MODIFIED REG_BIT(29)
+#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0)
+#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */
+#define FBC_CTL_FENCE_DBL REG_BIT(4)
+#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2)
+#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0)
+#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1)
+#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2)
+#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3)
+#define FBC_CTL_CPU_FENCE_EN REG_BIT(1)
+#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0)
+#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane))
+#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */
+#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */
+#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1)
+#define FBC_MOD_NUM_VALID REG_BIT(0)
+#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 registers */
+#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */
+#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0)
+#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1)
+#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2)
+#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3)
+
+#define FBC_LL_SIZE (1536)
+
+#define DPFC_CB_BASE _MMIO(0x3200)
+#define ILK_DPFC_CB_BASE(fbc_id) _MMIO_PIPE((fbc_id), 0x43200, 0x43240)
+#define DPFC_CONTROL _MMIO(0x3208)
+#define ILK_DPFC_CONTROL(fbc_id) _MMIO_PIPE((fbc_id), 0x43208, 0x43248)
+#define DPFC_CTL_EN REG_BIT(31)
+#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */
+#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane))
+#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */
+#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */
+#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane))
+#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */
+#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */
+#define DPFC_CTL_PLANE_BINDING_MASK REG_GENMASK(12, 11) /* lnl+ */
+#define DPFC_CTL_PLANE_BINDING(plane_id) REG_FIELD_PREP(DPFC_CTL_PLANE_BINDING_MASK, (plane_id))
+#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */
+#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */
+#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */
+#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6)
+#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0)
+#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1)
+#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2)
+#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence))
+#define DPFC_RECOMP_CTL _MMIO(0x320c)
+#define ILK_DPFC_RECOMP_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x4320c, 0x4324c)
+#define DPFC_RECOMP_STALL_EN REG_BIT(27)
+#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16)
+#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0)
+#define DPFC_STATUS _MMIO(0x3210)
+#define ILK_DPFC_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43210, 0x43250)
+#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16)
+#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0)
+#define DPFC_STATUS2 _MMIO(0x3214)
+#define ILK_DPFC_STATUS2(fbc_id) _MMIO_PIPE((fbc_id), 0x43214, 0x43254)
+#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0)
+#define DPFC_FENCE_YOFF _MMIO(0x3218)
+#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258)
+#define DPFC_CHICKEN _MMIO(0x3224)
+#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264)
+#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
+#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
+#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
+#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */
+#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
+
+#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268)
+#define FBC_STRIDE_OVERRIDE REG_BIT(15)
+#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
+#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
+
+#define ILK_FBC_RT_BASE _MMIO(0x2128)
+#define ILK_FBC_RT_VALID REG_BIT(0)
+#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
+
+#define SNB_DPFC_CTL_SA _MMIO(0x100100)
+#define SNB_DPFC_FENCE_EN REG_BIT(29)
+#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0)
+#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence))
+#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
+
+#define IVB_FBC_RT_BASE _MMIO(0x7020)
+#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
+
+#define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384)
+#define FBC_REND_NUKE REG_BIT(2)
+#define FBC_REND_CACHE_CLEAN REG_BIT(1)
+
+#endif /* __INTEL_FBC_REGS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 99894a855ef0..bda702c2cab8 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -24,7 +24,6 @@
* David Airlie
*/
-#include <linux/async.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -39,6 +38,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -58,7 +58,6 @@ struct intel_fbdev {
struct intel_framebuffer *fb;
struct i915_vma *vma;
unsigned long vma_flags;
- async_cookie_t cookie;
int preferred_bpp;
/* Whether or not fbdev hpd processing is temporarily suspended */
@@ -135,6 +134,29 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
return i915_gem_fb_mmap(obj, vma);
}
+static void intel_fbdev_fb_destroy(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev = container_of(fb_helper, struct intel_fbdev, helper);
+
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ /*
+ * We rely on the object-free to release the VMA pinning for
+ * the info->screen_base mmapping. Leaking the VMA is simpler than
+ * trying to rectify all the possible error paths leading here.
+ */
+ intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
+ drm_framebuffer_remove(&ifbdev->fb->base);
+
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(&ifbdev->helper);
+ kfree(ifbdev);
+}
+
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for fb ops");
+
static const struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev),
@@ -144,8 +166,11 @@ static const struct fb_ops intelfb_ops = {
.fb_pan_display = intel_fbdev_pan_display,
__FB_DEFAULT_DEFERRED_OPS_DRAW(intel_fbdev),
.fb_mmap = intel_fbdev_mmap,
+ .fb_destroy = intel_fbdev_fb_destroy,
};
+__diag_pop();
+
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -153,7 +178,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
const struct i915_gtt_view view = {
.type = I915_GTT_VIEW_NORMAL,
};
@@ -245,7 +269,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma_flags = flags;
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- vga_switcheroo_client_fb_set(pdev, info);
+
return 0;
out_unpin:
@@ -271,25 +295,6 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_dirty = intelfb_dirty,
};
-static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
-{
- /* We rely on the object-free to release the VMA pinning for
- * the info->screen_base mmaping. Leaking the VMA is simpler than
- * trying to rectify all the possible error paths leading here.
- */
-
- drm_fb_helper_fini(&ifbdev->helper);
-
- if (ifbdev->vma)
- intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
-
- if (ifbdev->fb)
- drm_framebuffer_remove(&ifbdev->fb->base);
-
- drm_fb_helper_unprepare(&ifbdev->helper);
- kfree(ifbdev);
-}
-
/*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
* The core display code will have read out the current plane configuration,
@@ -453,93 +458,6 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
true);
}
-int intel_fbdev_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_fbdev *ifbdev;
- int ret;
-
- if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv)))
- return -ENODEV;
-
- ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
- if (ifbdev == NULL)
- return -ENOMEM;
-
- mutex_init(&ifbdev->hpd_lock);
- drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
-
- if (intel_fbdev_init_bios(dev, ifbdev))
- ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
- else
- ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
-
- ret = drm_fb_helper_init(dev, &ifbdev->helper);
- if (ret) {
- kfree(ifbdev);
- return ret;
- }
-
- dev_priv->display.fbdev.fbdev = ifbdev;
- INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
-
- return 0;
-}
-
-static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
-{
- struct intel_fbdev *ifbdev = data;
-
- /* Due to peculiar init order wrt to hpd handling this is separate. */
- if (drm_fb_helper_initial_config(&ifbdev->helper))
- intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
-}
-
-void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
-
- if (!ifbdev)
- return;
-
- ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
-}
-
-static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
-{
- if (!ifbdev->cookie)
- return;
-
- /* Only serialises with all preceding async calls, hence +1 */
- async_synchronize_cookie(ifbdev->cookie + 1);
- ifbdev->cookie = 0;
-}
-
-void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
-
- if (!ifbdev)
- return;
-
- intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true);
-
- if (!current_is_async())
- intel_fbdev_sync(ifbdev);
-
- drm_fb_helper_unregister_info(&ifbdev->helper);
-}
-
-void intel_fbdev_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev);
-
- if (!ifbdev)
- return;
-
- intel_fbdev_destroy(ifbdev);
-}
-
/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
* processing, fbdev will perform a full connector reprobe if a hotplug event
* was received while HPD was suspended.
@@ -622,15 +540,13 @@ set_suspend:
intel_fbdev_hpd_set_suspend(dev_priv, state);
}
-void intel_fbdev_output_poll_changed(struct drm_device *dev)
+static int intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
bool send_hpd;
if (!ifbdev)
- return;
-
- intel_fbdev_sync(ifbdev);
+ return -EINVAL;
mutex_lock(&ifbdev->hpd_lock);
send_hpd = !ifbdev->hpd_suspended;
@@ -639,21 +555,137 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
drm_fb_helper_hotplug_event(&ifbdev->helper);
+
+ return 0;
}
-void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
+static int intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
{
struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
+ int ret;
if (!ifbdev)
- return;
+ return -EINVAL;
- intel_fbdev_sync(ifbdev);
if (!ifbdev->vma)
+ return -ENOMEM;
+
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper);
+ if (ret)
+ return ret;
+
+ intel_fbdev_invalidate(ifbdev);
+
+ return 0;
+}
+
+/*
+ * Fbdev client and struct drm_client_funcs
+ */
+
+static void intel_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = fb_helper->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (fb_helper->info) {
+ vga_switcheroo_client_fb_set(pdev, NULL);
+ drm_fb_helper_unregister_info(fb_helper);
+ } else {
+ drm_fb_helper_unprepare(fb_helper);
+ drm_client_release(&fb_helper->client);
+ kfree(fb_helper);
+ }
+}
+
+static int intel_fbdev_client_restore(struct drm_client_dev *client)
+{
+ struct drm_i915_private *dev_priv = to_i915(client->dev);
+ int ret;
+
+ ret = intel_fbdev_restore_mode(dev_priv);
+ if (ret)
+ return ret;
+
+ vga_switcheroo_process_delayed_switch();
+
+ return 0;
+}
+
+static int intel_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int ret;
+
+ if (dev->fb_helper)
+ return intel_fbdev_output_poll_changed(dev);
+
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err_drm_err;
+
+ ret = drm_fb_helper_initial_config(fb_helper);
+ if (ret)
+ goto err_drm_fb_helper_fini;
+
+ vga_switcheroo_client_fb_set(pdev, fb_helper->info);
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
+ drm_err(dev, "Failed to setup i915 fbdev emulation (ret=%d)\n", ret);
+ return ret;
+}
+
+static const struct drm_client_funcs intel_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = intel_fbdev_client_unregister,
+ .restore = intel_fbdev_client_restore,
+ .hotplug = intel_fbdev_client_hotplug,
+};
+
+void intel_fbdev_setup(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ struct intel_fbdev *ifbdev;
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
return;
- if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
- intel_fbdev_invalidate(ifbdev);
+ ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
+ if (!ifbdev)
+ return;
+ drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
+
+ i915->display.fbdev.fbdev = ifbdev;
+ INIT_WORK(&i915->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
+ mutex_init(&ifbdev->hpd_lock);
+ if (intel_fbdev_init_bios(dev, ifbdev))
+ ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
+ else
+ ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
+
+ ret = drm_client_init(dev, &ifbdev->helper.client, "intel-fbdev",
+ &intel_fbdev_client_funcs);
+ if (ret) {
+ drm_err(dev, "Failed to register client: %d\n", ret);
+ goto err_drm_fb_helper_unprepare;
+ }
+
+ drm_client_register(&ifbdev->helper.client);
+
+ return;
+
+err_drm_fb_helper_unprepare:
+ drm_fb_helper_unprepare(&ifbdev->helper);
+ mutex_destroy(&ifbdev->hpd_lock);
+ kfree(ifbdev);
}
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 04fd523a5023..08de2d5b3433 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -14,29 +14,11 @@ struct intel_fbdev;
struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
-int intel_fbdev_init(struct drm_device *dev);
-void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv);
-void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
-void intel_fbdev_fini(struct drm_i915_private *dev_priv);
+void intel_fbdev_setup(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
-void intel_fbdev_output_poll_changed(struct drm_device *dev);
-void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
#else
-static inline int intel_fbdev_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv)
{
}
@@ -44,13 +26,6 @@ static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bo
{
}
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915)
-{
-}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
{
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/display/intel_fixed.h
index a327094de2bd..a327094de2bd 100644
--- a/drivers/gpu/drm/i915/i915_fixed.h
+++ b/drivers/gpu/drm/i915/display/intel_fixed.h
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index d3e03ed5b79c..9c8e1e91ff1c 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -411,7 +411,7 @@ gmbus_wait_idle(struct drm_i915_private *i915)
add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
- ret = intel_de_wait_for_register_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
+ ret = intel_de_wait_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
intel_de_write_fw(i915, GMBUS4(i915), 0);
remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 9edac27bab26..d5ed4c7dfbc0 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -369,9 +369,9 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915)
}
/* Wait for the keys to load (500us) */
- ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS,
- HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
- 10, 1, &val);
+ ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS,
+ HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
+ 10, 1, &val);
if (ret)
return ret;
else if (!(val & HDCP_KEY_LOAD_STATUS))
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 302bff75b06c..35823e1f65d6 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -13,6 +13,12 @@
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
+struct intel_hdcp_gsc_message {
+ struct i915_vma *vma;
+ void *hdcp_cmd_in;
+ void *hdcp_cmd_out;
+};
+
bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
{
return DISPLAY_VER(i915) >= 14;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
index eba2057c5a9e..5f610df61cc9 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -10,12 +10,7 @@
#include <linux/types.h>
struct drm_i915_private;
-
-struct intel_hdcp_gsc_message {
- struct i915_vma *vma;
- void *hdcp_cmd_in;
- void *hdcp_cmd_out;
-};
+struct intel_hdcp_gsc_message;
bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915);
ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 90d2236fede3..5f6deceaf8ba 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -114,6 +114,8 @@ static u32 g4x_infoframe_enable(unsigned int type)
return VIDEO_DIP_ENABLE_GAMUT;
case DP_SDP_VSC:
return 0;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return 0;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -137,6 +139,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
return VIDEO_DIP_ENABLE_GMP_HSW;
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return VIDEO_DIP_ENABLE_AS_ADL;
case DP_SDP_PPS:
return VDIP_ENABLE_PPS;
case HDMI_INFOFRAME_TYPE_AVI:
@@ -164,6 +168,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i);
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+ case DP_SDP_ADAPTIVE_SYNC:
+ return ADL_TVIDEO_DIP_AS_SDP_DATA(cpu_transcoder, i);
case DP_SDP_PPS:
return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
@@ -186,6 +192,8 @@ static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_VSC_DATA_SIZE;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return VIDEO_DIP_ASYNC_DATA_SIZE;
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
@@ -563,6 +571,9 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) >= 10)
mask |= VIDEO_DIP_ENABLE_DRM_GLK;
+ if (HAS_AS_SDP(dev_priv))
+ mask |= VIDEO_DIP_ENABLE_AS_ADL;
+
return val & mask;
}
@@ -570,6 +581,7 @@ static const u8 infoframe_type_to_idx[] = {
HDMI_PACKET_TYPE_GENERAL_CONTROL,
HDMI_PACKET_TYPE_GAMUT_METADATA,
DP_SDP_VSC,
+ DP_SDP_ADAPTIVE_SYNC,
HDMI_INFOFRAME_TYPE_AVI,
HDMI_INFOFRAME_TYPE_SPD,
HDMI_INFOFRAME_TYPE_VENDOR,
@@ -1212,7 +1224,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
- VIDEO_DIP_ENABLE_DRM_GLK);
+ VIDEO_DIP_ENABLE_DRM_GLK | VIDEO_DIP_ENABLE_AS_ADL);
if (!enable) {
intel_de_write(dev_priv, reg, val);
@@ -1832,7 +1844,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
bool has_hdmi_sink)
{
struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
- enum phy phy = intel_port_to_phy(dev_priv, hdmi_to_dig_port(hdmi)->base.port);
+ struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
if (clock < 25000)
return MODE_CLOCK_LOW;
@@ -1854,11 +1866,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_RANGE;
/* ICL+ combo PHY PLL can't generate 500-533.2 MHz */
- if (intel_phy_is_combo(dev_priv, phy) && clock > 500000 && clock < 533200)
+ if (intel_encoder_is_combo(encoder) && clock > 500000 && clock < 533200)
return MODE_CLOCK_RANGE;
/* ICL+ TC PHY PLL can't generate 500-532.8 MHz */
- if (intel_phy_is_tc(dev_priv, phy) && clock > 500000 && clock < 532800)
+ if (intel_encoder_is_tc(encoder) && clock > 500000 && clock < 532800)
return MODE_CLOCK_RANGE;
/*
@@ -1981,7 +1993,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
enum drm_mode_status status;
int clock = mode->clock;
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
bool ycbcr_420_only;
enum intel_output_format sink_format;
@@ -2664,8 +2676,9 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
drm_scdc_set_scrambling(connector, scrambling);
}
-static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 chv_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2686,8 +2699,9 @@ static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
-static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 bxt_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2705,9 +2719,9 @@ static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
-static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static u8 cnp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2731,22 +2745,23 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
-static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
- if (intel_phy_is_combo(dev_priv, phy))
+ if (intel_encoder_is_combo(encoder))
return GMBUS_PIN_1_BXT + port;
- else if (intel_phy_is_tc(dev_priv, phy))
- return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
+ else if (intel_encoder_is_tc(encoder))
+ return GMBUS_PIN_9_TC1_ICP + intel_encoder_to_tc(encoder);
drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
return GMBUS_PIN_2_BXT;
}
-static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 mcc_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum phy phy = intel_encoder_to_phy(encoder);
u8 ddc_pin;
switch (phy) {
@@ -2767,11 +2782,12 @@ static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
-static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
- WARN_ON(port == PORT_C);
+ WARN_ON(encoder->port == PORT_C);
/*
* Pin mapping for RKL depends on which PCH is present. With TGP, the
@@ -2785,11 +2801,12 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return GMBUS_PIN_1_BXT + phy;
}
-static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
+static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(i915, port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
- drm_WARN_ON(&i915->drm, port == PORT_A);
+ drm_WARN_ON(&i915->drm, encoder->port == PORT_A);
/*
* Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
@@ -2803,16 +2820,16 @@ static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port po
return GMBUS_PIN_1_BXT + phy;
}
-static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 dg1_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- return intel_port_to_phy(dev_priv, port) + 1;
+ return intel_encoder_to_phy(encoder) + 1;
}
-static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+static u8 adls_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum phy phy = intel_encoder_to_phy(encoder);
- WARN_ON(port == PORT_B || port == PORT_C);
+ WARN_ON(encoder->port == PORT_B || encoder->port == PORT_C);
/*
* Pin mapping for ADL-S requires TC pins for all combo phy outputs
@@ -2824,9 +2841,9 @@ static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port
return GMBUS_PIN_9_TC1_ICP + phy - PHY_B;
}
-static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ enum port port = encoder->port;
u8 ddc_pin;
switch (port) {
@@ -2850,30 +2867,29 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
u8 ddc_pin;
if (IS_ALDERLAKE_S(dev_priv))
- ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = adls_encoder_to_ddc_pin(encoder);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
- ddc_pin = dg1_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = dg1_encoder_to_ddc_pin(encoder);
else if (IS_ROCKETLAKE(dev_priv))
- ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = rkl_encoder_to_ddc_pin(encoder);
else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
- ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
HAS_PCH_TGP(dev_priv))
- ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = mcc_encoder_to_ddc_pin(encoder);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = icl_encoder_to_ddc_pin(encoder);
else if (HAS_PCH_CNP(dev_priv))
- ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = cnp_encoder_to_ddc_pin(encoder);
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = bxt_encoder_to_ddc_pin(encoder);
else if (IS_CHERRYVIEW(dev_priv))
- ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = chv_encoder_to_ddc_pin(encoder);
else
- ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
+ ddc_pin = g4x_encoder_to_ddc_pin(encoder);
return ddc_pin;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 76076509f771..d270bb7b9462 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -1444,7 +1444,7 @@ void intel_hpd_enable_detection(struct intel_encoder *encoder)
void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
+ if (i915->display.irq.display_irqs_enabled && i915->display.funcs.hotplug)
i915->display.funcs.hotplug->hpd_irq_setup(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 5863763de530..93e6cac9a4ed 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -72,7 +72,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_reg.h"
+#include "intel_audio_regs.h"
#include "intel_de.h"
#include "intel_lpe_audio.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 221f5c6c871b..8b8959073466 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -392,16 +392,13 @@ intel_lvds_mode_valid(struct drm_connector *_connector,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
- int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
+ int max_pixclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
status = intel_panel_mode_valid(connector, mode);
if (status != MODE_OK)
return status;
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index fcbb083318a7..68bd5101ec89 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -27,7 +27,6 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
-#include <linux/firmware.h>
#include <acpi/video.h>
#include <drm/drm_edid.h>
@@ -263,7 +262,6 @@ struct intel_opregion {
struct opregion_asle *asle;
struct opregion_asle_ext *asle_ext;
void *rvda;
- void *vbt_firmware;
const void *vbt;
u32 vbt_size;
struct work_struct asle_work;
@@ -869,46 +867,6 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
-static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
-{
- struct intel_opregion *opregion = dev_priv->display.opregion;
- const struct firmware *fw = NULL;
- const char *name = dev_priv->display.params.vbt_firmware;
- int ret;
-
- if (!name || !*name)
- return -ENOENT;
-
- ret = request_firmware(&fw, name, dev_priv->drm.dev);
- if (ret) {
- drm_err(&dev_priv->drm,
- "Requesting VBT firmware \"%s\" failed (%d)\n",
- name, ret);
- return ret;
- }
-
- if (intel_bios_is_valid_vbt(dev_priv, fw->data, fw->size)) {
- opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (opregion->vbt_firmware) {
- drm_dbg_kms(&dev_priv->drm,
- "Found valid VBT firmware \"%s\"\n", name);
- opregion->vbt = opregion->vbt_firmware;
- opregion->vbt_size = fw->size;
- ret = 0;
- } else {
- ret = -ENOMEM;
- }
- } else {
- drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n",
- name);
- ret = -EINVAL;
- }
-
- release_firmware(fw);
-
- return ret;
-}
-
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion;
@@ -1006,9 +964,6 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n");
}
- if (intel_load_vbt_firmware(dev_priv) == 0)
- goto out;
-
if (dmi_check_system(intel_no_opregion_vbt))
goto out;
@@ -1176,6 +1131,16 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
return drm_edid;
}
+bool intel_opregion_vbt_present(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = i915->display.opregion;
+
+ if (!opregion || !opregion->vbt)
+ return false;
+
+ return true;
+}
+
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
{
struct intel_opregion *opregion = i915->display.opregion;
@@ -1186,7 +1151,7 @@ const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
if (size)
*size = opregion->vbt_size;
- return opregion->vbt;
+ return kmemdup(opregion->vbt, opregion->vbt_size, GFP_KERNEL);
}
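
Note: intel_opregion_get_vbt() now hands back a kmemdup()'d copy rather than a pointer into the opregion mapping, so ownership moves to the caller. A minimal caller sketch of the new contract (hypothetical snippet, not part of this patch; assumes an i915 pointer is in scope):

	size_t vbt_size;
	const void *vbt = intel_opregion_get_vbt(i915, &vbt_size);

	if (vbt) {
		/* ... parse the private copy ... */
		kfree(vbt);	/* the caller must free it now */
	}
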
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
@@ -1312,7 +1277,6 @@ void intel_opregion_cleanup(struct drm_i915_private *i915)
memunmap(opregion->header);
if (opregion->rvda)
memunmap(opregion->rvda);
- kfree(opregion->vbt_firmware);
kfree(opregion);
i915->display.opregion = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 0bec224f711f..4b2b8e752632 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -53,6 +53,7 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
+bool intel_opregion_vbt_present(struct drm_i915_private *i915);
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size);
bool intel_opregion_headless_sku(struct drm_i915_private *i915);
@@ -119,6 +120,11 @@ intel_opregion_get_edid(struct intel_connector *connector)
return NULL;
}
+static inline bool intel_opregion_vbt_present(struct drm_i915_private *i915)
+{
+ return false;
+}
+
static inline const void *
intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
{
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 2b1392d5a902..1c2099ed5514 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -972,10 +972,11 @@ static int check_overlay_dst(struct intel_overlay *overlay,
rec->dst_width, rec->dst_height);
clipped = req;
- drm_rect_intersect(&clipped, &crtc_state->pipe_src);
- if (!drm_rect_visible(&clipped) ||
- !drm_rect_equals(&clipped, &req))
+ if (!drm_rect_intersect(&clipped, &crtc_state->pipe_src))
+ return -EINVAL;
+
+ if (!drm_rect_equals(&clipped, &req))
return -EINVAL;
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 073ea3166c36..6f4ff6a89c32 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -47,10 +47,12 @@
bool intel_panel_use_ssc(struct drm_i915_private *i915)
{
- if (i915->display.params.panel_use_ssc >= 0)
- return i915->display.params.panel_use_ssc != 0;
- return i915->display.vbt.lvds_use_ssc &&
- !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
+ struct intel_display *display = &i915->display;
+
+ if (display->params.panel_use_ssc >= 0)
+ return display->params.panel_use_ssc != 0;
+ return display->vbt.lvds_use_ssc &&
+ !intel_has_quirk(display, QUIRK_LVDS_SSC_DISABLE);
}
const struct drm_display_mode *
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index baf679759e00..826e38a9e6a4 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -474,7 +474,7 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* read out port_clock from the DPLL */
- i9xx_crtc_clock_get(crtc, crtc_state);
+ i9xx_crtc_clock_get(crtc_state);
/*
* In case there is an active pipe without active ports,
@@ -529,7 +529,7 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
&crtc_state->dpll_hw_state);
drm_WARN_ON(&dev_priv->drm, !pll_active);
- tmp = crtc_state->dpll_hw_state.dpll;
+ tmp = crtc_state->dpll_hw_state.i9xx.dpll;
crtc_state->pixel_multiplier =
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 744e332fa2af..9ca981b7a12c 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -119,10 +119,11 @@ intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
if (!encoder)
return;
- phy = intel_port_to_phy(i915, encoder->port);
- if (intel_phy_is_tc(i915, phy))
+ if (intel_encoder_is_tc(encoder))
return;
+ phy = intel_encoder_to_phy(encoder);
+
if (set_bit)
pmdemand_state->active_combo_phys_mask |= BIT(phy);
else
@@ -222,14 +223,7 @@ static bool
intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
struct intel_encoder *encoder)
{
- enum phy phy;
-
- if (!encoder)
- return false;
-
- phy = intel_port_to_phy(i915, encoder->port);
-
- return intel_phy_is_tc(i915, phy);
+ return encoder && intel_encoder_is_tc(encoder);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h
index 2941a1a18b72..128fd61f8f14 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.h
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h
@@ -43,9 +43,8 @@ struct intel_pmdemand_state {
struct pmdemand_params params;
};
-#define to_intel_pmdemand_state(x) container_of((x), \
- struct intel_pmdemand_state, \
- base)
+#define to_intel_pmdemand_state(global_state) \
+ container_of_const((global_state), struct intel_pmdemand_state, base)
void intel_pmdemand_init_early(struct drm_i915_private *i915);
int intel_pmdemand_init(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 2d65a538f83e..0ccbf9a85914 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -605,8 +605,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
- if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
- mask, value, 5000))
+ if (intel_de_wait(dev_priv, pp_stat_reg, mask, value, 5000))
drm_err(&dev_priv->drm,
"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
@@ -1351,7 +1350,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp,
static void pps_init_delays_vbt(struct intel_dp *intel_dp,
struct edp_power_seq *vbt)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
*vbt = connector->panel.vbt.edp.pps;
@@ -1364,9 +1363,9 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
* just fails to power back on. Increasing the delay to 800ms
* seems sufficient to avoid this problem.
*/
- if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) {
+ if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
vbt->t11_t12);
}
@@ -1671,6 +1670,37 @@ void intel_pps_setup(struct drm_i915_private *i915)
i915->display.pps.mmio_base = PPS_BASE;
}
+static int intel_pps_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ if (connector->base.status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Panel power up delay: %d\n",
+ intel_dp->pps.panel_power_up_delay);
+ seq_printf(m, "Panel power down delay: %d\n",
+ intel_dp->pps.panel_power_down_delay);
+ seq_printf(m, "Backlight on delay: %d\n",
+ intel_dp->pps.backlight_on_delay);
+ seq_printf(m, "Backlight off delay: %d\n",
+ intel_dp->pps.backlight_off_delay);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(intel_pps);
+
+void intel_pps_connector_debugfs_add(struct intel_connector *connector)
+{
+ struct dentry *root = connector->base.debugfs_entry;
+ int connector_type = connector->base.connector_type;
+
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file("i915_panel_timings", 0444, root,
+ connector, &intel_pps_fops);
+}
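
Once this hook runs, the timings should be visible per eDP connector under DRM's connector debugfs directory, typically something like /sys/kernel/debug/dri/0/eDP-1/i915_panel_timings (the dri minor and connector name vary by system); reading the file prints the four power/backlight delays formatted by intel_pps_show() above.
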
+
void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
i915_reg_t pp_reg;
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index a2c2467e3c22..07ef96ca8da2 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -51,6 +51,8 @@ void vlv_pps_init(struct intel_encoder *encoder,
void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
void intel_pps_setup(struct drm_i915_private *i915);
+void intel_pps_connector_debugfs_add(struct intel_connector *connector);
+
void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe);
#endif /* __INTEL_PPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 6927785fd6ff..f5b33335a9ae 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -171,14 +171,27 @@
*
* The rest of the bits are more self-explanatory and/or
* irrelevant for normal operation.
+ *
+ * Description of the intel_crtc_state variables has_psr, has_panel_replay and
+ * has_sel_update:
+ *
+ * has_psr (alone): PSR1
+ * has_psr + has_sel_update: PSR2
+ * has_psr + has_panel_replay: Panel Replay
+ * has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
+ *
+ * Description of some intel_psr variables: enabled, panel_replay_enabled and
+ * sel_update_enabled:
+ *
+ * enabled (alone): PSR1
+ * enabled + sel_update_enabled: PSR2
+ * enabled + panel_replay_enabled: Panel Replay
+ * enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU
*/
#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
(intel_dp)->psr.source_support)
-#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
- (intel_dp)->psr.source_panel_replay_support)
-
bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
@@ -330,6 +343,9 @@ static void psr_irq_control(struct intel_dp *intel_dp)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
+ if (intel_dp->psr.panel_replay_enabled)
+ return;
+
mask = psr_irq_psr_error_bit_get(intel_dp);
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
@@ -619,40 +635,59 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
return false;
}
-static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+static unsigned int intel_psr_get_enable_sink_offset(struct intel_dp *intel_dp)
+{
+ return intel_dp->psr.panel_replay_enabled ?
+ PANEL_REPLAY_CONFIG : DP_PSR_EN_CFG;
+}
+
+/*
+ * Note: Most of the bits are same in PANEL_REPLAY_CONFIG and DP_PSR_EN_CFG. We
+ * are relying on PSR definitions on these "common" bits.
+ */
+void intel_psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 dpcd_val = DP_PSR_ENABLE;
- if (intel_dp->psr.panel_replay_enabled)
- return;
-
- if (intel_dp->psr.psr2_enabled) {
+ if (crtc_state->has_psr2) {
/* Enable ALPM at sink for psr2 */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE |
- DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+ if (!crtc_state->has_panel_replay) {
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE |
+ DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+
+ if (psr2_su_region_et_valid(intel_dp))
+ dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
+ }
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
- if (psr2_su_region_et_valid(intel_dp))
- dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
} else {
if (intel_dp->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (!crtc_state->has_panel_replay && DISPLAY_VER(dev_priv) >= 8)
dpcd_val |= DP_PSR_CRC_VERIFICATION;
}
- if (intel_dp->psr.req_psr2_sdp_prior_scanline)
+ if (crtc_state->has_panel_replay)
+ dpcd_val |= DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
+ DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN;
+
+ if (crtc_state->req_psr2_sdp_prior_scanline)
dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
if (intel_dp->psr.entry_setup_frames > 0)
dpcd_val |= DP_PSR_FRAME_CAPTURE;
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ intel_psr_get_enable_sink_offset(intel_dp),
+ dpcd_val);
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ if (intel_dp_is_edp(intel_dp))
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
@@ -1126,6 +1161,141 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
+/*
+ * See Bspec: 71632 for the table
+ *
+ * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
+ *
+ * Half cycle duration:
+ *
+ * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
+ * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
+ *
+ * Link rates 5.4 - 8.1
+ * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
+ * LFPS Period chosen is the mid-point of the min:max values from the table
+ * FLOOR( LFPS Period in Symbol clocks /
+ * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
+ */
+static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
+ int *silence_period,
+ int *lfps_half_cycle)
+{
+ switch (link_rate) {
+ case 162000:
+ *silence_period = 20;
+ *lfps_half_cycle = 5;
+ break;
+ case 216000:
+ *silence_period = 27;
+ *lfps_half_cycle = 7;
+ break;
+ case 243000:
+ *silence_period = 31;
+ *lfps_half_cycle = 8;
+ break;
+ case 270000:
+ *silence_period = 34;
+ *lfps_half_cycle = 9;
+ break;
+ case 324000:
+ *silence_period = 41;
+ *lfps_half_cycle = 11;
+ break;
+ case 432000:
+ *silence_period = 56;
+ *lfps_half_cycle = 15;
+ break;
+ case 540000:
+ *silence_period = 69;
+ *lfps_half_cycle = 12;
+ break;
+ case 648000:
+ *silence_period = 84;
+ *lfps_half_cycle = 15;
+ break;
+ case 675000:
+ *silence_period = 87;
+ *lfps_half_cycle = 15;
+ break;
+ case 810000:
+ *silence_period = 104;
+ *lfps_half_cycle = 19;
+ break;
+ default:
+ *silence_period = *lfps_half_cycle = -1;
+ return false;
+ }
+ return true;
+}
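
A quick sanity check of the 1.62 - 4.32 half-cycle formula against the table values above (my own arithmetic, not from the patch): at 2.7 Gbps, FLOOR((2.7e9 * 70 ns) / (2 * 10)) = FLOOR(189 / 20) = 9, matching the value returned for 270000; at 1.62 Gbps the same formula gives FLOOR(113.4 / 20) = 5, matching the 162000 entry. The silence_period values come straight from the Bspec tSilence min/max table and cannot be rederived from this patch alone.
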
+
+/*
+ * AUX-Less Wake Time = CEILING( ((PHY P2 to P0) + tLFPS_Period,Max +
+ *                                tSilence,Max + tPHY Establishment + tCDS) / tline)
+ * For the "PHY P2 to P0" latency see the PHY Power Control page
+ * (PHY P2 to P0) : https://gfxspecs.intel.com/Predator/Home/Index/68965
+ * : 12 us
+ * The tLFPS_Period, Max term is 800ns
+ * The tSilence, Max term is 180ns
+ * The tPHY Establishment (a.k.a. t1) term is 50us
+ * The tCDS term is 1 or 2 times t2
+ * t2 = Number ML_PHY_LOCK * tML_PHY_LOCK
+ * Number ML_PHY_LOCK = ( 7 + CEILING( 6.5us / tML_PHY_LOCK ) + 1)
+ * Rounding up the 6.5us padding to the next ML_PHY_LOCK boundary and
+ * adding the "+ 1" term ensures all ML_PHY_LOCK sequences that start
+ * within the CDS period complete within the CDS period regardless of
+ * entry into the period
+ * tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
+ * TPS4 Length = 252 Symbols
+ */
+static int _lnl_compute_aux_less_wake_time(int port_clock)
+{
+ int tphy2_p2_to_p0 = 12 * 1000;
+ int tlfps_period_max = 800;
+ int tsilence_max = 180;
+ int t1 = 50 * 1000;
+ int tps4 = 252;
+ int tml_phy_lock = 1000 * 1000 * tps4 * 10 / port_clock;
+ int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
+ int t2 = num_ml_phy_lock * tml_phy_lock;
+ int tcds = 1 * t2;
+
+ return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
+ t1 + tcds, 1000);
+}
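
Working the function above through by hand for a 2.7 Gbps link (port_clock = 270000, in kHz; my arithmetic, not from the patch): tml_phy_lock = 1000 * 1000 * 252 * 10 / 270000 = 9333 ns, num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, 9333) + 1 = 9, so t2 = tcds = 83997 ns, and the total 12000 + 800 + 180 + 50000 + 83997 = 146977 ns rounds up to a 147 us AUX-less wake time.
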
+
+static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int aux_less_wake_time, aux_less_wake_lines, silence_period,
+ lfps_half_cycle;
+
+ aux_less_wake_time =
+ _lnl_compute_aux_less_wake_time(crtc_state->port_clock);
+ aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
+ aux_less_wake_time);
+
+ if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
+ &silence_period,
+ &lfps_half_cycle))
+ return false;
+
+ if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
+ silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
+ lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK)
+ return false;
+
+ if (i915->display.params.psr_safest_params)
+ aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
+
+ intel_dp->psr.alpm_parameters.fast_wake_lines = aux_less_wake_lines;
+ intel_dp->psr.alpm_parameters.silence_period_sym_clocks = silence_period;
+ intel_dp->psr.alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
+
+ return true;
+}
+
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -1142,6 +1312,9 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
if (check_entry_lines > 15)
return false;
+ if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state))
+ return false;
+
if (i915->display.params.psr_safest_params)
check_entry_lines = 15;
@@ -1150,28 +1323,52 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
return true;
}
+/*
+ * IO wake time for DISPLAY_VER < 12 is not directly mentioned in Bspec. There
+ * is a 50 us IO wake time and a 32 us fast wake time. Clearly the precharge
+ * pulses are not (improperly) included in the 32 us fast wake time:
+ * 50 us - 32 us = 18 us.
+ */
+static int skl_io_buffer_wake_time(void)
+{
+ return 18;
+}
+
+static int tgl_io_buffer_wake_time(void)
+{
+ return 10;
+}
+
+static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (DISPLAY_VER(i915) >= 12)
+ return tgl_io_buffer_wake_time();
+ else
+ return skl_io_buffer_wake_time();
+}
+
static bool _compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+ int tfw_exit_latency = 20; /* eDP spec */
+ int phy_wake = 4; /* eDP spec */
+ int preamble = 8; /* eDP spec */
+ int precharge = intel_dp_aux_fw_sync_len() - preamble;
u8 max_wake_lines;
- if (DISPLAY_VER(i915) >= 12) {
- io_wake_time = 42;
- /*
- * According to Bspec it's 42us, but based on testing
- * it is not enough -> use 45 us.
- */
- fast_wake_time = 45;
+ io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) +
+ preamble + phy_wake + tfw_exit_latency;
+ fast_wake_time = precharge + preamble + phy_wake +
+ tfw_exit_latency;
+ if (DISPLAY_VER(i915) >= 12)
/* TODO: Check how we can use ALPM_CTL fast wake extended field */
max_wake_lines = 12;
- } else {
- io_wake_time = 50;
- fast_wake_time = 32;
+ else
max_wake_lines = 8;
- }
io_wake_lines = intel_usecs_to_scanlines(
&crtc_state->hw.adjusted_mode, io_wake_time);
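
To relate the new formula to the hard-coded values it replaces (purely illustrative; intel_dp_aux_fw_sync_len() is not shown in this patch): if that helper returned, say, 18 symbols, then precharge = 18 - 8 = 10, and on DISPLAY_VER >= 12 io_wake_time = max(10, 10) + 8 + 4 + 20 = 42 us and fast_wake_time = 10 + 8 + 4 + 20 = 42 us, i.e. the same 42 us the removed code quoted from Bspec before bumping fast wake to 45 us based on testing.
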
@@ -1422,12 +1619,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /*
+ * FIXME figure out what is wrong with PSR+bigjoiner and
+ * fix it. Presumably something related to the fact that
+ * PSR is a transcoder level feature.
+ */
+ if (crtc_state->bigjoiner_pipes) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR disabled due to bigjoiner\n");
+ return;
+ }
+
if (CAN_PANEL_REPLAY(intel_dp))
crtc_state->has_panel_replay = true;
- else
- crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
- if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
+ crtc_state->has_psr = crtc_state->has_panel_replay ? true :
+ _psr_compute_config(intel_dp, crtc_state);
+
+ if (!crtc_state->has_psr)
return;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
@@ -1454,7 +1663,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
goto unlock;
if (intel_dp->psr.panel_replay_enabled) {
- pipe_config->has_panel_replay = true;
+ pipe_config->has_psr = pipe_config->has_panel_replay = true;
} else {
/*
* Not possible to read EDP_PSR/PSR2_CTL registers as it is
@@ -1559,14 +1768,44 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
+ u32 alpm_ctl;
- if (DISPLAY_VER(dev_priv) < 20)
+ if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.psr2_enabled &&
+ !intel_dp_is_edp(intel_dp)))
return;
- intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
- ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
- ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
- ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
+ /*
+	 * Panel Replay on eDP always uses AUX-less ALPM, i.e. there is no need
+	 * to check panel support at this point.
+ */
+ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
+ alpm_ctl = ALPM_CTL_ALPM_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE |
+ ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS;
+
+ intel_de_write(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
+ PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
+ PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
+ PORT_ALPM_CTL_SILENCE_PERIOD(
+ psr->alpm_parameters.silence_period_sym_clocks));
+
+ intel_de_write(dev_priv, PORT_ALPM_LFPS_CTL(cpu_transcoder),
+ PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
+ PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
+ psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
+ psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
+ psr->alpm_parameters.lfps_half_cycle_num_of_syms));
+ } else {
+ alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
+ ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines);
+ }
+
+ alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines);
+
+ intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder), alpm_ctl);
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
@@ -1574,7 +1813,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- u32 mask;
+ u32 mask = 0;
/*
* Only HSW and BDW have PSR AUX registers that need to be setup.
@@ -1588,34 +1827,46 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* mask LPSP to avoid dependency on other drivers that might block
* runtime_pm besides preventing other hw tracking issues now we
* can rely on frontbuffer tracking.
+ *
+	 * From Bspec prior to LunarLake:
+ * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
+ * panel replay mode.
+ *
+	 * From Bspec for LunarLake and beyond:
+ * Panel Replay on DP: No bits are applicable
+ * Panel Replay on eDP: All bits are applicable
*/
- mask = EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD;
+ if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
+ mask = EDP_PSR_DEBUG_MASK_HPD;
- /*
- * For some unknown reason on HSW non-ULT (or at least on
- * Dell Latitude E6540) external displays start to flicker
- * when PSR is enabled on the eDP. SR/PC6 residency is much
- * higher than should be possible with an external display.
- * As a workaround leave LPSP unmasked to prevent PSR entry
- * when external displays are active.
- */
- if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
- mask |= EDP_PSR_DEBUG_MASK_LPSP;
+ if (intel_dp_is_edp(intel_dp)) {
+ mask |= EDP_PSR_DEBUG_MASK_MEMUP;
- if (DISPLAY_VER(dev_priv) < 20)
- mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+ /*
+ * For some unknown reason on HSW non-ULT (or at least on
+ * Dell Latitude E6540) external displays start to flicker
+ * when PSR is enabled on the eDP. SR/PC6 residency is much
+ * higher than should be possible with an external display.
+ * As a workaround leave LPSP unmasked to prevent PSR entry
+ * when external displays are active.
+ */
+ if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
+ mask |= EDP_PSR_DEBUG_MASK_LPSP;
- /*
- * No separate pipe reg write mask on hsw/bdw, so have to unmask all
- * registers in order to keep the CURSURFLIVE tricks working :(
- */
- if (IS_DISPLAY_VER(dev_priv, 9, 10))
- mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+ if (DISPLAY_VER(dev_priv) < 20)
+ mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
- /* allow PSR with sprite enabled */
- if (IS_HASWELL(dev_priv))
- mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
+ /*
+ * No separate pipe reg write mask on hsw/bdw, so have to unmask all
+ * registers in order to keep the CURSURFLIVE tricks working :(
+ */
+ if (IS_DISPLAY_VER(dev_priv, 9, 10))
+ mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+ /* allow PSR with sprite enabled */
+ if (IS_HASWELL(dev_priv))
+ mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
+ }
intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
@@ -1634,7 +1885,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
- lnl_alpm_configure(intel_dp);
+ if (intel_dp_is_edp(intel_dp))
+ lnl_alpm_configure(intel_dp);
/*
* Wa_16013835468
@@ -1675,6 +1927,9 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
+ if (intel_dp->psr.panel_replay_enabled)
+ goto no_err;
+
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
* will still keep the error set even after the reset done in the
@@ -1692,6 +1947,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
return false;
}
+no_err:
return true;
}
@@ -1700,7 +1956,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
u32 val;
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
@@ -1722,14 +1977,22 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
if (!psr_interrupt_error_check(intel_dp))
return;
- if (intel_dp->psr.panel_replay_enabled)
+ if (intel_dp->psr.panel_replay_enabled) {
drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
- else
+ } else {
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
intel_dp->psr.psr2_enabled ? "2" : "1");
- intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
- intel_psr_enable_sink(intel_dp);
+ /*
+ * Panel replay has to be enabled before link training: doing it
+ * only for PSR here.
+ */
+ intel_psr_enable_sink(intel_dp, crtc_state);
+ }
+
+ if (intel_dp_is_edp(intel_dp))
+ intel_snps_phy_update_psr_power_state(&dig_port->base, true);
+
intel_psr_enable_source(intel_dp, crtc_state);
intel_dp->psr.enabled = true;
intel_dp->psr.paused = false;
@@ -1799,8 +2062,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- enum phy phy = intel_port_to_phy(dev_priv,
- dp_to_dig_port(intel_dp)->base.port);
lockdep_assert_held(&intel_dp->psr.lock);
@@ -1835,12 +2096,25 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
- intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
+ if (intel_dp_is_edp(intel_dp))
+ intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
+
+	/* Panel Replay on eDP always uses AUX-less ALPM. */
+ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
+ intel_de_rmw(dev_priv, ALPM_CTL(cpu_transcoder),
+ ALPM_CTL_ALPM_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ intel_de_rmw(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+ }
/* Disable PSR on Sink */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ intel_psr_get_enable_sink_offset(intel_dp), 0);
- if (intel_dp->psr.psr2_enabled)
+ if (!intel_dp->psr.panel_replay_enabled &&
+ intel_dp->psr.psr2_enabled)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
intel_dp->psr.enabled = false;
@@ -1888,7 +2162,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
return;
mutex_lock(&psr->lock);
@@ -1921,7 +2195,7 @@ void intel_psr_resume(struct intel_dp *intel_dp)
{
struct intel_psr *psr = &intel_dp->psr;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
return;
mutex_lock(&psr->lock);
@@ -1994,6 +2268,7 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
@@ -2013,6 +2288,12 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
crtc_state->psr2_man_track_ctl);
+
+ if (!crtc_state->enable_psr2_su_region_et)
+ return;
+
+ intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
+ crtc_state->pipe_srcsz_early_tpt);
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -2051,6 +2332,25 @@ exit:
crtc_state->psr2_man_track_ctl = val;
}
+static u32
+psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
+ bool full_update, bool cursor_in_su_area)
+{
+ int width, height;
+
+ if (!crtc_state->enable_psr2_su_region_et || full_update)
+ return 0;
+
+ if (!cursor_in_su_area)
+ return PIPESRC_WIDTH(0) |
+ PIPESRC_HEIGHT(drm_rect_height(&crtc_state->pipe_src));
+
+ width = drm_rect_width(&crtc_state->psr2_su_area);
+ height = drm_rect_height(&crtc_state->psr2_su_area);
+
+ return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
+}
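
PIPESRC_WIDTH()/PIPESRC_HEIGHT() follow the familiar size-minus-one PIPESRC encoding, so a 3840x2160 selective-update area (illustrative numbers, not from the patch) would be programmed as PIPESRC_WIDTH(3839) | PIPESRC_HEIGHT(2159), while the no-cursor case above programs a zero width together with the raw pipe-source height.
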
+
static void clip_area_update(struct drm_rect *overlap_damage_area,
struct drm_rect *damage_area,
struct drm_rect *pipe_src)
@@ -2095,21 +2395,38 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st
* cursor fully when cursor is in SU area.
*/
static void
-intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *cursor_state)
+intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ bool *cursor_in_su_area)
{
- struct drm_rect inter;
+ struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
- if (!crtc_state->enable_psr2_su_region_et ||
- !cursor_state->uapi.visible)
+ if (!crtc_state->enable_psr2_su_region_et)
return;
- inter = crtc_state->psr2_su_area;
- if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst))
- return;
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ struct drm_rect inter;
- clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst,
- &crtc_state->pipe_src);
+ if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
+ continue;
+
+ if (plane->id != PLANE_CURSOR)
+ continue;
+
+ if (!new_plane_state->uapi.visible)
+ continue;
+
+ inter = crtc_state->psr2_su_area;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ continue;
+
+ clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
+ &crtc_state->pipe_src);
+ *cursor_in_su_area = true;
+ }
}
/*
@@ -2152,10 +2469,9 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_plane_state *new_plane_state, *old_plane_state,
- *cursor_plane_state = NULL;
+ struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
- bool full_update = false;
+ bool full_update = false, cursor_in_su_area = false;
int i, ret;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -2238,13 +2554,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
-
- /*
- * Cursor plane new state is stored to adjust su area to cover
- * cursor are fully.
- */
- if (plane->id == PLANE_CURSOR)
- cursor_plane_state = new_plane_state;
}
/*
@@ -2273,9 +2582,13 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
- /* Adjust su area to cover cursor fully as necessary */
- if (cursor_plane_state)
- intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state);
+ /*
+ * Adjust the SU area to fully cover the cursor as needed for early
+ * transport. This must be done after drm_atomic_add_affected_planes()
+ * to ensure a visible cursor is included in the affected planes even
+ * when the cursor itself is not updated.
+ */
+ intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
intel_psr2_sel_fetch_pipe_alignment(crtc_state);
@@ -2338,6 +2651,9 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, full_update);
+ crtc_state->pipe_srcsz_early_tpt =
+ psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update,
+ cursor_in_su_area);
return 0;
}
@@ -2394,7 +2710,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
- if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
+ if (!crtc_state->has_psr)
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
@@ -2994,6 +3310,13 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
}
}
+/*
+ * These error status bits are identical for PSR and Panel Replay:
+ * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
+ * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
+ * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
+ * so this function relies on the PSR definitions for both.
+ */
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -3003,7 +3326,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
DP_PSR_LINK_CRC_ERROR;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
return;
mutex_lock(&psr->lock);
@@ -3017,12 +3340,14 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
goto exit;
}
- if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
+ if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
+ (error_status & errors)) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
}
- if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
+ if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
+ !error_status)
drm_dbg_kms(&dev_priv->drm,
"PSR sink internal error, disabling PSR\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
@@ -3042,8 +3367,10 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
- psr_alpm_check(intel_dp);
- psr_capability_changed_check(intel_dp);
+ if (!psr->panel_replay_enabled) {
+ psr_alpm_check(intel_dp);
+ psr_capability_changed_check(intel_dp);
+ }
exit:
mutex_unlock(&psr->lock);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index cde781df84d5..d483c85870e1 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -21,8 +21,13 @@ struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
+#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
+ (intel_dp)->psr.source_panel_replay_support)
+
bool intel_encoder_can_psr(struct intel_encoder *encoder);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
+void intel_psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr_post_plane_update(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 8427a736f639..ebc22999572c 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -9,7 +9,7 @@
#include "intel_display_reg_defs.h"
#include "intel_dp_aux_regs.h"
-#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A)
+#define TRANS_EXITLINE(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_EXITLINE_A)
#define EXITLINE_ENABLE REG_BIT(31)
#define EXITLINE_MASK REG_GENMASK(12, 0)
#define EXITLINE_SHIFT 0
@@ -23,7 +23,7 @@
#define HSW_SRD_CTL _MMIO(0x64800)
#define _SRD_CTL_A 0x60800
#define _SRD_CTL_EDP 0x6f800
-#define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A)
+#define EDP_PSR_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_CTL_A)
#define EDP_PSR_ENABLE REG_BIT(31)
#define BDW_PSR_SINGLE_FRAME REG_BIT(30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK REG_BIT(29) /* SW can't modify */
@@ -66,8 +66,8 @@
#define EDP_PSR_IIR _MMIO(0x64838)
#define _PSR_IMR_A 0x60814
#define _PSR_IIR_A 0x60818
-#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A)
-#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
+#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_IMR_A)
+#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_IIR_A)
#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
0 : ((trans) - TRANSCODER_A + 1) * 8)
#define TGL_PSR_MASK REG_GENMASK(2, 0)
@@ -86,7 +86,7 @@
#define HSW_SRD_AUX_CTL _MMIO(0x64810)
#define _SRD_AUX_CTL_A 0x60810
#define _SRD_AUX_CTL_EDP 0x6f810
-#define EDP_PSR_AUX_CTL(tran) _MMIO_TRANS2(tran, _SRD_AUX_CTL_A)
+#define EDP_PSR_AUX_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_AUX_CTL_A)
#define EDP_PSR_AUX_CTL_TIME_OUT_MASK DP_AUX_CH_CTL_TIME_OUT_MASK
#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK DP_AUX_CH_CTL_PRECHARGE_2US_MASK
@@ -96,12 +96,12 @@
#define HSW_SRD_AUX_DATA(i) _MMIO(0x64814 + (i) * 4) /* 5 registers */
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
-#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(dev_priv, tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */
#define HSW_SRD_STATUS _MMIO(0x64840)
#define _SRD_STATUS_A 0x60840
#define _SRD_STATUS_EDP 0x6f840
-#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A)
+#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_STATUS_A)
#define EDP_PSR_STATUS_STATE_MASK REG_GENMASK(31, 29)
#define EDP_PSR_STATUS_STATE_IDLE REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 0)
#define EDP_PSR_STATUS_STATE_SRDONACK REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 1)
@@ -126,14 +126,14 @@
#define HSW_SRD_PERF_CNT _MMIO(0x64844)
#define _SRD_PERF_CNT_A 0x60844
#define _SRD_PERF_CNT_EDP 0x6f844
-#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A)
+#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_PERF_CNT_A)
#define EDP_PSR_PERF_CNT_MASK REG_GENMASK(23, 0)
/* PSR_MASK on SKL+ */
#define HSW_SRD_DEBUG _MMIO(0x64860)
#define _SRD_DEBUG_A 0x60860
#define _SRD_DEBUG_EDP 0x6f860
-#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A)
+#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_DEBUG_A)
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP REG_BIT(28)
#define EDP_PSR_DEBUG_MASK_LPSP REG_BIT(27)
#define EDP_PSR_DEBUG_MASK_MEMUP REG_BIT(26)
@@ -153,7 +153,7 @@
#define _PSR2_CTL_A 0x60900
#define _PSR2_CTL_EDP 0x6f900
-#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
+#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_CTL_A)
#define EDP_PSR2_ENABLE REG_BIT(31)
#define EDP_SU_TRACK_ENABLE REG_BIT(30) /* up to adl-p */
#define TGL_EDP_PSR2_BLOCK_COUNT_MASK REG_BIT(28)
@@ -195,7 +195,7 @@
#define _PSR_EVENT_TRANS_C 0x62848
#define _PSR_EVENT_TRANS_D 0x63848
#define _PSR_EVENT_TRANS_EDP 0x6f848
-#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A)
+#define PSR_EVENT(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_EVENT_TRANS_A)
#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE REG_BIT(17)
#define PSR_EVENT_PSR2_DISABLED REG_BIT(16)
#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN REG_BIT(15)
@@ -215,13 +215,13 @@
#define _PSR2_STATUS_A 0x60940
#define _PSR2_STATUS_EDP 0x6f940
-#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
+#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_STATUS_A)
#define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28)
#define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8)
#define _PSR2_SU_STATUS_A 0x60914
#define _PSR2_SU_STATUS_EDP 0x6f914
-#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(tran, _PSR2_SU_STATUS_A + (index) * 4)
+#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(dev_priv, tran, _PSR2_SU_STATUS_A + (index) * 4)
#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3))
#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
@@ -229,7 +229,7 @@
#define _PSR2_MAN_TRK_CTL_A 0x60910
#define _PSR2_MAN_TRK_CTL_EDP 0x6f910
-#define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A)
+#define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_MAN_TRK_CTL_A)
#define PSR2_MAN_TRK_CTL_ENABLE REG_BIT(31)
#define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(30, 21)
#define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
@@ -249,7 +249,7 @@
/* PSR2 Early transport */
#define _PIPE_SRCSZ_ERLY_TPT_A 0x70074
-#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(trans, _PIPE_SRCSZ_ERLY_TPT_A)
+#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(dev_priv, trans, _PIPE_SRCSZ_ERLY_TPT_A)
#define _SEL_FETCH_PLANE_BASE_1_A 0x70890
#define _SEL_FETCH_PLANE_BASE_2_A 0x708B0
@@ -297,7 +297,7 @@
_SEL_FETCH_PLANE_BASE_1_A)
#define _ALPM_CTL_A 0x60950
-#define ALPM_CTL(tran) _MMIO_TRANS2(tran, _ALPM_CTL_A)
+#define ALPM_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL_A)
#define ALPM_CTL_ALPM_ENABLE REG_BIT(31)
#define ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(30)
#define ALPM_CTL_LOBF_ENABLE REG_BIT(29)
@@ -321,7 +321,7 @@
#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val)
#define _ALPM_CTL2_A 0x60954
-#define ALPM_CTL2(tran) _MMIO_TRANS2(tran, _ALPM_CTL2_A)
+#define ALPM_CTL2(tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL2_A)
#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK REG_GENMASK(28, 24)
#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY(val) REG_FIELD_PREP(ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK, val)
#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK REG_GENMASK(19, 16)
@@ -335,7 +335,7 @@
#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK, val)
#define _PORT_ALPM_CTL_A 0x16fa2c
-#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_CTL_A)
+#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PORT_ALPM_CTL_A)
#define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31)
#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20)
#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val)
@@ -345,12 +345,16 @@
#define PORT_ALPM_CTL_SILENCE_PERIOD(val) REG_FIELD_PREP(PORT_ALPM_CTL_SILENCE_PERIOD_MASK, val)
#define _PORT_ALPM_LFPS_CTL_A 0x16fa30
-#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_LFPS_CTL_A)
+#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PORT_ALPM_LFPS_CTL_A)
#define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31)
#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24)
-#define ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES 5
-#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME(lines) REG_FIELD_PREP(ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK, (lines) - ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES)
-#define ALPM_CTL_AUX_LESS_WAKE_TIME_MASK REG_GENMASK(5, 0)
-#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val)
+#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN 7
+#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK, (val) - PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN)
+#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(20, 16)
+#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
+#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(12, 8)
+#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK, val)
+#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(4, 0)
+#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK, val)
#endif /* __INTEL_PSR_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index a280448df771..14d5fefc9c5b 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -9,72 +9,72 @@
#include "intel_display_types.h"
#include "intel_quirks.h"
-static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+static void intel_set_quirk(struct intel_display *display, enum intel_quirk_id quirk)
{
- i915->display.quirks.mask |= BIT(quirk);
+ display->quirks.mask |= BIT(quirk);
}
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
-static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+static void quirk_ssc_force_disable(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
- drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
+ intel_set_quirk(display, QUIRK_LVDS_SSC_DISABLE);
+ drm_info(display->drm, "applying lvds SSC disable quirk\n");
}
/*
* A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
* brightness value
*/
-static void quirk_invert_brightness(struct drm_i915_private *i915)
+static void quirk_invert_brightness(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS);
- drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
+ intel_set_quirk(display, QUIRK_INVERT_BRIGHTNESS);
+ drm_info(display->drm, "applying inverted panel brightness quirk\n");
}
/* Some VBT's incorrectly indicate no backlight is present */
-static void quirk_backlight_present(struct drm_i915_private *i915)
+static void quirk_backlight_present(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT);
- drm_info(&i915->drm, "applying backlight present quirk\n");
+ intel_set_quirk(display, QUIRK_BACKLIGHT_PRESENT);
+ drm_info(display->drm, "applying backlight present quirk\n");
}
/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
* which is 300 ms greater than eDP spec T12 min.
*/
-static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+static void quirk_increase_t12_delay(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY);
- drm_info(&i915->drm, "Applying T12 delay quirk\n");
+ intel_set_quirk(display, QUIRK_INCREASE_T12_DELAY);
+ drm_info(display->drm, "Applying T12 delay quirk\n");
}
/*
* GeminiLake NUC HDMI outputs require additional off time
* this allows the onboard retimer to correctly sync to signal
*/
-static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+static void quirk_increase_ddi_disabled_time(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME);
- drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
+ intel_set_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME);
+ drm_info(display->drm, "Applying Increase DDI Disabled quirk\n");
}
-static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915)
+static void quirk_no_pps_backlight_power_hook(struct intel_display *display)
{
- intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
- drm_info(&i915->drm, "Applying no pps backlight power quirk\n");
+ intel_set_quirk(display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
+ drm_info(display->drm, "Applying no pps backlight power quirk\n");
}
struct intel_quirk {
int device;
int subsystem_vendor;
int subsystem_device;
- void (*hook)(struct drm_i915_private *i915);
+ void (*hook)(struct intel_display *display);
};
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
- void (*hook)(struct drm_i915_private *i915);
+ void (*hook)(struct intel_display *display);
const struct dmi_system_id (*dmi_id_list)[];
};
@@ -203,9 +203,9 @@ static struct intel_quirk intel_quirks[] = {
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};
-void intel_init_quirks(struct drm_i915_private *i915)
+void intel_init_quirks(struct intel_display *display)
{
- struct pci_dev *d = to_pci_dev(i915->drm.dev);
+ struct pci_dev *d = to_pci_dev(display->drm->dev);
int i;
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
@@ -216,15 +216,15 @@ void intel_init_quirks(struct drm_i915_private *i915)
q->subsystem_vendor == PCI_ANY_ID) &&
(d->subsystem_device == q->subsystem_device ||
q->subsystem_device == PCI_ANY_ID))
- q->hook(i915);
+ q->hook(display);
}
for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
- intel_dmi_quirks[i].hook(i915);
+ intel_dmi_quirks[i].hook(display);
}
}
-bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk)
{
- return i915->display.quirks.mask & BIT(quirk);
+ return display->quirks.mask & BIT(quirk);
}
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index 10a4d163149f..151c8f4ae576 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
enum intel_quirk_id {
QUIRK_BACKLIGHT_PRESENT,
@@ -19,7 +19,7 @@ enum intel_quirk_id {
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
};
-void intel_init_quirks(struct drm_i915_private *i915);
-bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk);
+void intel_init_quirks(struct intel_display *display);
+bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk);
#endif /* __INTEL_QUIRKS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 5f9e748adc89..d0d712405129 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -193,7 +193,7 @@ to_intel_sdvo_connector(struct drm_connector *connector)
}
#define to_intel_sdvo_connector_state(conn_state) \
- container_of((conn_state), struct intel_sdvo_connector_state, base.base)
+ container_of_const((conn_state), struct intel_sdvo_connector_state, base.base)
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo);
@@ -1842,8 +1842,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- encoder->audio_disable(encoder, old_crtc_state, conn_state);
-
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1935,8 +1933,6 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag);
-
- encoder->audio_enable(encoder, pipe_config, conn_state);
}
static enum drm_mode_status
@@ -1948,7 +1944,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
- int max_dotclk = i915->max_dotclk_freq;
+ int max_dotclk = i915->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
int clock = mode->clock;
@@ -1956,9 +1952,6 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -2382,7 +2375,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
u64 *val)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state);
+ const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
if (property == intel_sdvo_connector->tv_format) {
int i;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index bc61e736f9b3..e6df1f92def5 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -44,12 +44,14 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
}
}
-void intel_snps_phy_update_psr_power_state(struct drm_i915_private *i915,
- enum phy phy, bool enable)
+void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
+ bool enable)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
- if (!intel_phy_is_snps(i915, phy))
+ if (!intel_encoder_is_snps(encoder))
return;
val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR,
@@ -63,7 +65,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
@@ -1809,7 +1811,7 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->mpllb_state = *tables[i];
+ crtc_state->dpll_hw_state.mpllb = *tables[i];
return 0;
}
}
@@ -1821,8 +1823,8 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_mpllb_state *pll_state = &crtc_state->mpllb_state;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ const struct intel_mpllb_state *pll_state = &crtc_state->dpll_hw_state.mpllb;
+ enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t enable_reg = (phy <= PHY_D ?
DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
@@ -1879,7 +1881,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
void intel_mpllb_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t enable_reg = (phy <= PHY_D ?
DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
@@ -1951,7 +1953,7 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
struct intel_mpllb_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ enum phy phy = intel_encoder_to_phy(encoder);
pll_state->mpllb_cp = intel_de_read(dev_priv, SNPS_PHY_MPLLB_CP(phy));
pll_state->mpllb_div = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV(phy));
@@ -1999,7 +2001,7 @@ void intel_mpllb_state_verify(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_mpllb_state mpllb_hw_state = {};
- const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
+ const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->dpll_hw_state.mpllb;
struct intel_encoder *encoder;
if (!IS_DG2(i915))
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
index 515abf7c5902..bc08b92a7cd9 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -17,8 +17,8 @@ struct intel_mpllb_state;
enum phy;
void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv);
-void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
- enum phy phy, bool enable);
+void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
+ bool enable);
int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index d7b440c8caef..36a253a19c74 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -47,6 +47,7 @@
#include "intel_fb.h"
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
+#include "intel_sprite_regs.h"
static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite)
{
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_regs.h b/drivers/gpu/drm/i915/display/intel_sprite_regs.h
new file mode 100644
index 000000000000..bb67705652b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite_regs.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __INTEL_SPRITE_REGS__
+#define __INTEL_SPRITE_REGS__
+
+#include "intel_display_reg_defs.h"
+
+#define _DVSACNTR 0x72180
+#define DVS_ENABLE REG_BIT(31)
+#define DVS_PIPE_GAMMA_ENABLE REG_BIT(30)
+#define DVS_YUV_RANGE_CORRECTION_DISABLE REG_BIT(27)
+#define DVS_FORMAT_MASK REG_GENMASK(26, 25)
+#define DVS_FORMAT_YUV422 REG_FIELD_PREP(DVS_FORMAT_MASK, 0)
+#define DVS_FORMAT_RGBX101010 REG_FIELD_PREP(DVS_FORMAT_MASK, 1)
+#define DVS_FORMAT_RGBX888 REG_FIELD_PREP(DVS_FORMAT_MASK, 2)
+#define DVS_FORMAT_RGBX161616 REG_FIELD_PREP(DVS_FORMAT_MASK, 3)
+#define DVS_PIPE_CSC_ENABLE REG_BIT(24)
+#define DVS_SOURCE_KEY REG_BIT(22)
+#define DVS_RGB_ORDER_XBGR REG_BIT(20)
+#define DVS_YUV_FORMAT_BT709 REG_BIT(18)
+#define DVS_YUV_ORDER_MASK REG_GENMASK(17, 16)
+#define DVS_YUV_ORDER_YUYV REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 0)
+#define DVS_YUV_ORDER_UYVY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 1)
+#define DVS_YUV_ORDER_YVYU REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 2)
+#define DVS_YUV_ORDER_VYUY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 3)
+#define DVS_ROTATE_180 REG_BIT(15)
+#define DVS_TRICKLE_FEED_DISABLE REG_BIT(14)
+#define DVS_TILED REG_BIT(10)
+#define DVS_DEST_KEY REG_BIT(2)
+#define _DVSALINOFF 0x72184
+#define _DVSASTRIDE 0x72188
+#define _DVSAPOS 0x7218c
+#define DVS_POS_Y_MASK REG_GENMASK(31, 16)
+#define DVS_POS_Y(y) REG_FIELD_PREP(DVS_POS_Y_MASK, (y))
+#define DVS_POS_X_MASK REG_GENMASK(15, 0)
+#define DVS_POS_X(x) REG_FIELD_PREP(DVS_POS_X_MASK, (x))
+#define _DVSASIZE 0x72190
+#define DVS_HEIGHT_MASK REG_GENMASK(31, 16)
+#define DVS_HEIGHT(h) REG_FIELD_PREP(DVS_HEIGHT_MASK, (h))
+#define DVS_WIDTH_MASK REG_GENMASK(15, 0)
+#define DVS_WIDTH(w) REG_FIELD_PREP(DVS_WIDTH_MASK, (w))
+#define _DVSAKEYVAL 0x72194
+#define _DVSAKEYMSK 0x72198
+#define _DVSASURF 0x7219c
+#define DVS_ADDR_MASK REG_GENMASK(31, 12)
+#define _DVSAKEYMAXVAL 0x721a0
+#define _DVSATILEOFF 0x721a4
+#define DVS_OFFSET_Y_MASK REG_GENMASK(31, 16)
+#define DVS_OFFSET_Y(y) REG_FIELD_PREP(DVS_OFFSET_Y_MASK, (y))
+#define DVS_OFFSET_X_MASK REG_GENMASK(15, 0)
+#define DVS_OFFSET_X(x) REG_FIELD_PREP(DVS_OFFSET_X_MASK, (x))
+#define _DVSASURFLIVE 0x721ac
+#define _DVSAGAMC_G4X 0x721e0 /* g4x */
+#define _DVSASCALE 0x72204
+#define DVS_SCALE_ENABLE REG_BIT(31)
+#define DVS_FILTER_MASK REG_GENMASK(30, 29)
+#define DVS_FILTER_MEDIUM REG_FIELD_PREP(DVS_FILTER_MASK, 0)
+#define DVS_FILTER_ENHANCING REG_FIELD_PREP(DVS_FILTER_MASK, 1)
+#define DVS_FILTER_SOFTENING REG_FIELD_PREP(DVS_FILTER_MASK, 2)
+#define DVS_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE REG_BIT(27)
+#define DVS_SRC_WIDTH_MASK REG_GENMASK(26, 16)
+#define DVS_SRC_WIDTH(w) REG_FIELD_PREP(DVS_SRC_WIDTH_MASK, (w))
+#define DVS_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
+#define DVS_SRC_HEIGHT(h) REG_FIELD_PREP(DVS_SRC_HEIGHT_MASK, (h))
+#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */
+#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */
+
+#define _DVSBCNTR 0x73180
+#define _DVSBLINOFF 0x73184
+#define _DVSBSTRIDE 0x73188
+#define _DVSBPOS 0x7318c
+#define _DVSBSIZE 0x73190
+#define _DVSBKEYVAL 0x73194
+#define _DVSBKEYMSK 0x73198
+#define _DVSBSURF 0x7319c
+#define _DVSBKEYMAXVAL 0x731a0
+#define _DVSBTILEOFF 0x731a4
+#define _DVSBSURFLIVE 0x731ac
+#define _DVSBGAMC_G4X 0x731e0 /* g4x */
+#define _DVSBSCALE 0x73204
+#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */
+#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */
+
+#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */
+#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */
+#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */
+
+#define _SPRA_CTL 0x70280
+#define SPRITE_ENABLE REG_BIT(31)
+#define SPRITE_PIPE_GAMMA_ENABLE REG_BIT(30)
+#define SPRITE_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28)
+#define SPRITE_FORMAT_MASK REG_GENMASK(27, 25)
+#define SPRITE_FORMAT_YUV422 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 0)
+#define SPRITE_FORMAT_RGBX101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 1)
+#define SPRITE_FORMAT_RGBX888 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 2)
+#define SPRITE_FORMAT_RGBX161616 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 3)
+#define SPRITE_FORMAT_YUV444 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 4)
+#define SPRITE_FORMAT_XR_BGR101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 5) /* Extended range */
+#define SPRITE_PIPE_CSC_ENABLE REG_BIT(24)
+#define SPRITE_SOURCE_KEY REG_BIT(22)
+#define SPRITE_RGB_ORDER_RGBX REG_BIT(20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE REG_BIT(19)
+#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) /* 0 is BT601 */
+#define SPRITE_YUV_ORDER_MASK REG_GENMASK(17, 16)
+#define SPRITE_YUV_ORDER_YUYV REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 0)
+#define SPRITE_YUV_ORDER_UYVY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 1)
+#define SPRITE_YUV_ORDER_YVYU REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 2)
+#define SPRITE_YUV_ORDER_VYUY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 3)
+#define SPRITE_ROTATE_180 REG_BIT(15)
+#define SPRITE_TRICKLE_FEED_DISABLE REG_BIT(14)
+#define SPRITE_PLANE_GAMMA_DISABLE REG_BIT(13)
+#define SPRITE_TILED REG_BIT(10)
+#define SPRITE_DEST_KEY REG_BIT(2)
+#define _SPRA_LINOFF 0x70284
+#define _SPRA_STRIDE 0x70288
+#define _SPRA_POS 0x7028c
+#define SPRITE_POS_Y_MASK REG_GENMASK(31, 16)
+#define SPRITE_POS_Y(y) REG_FIELD_PREP(SPRITE_POS_Y_MASK, (y))
+#define SPRITE_POS_X_MASK REG_GENMASK(15, 0)
+#define SPRITE_POS_X(x) REG_FIELD_PREP(SPRITE_POS_X_MASK, (x))
+#define _SPRA_SIZE 0x70290
+#define SPRITE_HEIGHT_MASK REG_GENMASK(31, 16)
+#define SPRITE_HEIGHT(h) REG_FIELD_PREP(SPRITE_HEIGHT_MASK, (h))
+#define SPRITE_WIDTH_MASK REG_GENMASK(15, 0)
+#define SPRITE_WIDTH(w) REG_FIELD_PREP(SPRITE_WIDTH_MASK, (w))
+#define _SPRA_KEYVAL 0x70294
+#define _SPRA_KEYMSK 0x70298
+#define _SPRA_SURF 0x7029c
+#define SPRITE_ADDR_MASK REG_GENMASK(31, 12)
+#define _SPRA_KEYMAX 0x702a0
+#define _SPRA_TILEOFF 0x702a4
+#define SPRITE_OFFSET_Y_MASK REG_GENMASK(31, 16)
+#define SPRITE_OFFSET_Y(y) REG_FIELD_PREP(SPRITE_OFFSET_Y_MASK, (y))
+#define SPRITE_OFFSET_X_MASK REG_GENMASK(15, 0)
+#define SPRITE_OFFSET_X(x) REG_FIELD_PREP(SPRITE_OFFSET_X_MASK, (x))
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
+#define _SPRA_SCALE 0x70304
+#define SPRITE_SCALE_ENABLE REG_BIT(31)
+#define SPRITE_FILTER_MASK REG_GENMASK(30, 29)
+#define SPRITE_FILTER_MEDIUM REG_FIELD_PREP(SPRITE_FILTER_MASK, 0)
+#define SPRITE_FILTER_ENHANCING REG_FIELD_PREP(SPRITE_FILTER_MASK, 1)
+#define SPRITE_FILTER_SOFTENING REG_FIELD_PREP(SPRITE_FILTER_MASK, 2)
+#define SPRITE_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE REG_BIT(27)
+#define SPRITE_SRC_WIDTH_MASK REG_GENMASK(26, 16)
+#define SPRITE_SRC_WIDTH(w) REG_FIELD_PREP(SPRITE_SRC_WIDTH_MASK, (w))
+#define SPRITE_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
+#define SPRITE_SRC_HEIGHT(h) REG_FIELD_PREP(SPRITE_SRC_HEIGHT_MASK, (h))
+#define _SPRA_GAMC 0x70400
+#define _SPRA_GAMC16 0x70440
+#define _SPRA_GAMC17 0x7044c
+
+#define _SPRB_CTL 0x71280
+#define _SPRB_LINOFF 0x71284
+#define _SPRB_STRIDE 0x71288
+#define _SPRB_POS 0x7128c
+#define _SPRB_SIZE 0x71290
+#define _SPRB_KEYVAL 0x71294
+#define _SPRB_KEYMSK 0x71298
+#define _SPRB_SURF 0x7129c
+#define _SPRB_KEYMAX 0x712a0
+#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
+#define _SPRB_SCALE 0x71304
+#define _SPRB_GAMC 0x71400
+#define _SPRB_GAMC16 0x71440
+#define _SPRB_GAMC17 0x7144c
+
+#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
+#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */
+#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */
+#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */
+#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
+
+#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
+#define SP_ENABLE REG_BIT(31)
+#define SP_PIPE_GAMMA_ENABLE REG_BIT(30)
+#define SP_FORMAT_MASK REG_GENMASK(29, 26)
+#define SP_FORMAT_YUV422 REG_FIELD_PREP(SP_FORMAT_MASK, 0)
+#define SP_FORMAT_8BPP REG_FIELD_PREP(SP_FORMAT_MASK, 2)
+#define SP_FORMAT_BGR565 REG_FIELD_PREP(SP_FORMAT_MASK, 5)
+#define SP_FORMAT_BGRX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 6)
+#define SP_FORMAT_BGRA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 7)
+#define SP_FORMAT_RGBX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 8)
+#define SP_FORMAT_RGBA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 9)
+#define SP_FORMAT_BGRX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 10) /* CHV pipe B */
+#define SP_FORMAT_BGRA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 11) /* CHV pipe B */
+#define SP_FORMAT_RGBX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 14)
+#define SP_FORMAT_RGBA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 15)
+#define SP_ALPHA_PREMULTIPLY REG_BIT(23) /* CHV pipe B */
+#define SP_SOURCE_KEY REG_BIT(22)
+#define SP_YUV_FORMAT_BT709 REG_BIT(18)
+#define SP_YUV_ORDER_MASK REG_GENMASK(17, 16)
+#define SP_YUV_ORDER_YUYV REG_FIELD_PREP(SP_YUV_ORDER_MASK, 0)
+#define SP_YUV_ORDER_UYVY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 1)
+#define SP_YUV_ORDER_YVYU REG_FIELD_PREP(SP_YUV_ORDER_MASK, 2)
+#define SP_YUV_ORDER_VYUY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 3)
+#define SP_ROTATE_180 REG_BIT(15)
+#define SP_TILED REG_BIT(10)
+#define SP_MIRROR REG_BIT(8) /* CHV pipe B */
+#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
+#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
+#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
+#define SP_POS_Y_MASK REG_GENMASK(31, 16)
+#define SP_POS_Y(y) REG_FIELD_PREP(SP_POS_Y_MASK, (y))
+#define SP_POS_X_MASK REG_GENMASK(15, 0)
+#define SP_POS_X(x) REG_FIELD_PREP(SP_POS_X_MASK, (x))
+#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
+#define SP_HEIGHT_MASK REG_GENMASK(31, 16)
+#define SP_HEIGHT(h) REG_FIELD_PREP(SP_HEIGHT_MASK, (h))
+#define SP_WIDTH_MASK REG_GENMASK(15, 0)
+#define SP_WIDTH(w) REG_FIELD_PREP(SP_WIDTH_MASK, (w))
+#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
+#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
+#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
+#define SP_ADDR_MASK REG_GENMASK(31, 12)
+#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
+#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
+#define SP_OFFSET_Y_MASK REG_GENMASK(31, 16)
+#define SP_OFFSET_Y(y) REG_FIELD_PREP(SP_OFFSET_Y_MASK, (y))
+#define SP_OFFSET_X_MASK REG_GENMASK(15, 0)
+#define SP_OFFSET_X(x) REG_FIELD_PREP(SP_OFFSET_X_MASK, (x))
+#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
+#define SP_CONST_ALPHA_ENABLE REG_BIT(31)
+#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0)
+#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha))
+#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac)
+#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
+#define SP_CONTRAST_MASK REG_GENMASK(26, 18)
+#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */
+#define SP_BRIGHTNESS_MASK REG_GENMASK(7, 0)
+#define SP_BRIGHTNESS(x) REG_FIELD_PREP(SP_BRIGHTNESS_MASK, (x)) /* s8 */
+#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
+#define SP_SH_SIN_MASK REG_GENMASK(26, 16)
+#define SP_SH_SIN(x) REG_FIELD_PREP(SP_SH_SIN_MASK, (x)) /* s4.7 */
+#define SP_SH_COS_MASK REG_GENMASK(9, 0)
+#define SP_SH_COS(x) REG_FIELD_PREP(SP_SH_COS_MASK, (x)) /* u3.7 */
+#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0)
+
+#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
+#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
+#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
+#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
+#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
+#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
+#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
+#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
+#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
+#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
+#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
+#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac)
+#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
+#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
+#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
+
+#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
+#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b)))
+
+#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE)
+#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
+#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
+#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
+
+/*
+ * CHV pipe B sprite CSC
+ *
+ * |cr|   |c0 c1 c2|   |cr + cr_ioff|   |cr_ooff|
+ * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
+ * |cb|   |c6 c7 c8|   |cb + cb_ioff|   |cb_ooff|
+ */
+#define _MMIO_CHV_SPCSC(plane_id, reg) \
+ _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg))
+
+#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900)
+#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904)
+#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908)
+#define SPCSC_OOFF_MASK REG_GENMASK(26, 16)
+#define SPCSC_OOFF(x) REG_FIELD_PREP(SPCSC_OOFF_MASK, (x) & 0x7ff) /* s11 */
+#define SPCSC_IOFF_MASK REG_GENMASK(10, 0)
+#define SPCSC_IOFF(x) REG_FIELD_PREP(SPCSC_IOFF_MASK, (x) & 0x7ff) /* s11 */
+
+#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c)
+#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910)
+#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914)
+#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918)
+#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c)
+#define SPCSC_C1_MASK REG_GENMASK(30, 16)
+#define SPCSC_C1(x) REG_FIELD_PREP(SPCSC_C1_MASK, (x) & 0x7fff) /* s3.12 */
+#define SPCSC_C0_MASK REG_GENMASK(14, 0)
+#define SPCSC_C0(x) REG_FIELD_PREP(SPCSC_C0_MASK, (x) & 0x7fff) /* s3.12 */
+
+#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920)
+#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924)
+#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928)
+#define SPCSC_IMAX_MASK REG_GENMASK(26, 16)
+#define SPCSC_IMAX(x) REG_FIELD_PREP(SPCSC_IMAX_MASK, (x) & 0x7ff) /* s11 */
+#define SPCSC_IMIN_MASK REG_GENMASK(10, 0)
+#define SPCSC_IMIN(x) REG_FIELD_PREP(SPCSC_IMIN_MASK, (x) & 0x7ff) /* s11 */
+
+#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c)
+#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930)
+#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934)
+#define SPCSC_OMAX_MASK REG_GENMASK(25, 16)
+#define SPCSC_OMAX(x) REG_FIELD_PREP(SPCSC_OMAX_MASK, (x)) /* u10 */
+#define SPCSC_OMIN_MASK REG_GENMASK(9, 0)
+#define SPCSC_OMIN(x) REG_FIELD_PREP(SPCSC_OMIN_MASK, (x)) /* u10 */
+
+#endif /* __INTEL_SPRITE_REGS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 6b374d481cd9..9887967b2ca5 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -100,11 +100,9 @@ static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
enum tc_port_mode mode)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
struct intel_tc_port *tc = to_tc_port(dig_port);
- return intel_phy_is_tc(i915, phy) && tc->mode == mode;
+ return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
}
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
@@ -124,11 +122,9 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
struct intel_tc_port *tc = to_tc_port(dig_port);
- return intel_phy_is_tc(i915, phy) && !tc->legacy_port;
+ return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
}
/*
@@ -254,8 +250,7 @@ assert_tc_cold_blocked(struct intel_tc_port *tc)
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
- enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
@@ -302,7 +297,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
intel_wakeref_t wakeref;
u32 val, pin_assignment;
@@ -375,9 +370,8 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
+ if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
return 4;
assert_tc_cold_blocked(tc);
@@ -458,9 +452,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
- enum port port = tc->dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
/*
* Each Modular FIA instance houses 2 TC ports. In SOC that has more
@@ -812,7 +804,7 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
- enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
u32 val;
assert_display_core_power_enabled(tc);
@@ -1635,10 +1627,7 @@ static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
- if (!intel_phy_is_tc(i915, phy))
+ if (!intel_encoder_is_tc(&dig_port->base))
return false;
return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
@@ -1740,11 +1729,9 @@ bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
struct intel_tc_port *tc = to_tc_port(dig_port);
- if (!intel_phy_is_tc(i915, phy))
+ if (!intel_encoder_is_tc(&dig_port->base))
return;
cancel_delayed_work(&tc->link_reset_work);
@@ -1861,7 +1848,7 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc;
enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
+ enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 2b77d399f1a1..9df0f1263913 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -885,7 +885,8 @@ struct intel_tv_connector_state {
bool bypass_vfilter;
};
-#define to_intel_tv_connector_state(x) container_of(x, struct intel_tv_connector_state, base)
+#define to_intel_tv_connector_state(conn_state) \
+ container_of_const((conn_state), struct intel_tv_connector_state, base)
static struct drm_connector_state *
intel_tv_connector_duplicate_state(struct drm_connector *connector)
@@ -961,16 +962,13 @@ intel_tv_mode_valid(struct drm_connector *connector,
{
struct drm_i915_private *i915 = to_i915(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
- int max_dotclk = i915->max_dotclk_freq;
+ int max_dotclk = i915->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index a9f44abfc9fc..228702c0e492 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -485,6 +485,7 @@ struct child_device_config {
u8 hdmi_iboost_level:4; /* 196+ */
u8 dp_max_link_rate:3; /* 216+ */
u8 dp_max_link_rate_reserved:5; /* 216+ */
+ u8 efp_index; /* 256+ */
} __packed;
struct bdb_general_definitions {
@@ -602,22 +603,22 @@ struct bdb_driver_features {
u8 custom_vbt_version; /* 155+ */
/* Driver Feature Flags */
- u16 rmpm_enabled:1; /* 165+ */
- u16 s2ddt_enabled:1; /* 165+ */
- u16 dpst_enabled:1; /* 165-227 */
- u16 bltclt_enabled:1; /* 165+ */
- u16 adb_enabled:1; /* 165-227 */
- u16 drrs_enabled:1; /* 165-227 */
- u16 grs_enabled:1; /* 165+ */
- u16 gpmt_enabled:1; /* 165+ */
- u16 tbt_enabled:1; /* 165+ */
+ u16 rmpm_enabled:1; /* 159+ */
+ u16 s2ddt_enabled:1; /* 159+ */
+ u16 dpst_enabled:1; /* 159-227 */
+ u16 bltclt_enabled:1; /* 159+ */
+ u16 adb_enabled:1; /* 159-227 */
+ u16 drrs_enabled:1; /* 159-227 */
+ u16 grs_enabled:1; /* 159+ */
+ u16 gpmt_enabled:1; /* 159+ */
+ u16 tbt_enabled:1; /* 159+ */
u16 psr_enabled:1; /* 165-227 */
u16 ips_enabled:1; /* 165+ */
- u16 dpfs_enabled:1; /* 165+ */
+ u16 dfps_enabled:1; /* 165+ */
u16 dmrrs_enabled:1; /* 174-227 */
u16 adt_enabled:1; /* ???-228 */
u16 hpd_wake:1; /* 201-240 */
- u16 pc_feature_valid:1;
+ u16 pc_feature_valid:1; /* 159+ */
} __packed;
/*
@@ -880,11 +881,12 @@ struct bdb_lvds_lfp_data_tail {
struct lfp_backlight_data_entry {
u8 type:2;
u8 active_low_pwm:1;
- u8 obsolete1:5;
+ u8 i2c_pin:3; /* obsolete since ? */
+ u8 i2c_speed:2; /* obsolete since ? */
u16 pwm_freq_hz;
u8 min_brightness; /* ???-233 */
- u8 obsolete2;
- u8 obsolete3;
+ u8 i2c_address; /* obsolete since ? */
+ u8 i2c_command; /* obsolete since ? */
} __packed;
struct lfp_backlight_control_method {
@@ -897,16 +899,11 @@ struct lfp_brightness_level {
u16 reserved;
} __packed;
-#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
- offsetof(struct bdb_lfp_backlight_data, brightness_level)
-#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
- offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
-
struct bdb_lfp_backlight_data {
u8 entry_size;
struct lfp_backlight_data_entry data[16];
- u8 level[16]; /* ???-233 */
- struct lfp_backlight_control_method backlight_control[16];
+ u8 level[16]; /* 162-233 */
+ struct lfp_backlight_control_method backlight_control[16]; /* 191+ */
struct lfp_brightness_level brightness_level[16]; /* 234+ */
struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
u8 brightness_precision_bits[16]; /* 236+ */
@@ -917,7 +914,7 @@ struct bdb_lfp_backlight_data {
* Block 44 - LFP Power Conservation Features Block
*/
struct lfp_power_features {
- u8 reserved1:1;
+ u8 dpst_support:1; /* ???-159 */
u8 power_conservation_pref:3;
u8 reserved2:1;
u8 lace_enabled_status:1; /* 210+ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5d905f932cb4..894ee97b3e1b 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -9,6 +9,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vrr.h"
+#include "intel_dp.h"
bool intel_vrr_is_capable(struct intel_connector *connector)
{
@@ -113,10 +114,18 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that during VRR toggle/push/etc.
+ */
+ if (crtc_state->bigjoiner_pipes)
+ return;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;
@@ -165,6 +174,14 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
if (crtc_state->uapi.vrr_enabled) {
crtc_state->vrr.enable = true;
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+ if (intel_dp_as_sdp_supported(intel_dp)) {
+ crtc_state->vrr.vsync_start =
+ (crtc_state->hw.adjusted_mode.crtc_vtotal -
+ crtc_state->hw.adjusted_mode.vsync_start);
+ crtc_state->vrr.vsync_end =
+ (crtc_state->hw.adjusted_mode.crtc_vtotal -
+ crtc_state->hw.adjusted_mode.vsync_end);
+ }
}
}
@@ -187,10 +204,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
- * TRANS_SET_CONTEXT_LATENCY with VRR enabled
- * requires this chicken bit on ADL/DG2.
+ * This bit seems to have two meanings depending on the platform:
+ * TGL: generate VRR "safe window" for DSB vblank waits
+ * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
- if (DISPLAY_VER(dev_priv) == 13)
+ if (IS_DISPLAY_VER(dev_priv, 12, 13))
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
@@ -239,6 +257,12 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
return;
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+
+ if (HAS_AS_SDP(dev_priv))
+ intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder),
+ VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
+ VRR_VSYNC_START(crtc_state->vrr.vsync_start));
+
intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}
@@ -257,13 +281,16 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+
+ if (HAS_AS_SDP(dev_priv))
+ intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder), 0);
}
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 trans_vrr_ctl;
+ u32 trans_vrr_ctl, trans_vrr_vsync;
trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
@@ -283,6 +310,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
}
- if (crtc_state->vrr.enable)
+ if (crtc_state->vrr.enable) {
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+
+ if (HAS_AS_SDP(dev_priv)) {
+ trans_vrr_vsync =
+ intel_de_read(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder));
+ crtc_state->vrr.vsync_start =
+ REG_FIELD_GET(VRR_VSYNC_START_MASK, trans_vrr_vsync);
+ crtc_state->vrr.vsync_end =
+ REG_FIELD_GET(VRR_VSYNC_END_MASK, trans_vrr_vsync);
+ }
+ }
}
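The AS SDP vsync values stored above are offsets measured back from crtc_vtotal rather than absolute scanline numbers, and intel_vrr_get_config() reads them back out of TRANS_VRR_VSYNC with REG_FIELD_GET. A minimal standalone C sketch of just that arithmetic, using made-up vertical timings (the struct is a local stand-in, not an i915 type):

#include <stdio.h>

/* Local stand-in for the few timing fields used here; not an i915 type. */
struct mode_timings {
	int crtc_vtotal;	/* total scanlines per frame */
	int vsync_start;	/* scanline where vsync begins */
	int vsync_end;		/* scanline where vsync ends */
};

int main(void)
{
	/* Hypothetical 1080p-style vertical timings, for illustration only. */
	struct mode_timings m = {
		.crtc_vtotal = 1125,
		.vsync_start = 1084,
		.vsync_end = 1089,
	};

	/* Same computation as the hunk above: distance from vtotal. */
	int vrr_vsync_start = m.crtc_vtotal - m.vsync_start;
	int vrr_vsync_end = m.crtc_vtotal - m.vsync_end;

	printf("vrr.vsync_start = %d, vrr.vsync_end = %d\n",
	       vrr_vsync_start, vrr_vsync_end);
	return 0;
}

With these made-up timings the program prints vrr.vsync_start = 41 and vrr.vsync_end = 36.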
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 8a934bada624..baa601d27815 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -213,10 +213,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* The pipe scaler does not use all the bits of PIPESRC, at least
* on the earlier platforms. So even when we're scaling a plane
* the *pipe* source size must not be too large. For simplicity
- * we assume the limits match the scaler source size limits. Might
- * not be 100% accurate on all platforms, but good enough for now.
+ * we assume the limits match the scaler destination size limits.
+ * Might not be 100% accurate on all platforms, but good enough for
+ * now.
*/
- if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
+ if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: pipe src size %ux%u "
"is out of scaler range\n",
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index e941e2e4fd14..860574d04f88 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -2295,6 +2295,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (HAS_4TILE(i915))
caps |= INTEL_PLANE_CAP_TILING_4;
+ if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
+ return caps;
+
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
if (DISPLAY_VER(i915) >= 12)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index c6b9be80d83c..7c6187b4479f 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -6,18 +6,19 @@
#include <drm/drm_blend.h>
#include "i915_drv.h"
-#include "i915_fixed.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
+#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_fb.h"
+#include "intel_fixed.h"
#include "intel_pcode.h"
#include "intel_wm.h"
#include "skl_watermark.h"
@@ -69,7 +70,7 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
return DISPLAY_VER(i915) == 9;
}
-static bool
+bool
intel_has_sagv(struct drm_i915_private *i915)
{
return HAS_SAGV(i915) &&
@@ -2601,10 +2602,17 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- if (HAS_MBUS_JOINING(i915))
+ if (HAS_MBUS_JOINING(i915)) {
new_dbuf_state->joined_mbus =
adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+ if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ ret = intel_cdclk_state_set_joined_mbus(state, new_dbuf_state->joined_mbus);
+ if (ret)
+ return ret;
+ }
+ }
+
for_each_intel_crtc(&i915->drm, crtc) {
enum pipe pipe = crtc->pipe;
@@ -2628,13 +2636,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
if (ret)
return ret;
- if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
- /* TODO: Implement vblank synchronized MBUS joining changes */
- ret = intel_modeset_all_pipes_late(state, "MBUS joining change");
- if (ret)
- return ret;
- }
-
drm_dbg_kms(&i915->drm,
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
@@ -3057,6 +3058,8 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
if (HAS_MBUS_JOINING(i915))
dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+ dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(i915, &i915->display.cdclk.hw);
+
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -3530,85 +3533,6 @@ int intel_dbuf_init(struct drm_i915_private *i915)
return 0;
}
-/*
- * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before
- * update the request state of all DBUS slices.
- */
-static void update_mbus_pre_enable(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- u32 mbus_ctl, dbuf_min_tracker_val;
- enum dbuf_slice slice;
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
-
- if (!HAS_MBUS_JOINING(i915))
- return;
-
- /*
- * TODO: Implement vblank synchronized MBUS joining changes.
- * Must be properly coordinated with dbuf reprogramming.
- */
- if (dbuf_state->joined_mbus) {
- mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
- } else {
- mbus_ctl = MBUS_HASHING_MODE_2x2 |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
- }
-
- intel_de_rmw(i915, MBUS_CTL,
- MBUS_HASHING_MODE_MASK | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
-
- for_each_dbuf_slice(i915, slice)
- intel_de_rmw(i915, DBUF_CTL_S(slice),
- DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
- dbuf_min_tracker_val);
-}
-
-void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
- new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- update_mbus_pre_enable(state);
- gen9_dbuf_slices_update(i915,
- old_dbuf_state->enabled_slices |
- new_dbuf_state->enabled_slices);
-}
-
-void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
- new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- gen9_dbuf_slices_update(i915,
- new_dbuf_state->enabled_slices);
-}
-
static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
{
switch (pipe) {
@@ -3628,14 +3552,12 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
return false;
}
-void intel_mbus_dbox_update(struct intel_atomic_state *state)
+static void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- const struct intel_crtc_state *new_crtc_state;
const struct intel_crtc *crtc;
u32 val = 0;
- int i;
if (DISPLAY_VER(i915) < 11)
return;
@@ -3679,12 +3601,9 @@ void intel_mbus_dbox_update(struct intel_atomic_state *state)
val |= MBUS_DBOX_B_CREDIT(8);
}
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) {
u32 pipe_val = val;
- if (!new_crtc_state->hw.active)
- continue;
-
if (DISPLAY_VER(i915) >= 14) {
if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
new_dbuf_state->active_pipes))
@@ -3697,6 +3616,217 @@ void intel_mbus_dbox_update(struct intel_atomic_state *state)
}
}
+int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
+ int ratio)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(dbuf_state))
+ return PTR_ERR(dbuf_state);
+
+ dbuf_state->mdclk_cdclk_ratio = ratio;
+
+ return intel_atomic_lock_global_state(&dbuf_state->base);
+}
+
+void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+ int ratio, bool joined_mbus)
+{
+ enum dbuf_slice slice;
+
+ if (!HAS_MBUS_JOINING(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 20)
+ intel_de_rmw(i915, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
+ MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
+
+ if (joined_mbus)
+ ratio *= 2;
+
+ drm_dbg_kms(&i915->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
+ ratio, str_yes_no(joined_mbus));
+
+ for_each_dbuf_slice(i915, slice)
+ intel_de_rmw(i915, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
+}
+
+static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ int mdclk_cdclk_ratio;
+
+ if (intel_cdclk_is_decreasing_later(state)) {
+ /* cdclk/mdclk will be changed later by intel_set_cdclk_post_plane_update() */
+ mdclk_cdclk_ratio = old_dbuf_state->mdclk_cdclk_ratio;
+ } else {
+ /* cdclk/mdclk already changed by intel_set_cdclk_pre_plane_update() */
+ mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
+ }
+
+ intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
+ new_dbuf_state->joined_mbus);
+}
+
+static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
+ const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+
+ drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
+ drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
+
+ crtc = intel_crtc_for_pipe(i915, pipe);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state))
+ return pipe;
+ else
+ return INVALID_PIPE;
+}
+
+static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
+ enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ u32 mbus_ctl;
+
+ drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus),
+ pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
+
+ if (new_dbuf_state->joined_mbus)
+ mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
+ else
+ mbus_ctl = MBUS_HASHING_MODE_2x2;
+
+ if (pipe != INVALID_PIPE)
+ mbus_ctl |= MBUS_JOIN_PIPE_SELECT(pipe);
+ else
+ mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
+
+ intel_de_rmw(i915, MBUS_CTL,
+ MBUS_HASHING_MODE_MASK | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+}
+
+void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
+{
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state)
+ return;
+
+ if (!old_dbuf_state->joined_mbus && new_dbuf_state->joined_mbus) {
+ enum pipe pipe = intel_mbus_joined_pipe(state, new_dbuf_state);
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mbus_join_update(state, pipe);
+ intel_mbus_dbox_update(state);
+ intel_dbuf_mdclk_min_tracker_update(state);
+ }
+}
+
+void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state)
+ return;
+
+ if (old_dbuf_state->joined_mbus && !new_dbuf_state->joined_mbus) {
+ enum pipe pipe = intel_mbus_joined_pipe(state, old_dbuf_state);
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mdclk_min_tracker_update(state);
+ intel_mbus_dbox_update(state);
+ intel_dbuf_mbus_join_update(state, pipe);
+
+ if (pipe != INVALID_PIPE) {
+ struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+
+ intel_crtc_wait_for_next_vblank(crtc);
+ }
+ } else if (old_dbuf_state->joined_mbus == new_dbuf_state->joined_mbus &&
+ old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mdclk_min_tracker_update(state);
+ intel_mbus_dbox_update(state);
+ }
+}
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ u8 old_slices, new_slices;
+
+ if (!new_dbuf_state)
+ return;
+
+ old_slices = old_dbuf_state->enabled_slices;
+ new_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
+
+ if (old_slices == new_slices)
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(i915, new_slices);
+}
+
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ u8 old_slices, new_slices;
+
+ if (!new_dbuf_state)
+ return;
+
+ old_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
+ new_slices = new_dbuf_state->enabled_slices;
+
+ if (old_slices == new_slices)
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(i915, new_slices);
+}
+
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = m->private;
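The rewritten MBUS handling splits the work around the DDB update: a transition to joined MBUS is applied by intel_dbuf_mbus_pre_ddb_update() before the DDB is reallocated, a transition away from joined MBUS by intel_dbuf_mbus_post_ddb_update() afterwards, and the DBUF min tracker value is derived from the mdclk/cdclk ratio, doubled while MBUS is joined. The standalone C sketch below models only that ordering decision and the ratio arithmetic with simplified stand-in types; it is not the driver code:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the relevant intel_dbuf_state fields. */
struct dbuf_state {
	bool joined_mbus;
	int mdclk_cdclk_ratio;
};

/*
 * Min tracker value programmed into each DBUF_CTL_S: ratio - 1, with the
 * ratio doubled while MBUS is joined (mirrors
 * intel_dbuf_mdclk_cdclk_ratio_update() above).
 */
static int min_tracker_value(const struct dbuf_state *s)
{
	int ratio = s->mdclk_cdclk_ratio;

	if (s->joined_mbus)
		ratio *= 2;

	return ratio - 1;
}

/* Joining is applied before the DDB update, un-joining after it. */
static const char *join_change_phase(const struct dbuf_state *old,
				     const struct dbuf_state *new)
{
	if (!old->joined_mbus && new->joined_mbus)
		return "pre-DDB";
	if (old->joined_mbus && !new->joined_mbus)
		return "post-DDB";
	return "no join change";
}

int main(void)
{
	struct dbuf_state old = { .joined_mbus = false, .mdclk_cdclk_ratio = 2 };
	struct dbuf_state new = { .joined_mbus = true, .mdclk_cdclk_ratio = 2 };

	printf("join change applied: %s\n", join_change_phase(&old, &new));
	printf("min tracker value while joined: %d\n", min_tracker_value(&new));
	return 0;
}

For this example state (ratio 2, becoming joined) the change is applied pre-DDB and the min tracker value is 3, matching the DBUF_MIN_TRACKER_STATE_SERVICE(3) that the removed update_mbus_pre_enable() used for the joined case.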
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index e3d1d74a7b17..91f92c0e706e 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -25,6 +25,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
bool intel_can_enable_sagv(struct drm_i915_private *i915,
const struct intel_bw_state *bw_state);
+bool intel_has_sagv(struct drm_i915_private *i915);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
const struct skl_ddb_entry *entry);
@@ -58,22 +59,31 @@ struct intel_dbuf_state {
u8 slices[I915_MAX_PIPES];
u8 enabled_slices;
u8 active_pipes;
+ u8 mdclk_cdclk_ratio;
bool joined_mbus;
};
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
-#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
+#define to_intel_dbuf_state(global_state) \
+ container_of_const((global_state), struct intel_dbuf_state, base)
+
#define intel_atomic_get_old_dbuf_state(state) \
to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
#define intel_atomic_get_new_dbuf_state(state) \
to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
int intel_dbuf_init(struct drm_i915_private *i915);
+int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
+ int ratio);
+
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_mbus_dbox_update(struct intel_atomic_state *state);
+void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+ int ratio, bool joined_mbus);
+void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
+void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
#endif /* __SKL_WATERMARK_H__ */
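The to_intel_dbuf_state() change above switches to container_of_const(), which recovers the wrapping intel_dbuf_state from its embedded global-state base while preserving the const qualifier of the argument. A standalone sketch of the underlying container_of pattern with simplified stand-in types (not the real i915 structures; the const-preserving variant additionally relies on _Generic in the kernel):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the global-state base and the state wrapping it. */
struct global_state {
	int changed;
};

struct dbuf_state {
	struct global_state base;
	unsigned char enabled_slices;
	unsigned char mdclk_cdclk_ratio;
};

/* Plain container_of: recover the wrapper from a pointer to its member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct dbuf_state s = { .enabled_slices = 0x3, .mdclk_cdclk_ratio = 2 };
	struct global_state *base = &s.base;

	/* Equivalent of to_intel_dbuf_state(base) in the header above. */
	struct dbuf_state *back = container_of(base, struct dbuf_state, base);

	printf("enabled slices: 0x%x\n", (unsigned int)back->enabled_slices);
	return 0;
}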
diff --git a/drivers/gpu/drm/i915/display/skl_watermark_regs.h b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
index 20b30c9a6613..269163fa3350 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
@@ -32,14 +32,16 @@
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
-#define MBUS_CTL _MMIO(0x4438C)
-#define MBUS_JOIN REG_BIT(31)
-#define MBUS_HASHING_MODE_MASK REG_BIT(30)
-#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
-#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
-#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
-#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
-#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+#define MBUS_CTL _MMIO(0x4438C)
+#define MBUS_JOIN REG_BIT(31)
+#define MBUS_HASHING_MODE_MASK REG_BIT(30)
+#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
+#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
+#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
+#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
+#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+#define MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(15, 13)
+#define MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)
/* Watermark register definitions for SKL */
#define _CUR_WM_A_0 0x70140
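The new MBUS_TRANSLATION_THROTTLE_MIN field follows the usual REG_GENMASK/REG_FIELD_PREP pattern: the mask names bits 15:13 of MBUS_CTL and the helper shifts a value into that field; intel_dbuf_mdclk_cdclk_ratio_update() writes ratio - 1 into it on display version 20+. A standalone sketch with simplified local stand-ins for the kernel helpers (GCC/Clang __builtin_ctz assumed; the real macros also add compile-time checks):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for REG_GENMASK/REG_FIELD_PREP; 32-bit only. */
#define GENMASK_U32(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define FIELD_PREP_U32(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

/* Bits 15:13 of MBUS_CTL, as defined in the hunk above. */
#define MBUS_TRANSLATION_THROTTLE_MIN_MASK	GENMASK_U32(15, 13)
#define MBUS_TRANSLATION_THROTTLE_MIN(val) \
	FIELD_PREP_U32(MBUS_TRANSLATION_THROTTLE_MIN_MASK, (val))

int main(void)
{
	/* The driver programs ratio - 1, e.g. for an mdclk:cdclk ratio of 2. */
	int ratio = 2;
	uint32_t field = MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1);

	printf("mask = 0x%08x, field value = 0x%08x\n",
	       (unsigned int)MBUS_TRANSLATION_THROTTLE_MIN_MASK,
	       (unsigned int)field);
	return 0;
}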
diff --git a/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h b/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h
new file mode 100644
index 000000000000..2b83f334b1ff
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __VLV_DPIO_PHY_REGS_H__
+#define __VLV_DPIO_PHY_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _VLV_CMN(dw) (0x8100 + (dw) * 4)
+#define _CHV_CMN(cl, dw) (0x8100 - (cl) * 0x80 + (dw) * 4)
+#define _VLV_PLL(ch, dw) (0x8000 + (ch) * 0x20 + (dw) * 4) /* dw 0-7,16-23 */
+#define _CHV_PLL(ch, dw) (0x8000 + (ch) * 0x180 + (dw) * 4)
+#define _VLV_REF(dw) (0x80a0 + ((dw) - 8) * 4) /* dw 8-15 */
+#define _VLV_PCS(ch, spline, dw) (0x200 + (ch) * 0x2400 + (spline) * 0x200 + (dw) * 4)
+#define _VLV_PCS_GRP(ch, dw) (0x8200 + (ch) * 0x200 + (dw) * 4)
+#define _VLV_PCS_BCAST(dw) (0xc000 + (dw) * 4)
+#define _VLV_TX(ch, lane, dw) (0x80 + (ch) * 0x2400 + (lane) * 0x200 + (dw) * 4)
+#define _VLV_TX_GRP(ch, dw) (0x8280 + (ch) * 0x200 + (dw) * 4)
+#define _VLV_TX_BCAST(dw) (0xc080 + (dw) * 4)
+
+/*
+ * Per pipe/PLL DPIO regs
+ */
+#define VLV_PLL_DW3(ch) _VLV_PLL((ch), 3)
+#define DPIO_S1_DIV_MASK REG_GENMASK(30, 28)
+#define DPIO_S1_DIV(s1) REG_FIELD_PREP(DPIO_S1_DIV_MASK, (s1))
+#define DPIO_S1_DIV_DAC 0 /* 10, DAC 25-225M rate */
+#define DPIO_S1_DIV_HDMIDP 1 /* 5, DAC 225-400M rate */
+#define DPIO_S1_DIV_LVDS1 2 /* 14 */
+#define DPIO_S1_DIV_LVDS2 3 /* 7 */
+#define DPIO_K_DIV_MASK REG_GENMASK(27, 24)
+#define DPIO_K_DIV(k) REG_FIELD_PREP(DPIO_K_DIV_MASK, (k))
+#define DPIO_P1_DIV_MASK REG_GENMASK(23, 21)
+#define DPIO_P1_DIV(p1) REG_FIELD_PREP(DPIO_P1_DIV_MASK, (p1))
+#define DPIO_P2_DIV_MASK REG_GENMASK(20, 16)
+#define DPIO_P2_DIV(p2) REG_FIELD_PREP(DPIO_P2_DIV_MASK, (p2))
+#define DPIO_N_DIV_MASK REG_GENMASK(15, 12)
+#define DPIO_N_DIV(n) REG_FIELD_PREP(DPIO_N_DIV_MASK, (n))
+#define DPIO_ENABLE_CALIBRATION REG_BIT(11)
+#define DPIO_M1_DIV_MASK REG_GENMASK(10, 8)
+#define DPIO_M1_DIV(m1) REG_FIELD_PREP(DPIO_M1_DIV_MASK, (m1))
+#define DPIO_M2_DIV_MASK REG_GENMASK(7, 0)
+#define DPIO_M2_DIV(m2) REG_FIELD_PREP(DPIO_M2_DIV_MASK, (m2))
+
+#define VLV_PLL_DW5(ch) _VLV_PLL((ch), 5)
+#define DPIO_REFSEL_OVERRIDE REG_BIT(27)
+#define DPIO_PLL_MODESEL_MASK REG_GENMASK(26, 24)
+#define DPIO_BIAS_CURRENT_CTL_MASK REG_GENMASK(22, 20) /* always 0x7 */
+#define DPIO_PLL_REFCLK_SEL_MASK REG_GENMASK(17, 16)
+#define DPIO_DRIVER_CTL_MASK REG_GENMASK(15, 12) /* always set to 0x8 */
+#define DPIO_CLK_BIAS_CTL_MASK REG_GENMASK(11, 8) /* always set to 0x5 */
+
+#define VLV_PLL_DW7(ch) _VLV_PLL((ch), 7)
+
+#define VLV_PLL_DW16(ch) _VLV_PLL((ch), 16)
+
+#define VLV_PLL_DW17(ch) _VLV_PLL((ch), 17)
+
+#define VLV_PLL_DW18(ch) _VLV_PLL((ch), 18)
+
+#define VLV_PLL_DW19(ch) _VLV_PLL((ch), 19)
+
+#define VLV_REF_DW11 _VLV_REF(11)
+
+#define VLV_CMN_DW0 _VLV_CMN(0)
+
+/*
+ * Per DDI channel DPIO regs
+ */
+#define VLV_PCS_DW0_GRP(ch) _VLV_PCS_GRP((ch), 0)
+#define VLV_PCS01_DW0(ch) _VLV_PCS((ch), 0, 0)
+#define VLV_PCS23_DW0(ch) _VLV_PCS((ch), 1, 0)
+#define DPIO_PCS_TX_LANE2_RESET REG_BIT(16)
+#define DPIO_PCS_TX_LANE1_RESET REG_BIT(7)
+#define DPIO_LEFT_TXFIFO_RST_MASTER2 REG_BIT(4)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER2 REG_BIT(3)
+
+#define VLV_PCS_DW1_GRP(ch) _VLV_PCS_GRP((ch), 1)
+#define VLV_PCS01_DW1(ch) _VLV_PCS((ch), 0, 1)
+#define VLV_PCS23_DW1(ch) _VLV_PCS((ch), 1, 1)
+#define CHV_PCS_REQ_SOFTRESET_EN REG_BIT(23)
+#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN REG_BIT(22)
+#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN REG_BIT(21)
+#define DPIO_PCS_CLK_DATAWIDTH_MASK REG_GENMASK(7, 6)
+#define DPIO_PCS_CLK_DATAWIDTH_8_10 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 1)
+#define DPIO_PCS_CLK_DATAWIDTH_16_20 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 2)
+#define DPIO_PCS_CLK_DATAWIDTH_32_40 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 3)
+#define DPIO_PCS_CLK_SOFT_RESET REG_BIT(5)
+
+#define VLV_PCS_DW8_GRP(ch) _VLV_PCS_GRP((ch), 8)
+#define VLV_PCS01_DW8(ch) _VLV_PCS((ch), 0, 8)
+#define VLV_PCS23_DW8(ch) _VLV_PCS((ch), 1, 8)
+#define DPIO_PCS_USEDCLKCHANNEL REG_BIT(21)
+#define DPIO_PCS_USEDCLKCHANNEL_OVRRIDE REG_BIT(20)
+
+#define VLV_PCS_DW9_GRP(ch) _VLV_PCS_GRP((ch), 9)
+#define VLV_PCS01_DW9(ch) _VLV_PCS((ch), 0, 9)
+#define VLV_PCS23_DW9(ch) _VLV_PCS((ch), 1, 9)
+#define DPIO_PCS_TX2MARGIN_MASK REG_GENMASK(15, 13)
+#define DPIO_PCS_TX2MARGIN_000 REG_FIELD_PREP(DPIO_PCS_TX2MARGIN_MASK, 0)
+#define DPIO_PCS_TX2MARGIN_101 REG_FIELD_PREP(DPIO_PCS_TX2MARGIN_MASK, 1)
+#define DPIO_PCS_TX1MARGIN_MASK REG_GENMASK(12, 10)
+#define DPIO_PCS_TX1MARGIN_000 REG_FIELD_PREP(DPIO_PCS_TX1MARGIN_MASK, 0)
+#define DPIO_PCS_TX1MARGIN_101 REG_FIELD_PREP(DPIO_PCS_TX1MARGIN_MASK, 1)
+
+#define VLV_PCS_DW10_GRP(ch) _VLV_PCS_GRP((ch), 10)
+#define VLV_PCS01_DW10(ch) _VLV_PCS((ch), 0, 10)
+#define VLV_PCS23_DW10(ch) _VLV_PCS((ch), 1, 10)
+#define DPIO_PCS_SWING_CALC_TX1_TX3 REG_BIT(31)
+#define DPIO_PCS_SWING_CALC_TX0_TX2 REG_BIT(30)
+#define DPIO_PCS_TX2DEEMP_MASK REG_GENMASK(27, 24)
+#define DPIO_PCS_TX2DEEMP_9P5 REG_FIELD_PREP(DPIO_PCS_TX2DEEMP_MASK, 0)
+#define DPIO_PCS_TX2DEEMP_6P0 REG_FIELD_PREP(DPIO_PCS_TX2DEEMP_MASK, 2)
+#define DPIO_PCS_TX1DEEMP_MASK REG_GENMASK(19, 16)
+#define DPIO_PCS_TX1DEEMP_9P5 REG_FIELD_PREP(DPIO_PCS_TX1DEEMP_MASK, 0)
+#define DPIO_PCS_TX1DEEMP_6P0 REG_FIELD_PREP(DPIO_PCS_TX1DEEMP_MASK, 2)
+
+#define VLV_PCS_DW11_GRP(ch) _VLV_PCS_GRP((ch), 11)
+#define VLV_PCS01_DW11(ch) _VLV_PCS((ch), 0, 11)
+#define VLV_PCS23_DW11(ch) _VLV_PCS((ch), 1, 11)
+#define DPIO_TX2_STAGGER_MASK_MASK REG_GENMASK(28, 24)
+#define DPIO_TX2_STAGGER_MASK(x) REG_FIELD_PREP(DPIO_TX2_STAGGER_MASK_MASK, (x))
+#define DPIO_LANEDESKEW_STRAP_OVRD REG_BIT(3)
+#define DPIO_LEFT_TXFIFO_RST_MASTER REG_BIT(1)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER REG_BIT(0)
+
+#define VLV_PCS_DW12_GRP(ch) _VLV_PCS_GRP((ch), 12)
+#define VLV_PCS01_DW12(ch) _VLV_PCS((ch), 0, 12)
+#define VLV_PCS23_DW12(ch) _VLV_PCS((ch), 1, 12)
+#define DPIO_TX2_STAGGER_MULT_MASK REG_GENMASK(22, 20)
+#define DPIO_TX2_STAGGER_MULT(x) REG_FIELD_PREP(DPIO_TX2_STAGGER_MULT_MASK, (x))
+#define DPIO_TX1_STAGGER_MULT_MASK REG_GENMASK(20, 16)
+#define DPIO_TX1_STAGGER_MULT(x) REG_FIELD_PREP(DPIO_TX1_STAGGER_MULT_MASK, (x))
+#define DPIO_TX1_STAGGER_MASK_MASK REG_GENMASK(12, 8)
+#define DPIO_TX1_STAGGER_MASK(x) REG_FIELD_PREP(DPIO_TX1_STAGGER_MASK_MASK, (x))
+#define DPIO_LANESTAGGER_STRAP_OVRD REG_BIT(6)
+#define DPIO_LANESTAGGER_STRAP_MASK REG_GENMASK(4, 0)
+#define DPIO_LANESTAGGER_STRAP(x) REG_FIELD_PREP(DPIO_LANESTAGGER_STRAP_MASK, (x))
+
+#define VLV_PCS_DW14_GRP(ch) _VLV_PCS_GRP((ch), 14)
+#define VLV_PCS01_DW14(ch) _VLV_PCS((ch), 0, 14)
+#define VLV_PCS23_DW14(ch) _VLV_PCS((ch), 1, 14)
+
+#define VLV_PCS_DW17_BCAST _VLV_PCS_BCAST(17)
+#define VLV_PCS_DW17_GRP(ch) _VLV_PCS_GRP((ch), 17)
+#define VLV_PCS01_DW17(ch) _VLV_PCS((ch), 0, 17)
+#define VLV_PCS23_DW17(ch) _VLV_PCS((ch), 1, 17)
+
+#define VLV_PCS_DW23_GRP(ch) _VLV_PCS_GRP((ch), 23)
+#define VLV_PCS01_DW23(ch) _VLV_PCS((ch), 0, 23)
+#define VLV_PCS23_DW23(ch) _VLV_PCS((ch), 1, 23)
+
+#define VLV_TX_DW2_GRP(ch) _VLV_TX_GRP((ch), 2)
+#define VLV_TX_DW2(ch, lane) _VLV_TX((ch), (lane), 2)
+#define DPIO_SWING_MARGIN000_MASK REG_GENMASK(23, 16)
+#define DPIO_SWING_MARGIN000(x) REG_FIELD_PREP(DPIO_SWING_MARGIN000_MASK, (x))
+#define DPIO_UNIQ_TRANS_SCALE_MASK REG_GENMASK(15, 8)
+#define DPIO_UNIQ_TRANS_SCALE(x) REG_FIELD_PREP(DPIO_UNIQ_TRANS_SCALE_MASK, (x))
+
+#define VLV_TX_DW3_GRP(ch) _VLV_TX_GRP((ch), 3)
+#define VLV_TX_DW3(ch, lane) _VLV_TX((ch), (lane), 3)
+/* The following bit for CHV phy */
+#define DPIO_TX_UNIQ_TRANS_SCALE_EN REG_BIT(27)
+#define DPIO_SWING_MARGIN101_MASK REG_GENMASK(23, 16)
+#define DPIO_SWING_MARGIN101(x) REG_FIELD_PREP(DPIO_SWING_MARGIN101_MASK, (x))
+
+#define VLV_TX_DW4_GRP(ch) _VLV_TX_GRP((ch), 4)
+#define VLV_TX_DW4(ch, lane) _VLV_TX((ch), (lane), 4)
+#define DPIO_SWING_DEEMPH9P5_MASK REG_GENMASK(31, 24)
+#define DPIO_SWING_DEEMPH9P5(x) REG_FIELD_PREP(DPIO_SWING_DEEMPH9P5_MASK, (x))
+#define DPIO_SWING_DEEMPH6P0_MASK REG_GENMASK(23, 16)
+#define DPIO_SWING_DEEMPH6P0(x) REG_FIELD_PREP(DPIO_SWING_DEEMPH6P0_MASK, (x))
+
+#define VLV_TX_DW5_GRP(ch) _VLV_TX_GRP((ch), 5)
+#define VLV_TX_DW5(ch, lane) _VLV_TX((ch), (lane), 5)
+#define DPIO_TX_OCALINIT_EN REG_BIT(31)
+
+#define VLV_TX_DW11_GRP(ch) _VLV_TX_GRP((ch), 11)
+#define VLV_TX_DW11(ch, lane) _VLV_TX((ch), (lane), 11)
+
+#define VLV_TX_DW14_GRP(ch) _VLV_TX_GRP((ch), 14)
+#define VLV_TX_DW14(ch, lane) _VLV_TX((ch), (lane), 14)
+
+/* CHV dpPhy registers */
+#define CHV_PLL_DW0(ch) _CHV_PLL((ch), 0)
+#define DPIO_CHV_M2_DIV_MASK REG_GENMASK(7, 0)
+#define DPIO_CHV_M2_DIV(m2) REG_FIELD_PREP(DPIO_CHV_M2_DIV_MASK, (m2))
+
+#define CHV_PLL_DW1(ch) _CHV_PLL((ch), 1)
+#define DPIO_CHV_N_DIV_MASK REG_GENMASK(11, 8)
+#define DPIO_CHV_N_DIV(n) REG_FIELD_PREP(DPIO_CHV_N_DIV_MASK, (n))
+#define DPIO_CHV_M1_DIV_MASK REG_GENMASK(2, 0)
+#define DPIO_CHV_M1_DIV(m1) REG_FIELD_PREP(DPIO_CHV_M1_DIV_MASK, (m1))
+#define DPIO_CHV_M1_DIV_BY_2 0
+
+#define CHV_PLL_DW2(ch) _CHV_PLL((ch), 2)
+#define DPIO_CHV_M2_FRAC_DIV_MASK REG_GENMASK(21, 0)
+#define DPIO_CHV_M2_FRAC_DIV(m2_frac) REG_FIELD_PREP(DPIO_CHV_M2_FRAC_DIV_MASK, (m2_frac))
+
+#define CHV_PLL_DW3(ch) _CHV_PLL((ch), 3)
+#define DPIO_CHV_FRAC_DIV_EN REG_BIT(16)
+#define DPIO_CHV_SECOND_MOD REG_BIT(8)
+#define DPIO_CHV_FEEDFWD_GAIN_MASK REG_GENMASK(3, 0)
+#define DPIO_CHV_FEEDFWD_GAIN(x) REG_FIELD_PREP(DPIO_CHV_FEEDFWD_GAIN_MASK, (x))
+
+#define CHV_PLL_DW6(ch) _CHV_PLL((ch), 6)
+#define DPIO_CHV_GAIN_CTRL_MASK REG_GENMASK(18, 16)
+#define DPIO_CHV_GAIN_CTRL(x) REG_FIELD_PREP(DPIO_CHV_GAIN_CTRL_MASK, (x))
+#define DPIO_CHV_INT_COEFF_MASK REG_GENMASK(12, 8)
+#define DPIO_CHV_INT_COEFF(x) REG_FIELD_PREP(DPIO_CHV_INT_COEFF_MASK, (x))
+#define DPIO_CHV_PROP_COEFF_MASK REG_GENMASK(3, 0)
+#define DPIO_CHV_PROP_COEFF(x) REG_FIELD_PREP(DPIO_CHV_PROP_COEFF_MASK, (x))
+
+#define CHV_PLL_DW8(ch) _CHV_PLL((ch), 8)
+#define DPIO_CHV_TDC_TARGET_CNT_MASK REG_GENMASK(9, 0)
+#define DPIO_CHV_TDC_TARGET_CNT(x) REG_FIELD_PREP(DPIO_CHV_TDC_TARGET_CNT_MASK, (x))
+
+#define CHV_PLL_DW9(ch) _CHV_PLL((ch), 9)
+#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1)
+#define DPIO_CHV_INT_LOCK_THRESHOLD(x) REG_FIELD_PREP(DPIO_CHV_INT_LOCK_THRESHOLD_MASK, (x))
+#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE REG_BIT(0) /* 1: coarse & 0 : fine */
+
+#define CHV_CMN_DW0_CH0 _CHV_CMN(0, 0)
+#define DPIO_ALLDL_POWERDOWN_CH0 REG_BIT(19)
+#define DPIO_ANYDL_POWERDOWN_CH0 REG_BIT(18)
+#define DPIO_ALLDL_POWERDOWN BIT(1)
+#define DPIO_ANYDL_POWERDOWN BIT(0)
+
+#define CHV_CMN_DW5_CH0 _CHV_CMN(0, 5)
+#define CHV_BUFRIGHTENA1_MASK REG_GENMASK(21, 20)
+#define CHV_BUFRIGHTENA1_DISABLE REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 0)
+#define CHV_BUFRIGHTENA1_NORMAL REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 1)
+#define CHV_BUFRIGHTENA1_FORCE REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 3)
+#define CHV_BUFLEFTENA1_MASK REG_GENMASK(23, 22)
+#define CHV_BUFLEFTENA1_DISABLE REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 0)
+#define CHV_BUFLEFTENA1_NORMAL REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 1)
+#define CHV_BUFLEFTENA1_FORCE REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 3)
+
+#define CHV_CMN_DW13_CH0 _CHV_CMN(0, 13)
+#define CHV_CMN_DW0_CH1 _CHV_CMN(1, 0)
+#define DPIO_CHV_S1_DIV_MASK REG_GENMASK(23, 21)
+#define DPIO_CHV_S1_DIV(s1) REG_FIELD_PREP(DPIO_CHV_S1_DIV_MASK, (s1))
+#define DPIO_CHV_P1_DIV_MASK REG_GENMASK(15, 13)
+#define DPIO_CHV_P1_DIV(p1) REG_FIELD_PREP(DPIO_CHV_P1_DIV_MASK, (p1))
+#define DPIO_CHV_P2_DIV_MASK REG_GENMASK(12, 8)
+#define DPIO_CHV_P2_DIV(p2) REG_FIELD_PREP(DPIO_CHV_P2_DIV_MASK, (p2))
+#define DPIO_CHV_K_DIV_MASK REG_GENMASK(7, 4)
+#define DPIO_CHV_K_DIV(k) REG_FIELD_PREP(DPIO_CHV_K_DIV_MASK, (k))
+#define DPIO_PLL_FREQLOCK REG_BIT(1)
+#define DPIO_PLL_LOCK REG_BIT(0)
+#define CHV_CMN_DW13(ch) _PIPE(ch, CHV_CMN_DW13_CH0, CHV_CMN_DW0_CH1)
+
+#define CHV_CMN_DW14_CH0 _CHV_CMN(0, 14)
+#define CHV_CMN_DW1_CH1 _CHV_CMN(1, 1)
+#define DPIO_AFC_RECAL REG_BIT(14)
+#define DPIO_DCLKP_EN REG_BIT(13)
+#define CHV_BUFLEFTENA2_MASK REG_GENMASK(18, 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_DISABLE REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 0)
+#define CHV_BUFLEFTENA2_NORMAL REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 1)
+#define CHV_BUFLEFTENA2_FORCE REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 3)
+#define CHV_BUFRIGHTENA2_MASK REG_GENMASK(20, 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_DISABLE REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 0)
+#define CHV_BUFRIGHTENA2_NORMAL REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 1)
+#define CHV_BUFRIGHTENA2_FORCE REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 3)
+#define CHV_CMN_DW14(ch) _PIPE(ch, CHV_CMN_DW14_CH0, CHV_CMN_DW1_CH1)
+
+#define CHV_CMN_DW19_CH0 _CHV_CMN(0, 19)
+#define CHV_CMN_DW6_CH1 _CHV_CMN(1, 6)
+#define DPIO_ALLDL_POWERDOWN_CH1 REG_BIT(30) /* CL2 DW6 only */
+#define DPIO_ANYDL_POWERDOWN_CH1 REG_BIT(29) /* CL2 DW6 only */
+#define DPIO_DYNPWRDOWNEN_CH1 REG_BIT(28) /* CL2 DW6 only */
+#define CHV_CMN_USEDCLKCHANNEL REG_BIT(13)
+#define CHV_CMN_DW19(ch) _PIPE(ch, CHV_CMN_DW19_CH0, CHV_CMN_DW6_CH1)
+
+#define CHV_CMN_DW28 _CHV_CMN(0, 28)
+#define DPIO_CL1POWERDOWNEN REG_BIT(23)
+#define DPIO_DYNPWRDOWNEN_CH0 REG_BIT(22)
+#define DPIO_SUS_CLK_CONFIG_MASK REG_GENMASK(1, 0)
+#define DPIO_SUS_CLK_CONFIG_ON REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 0)
+#define DPIO_SUS_CLK_CONFIG_CLKREQ REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 1)
+#define DPIO_SUS_CLK_CONFIG_GATE REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 2)
+#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 3)
+
+#define CHV_CMN_DW30 _CHV_CMN(0, 30)
+#define DPIO_CL2_LDOFUSE_PWRENB REG_BIT(6)
+#define DPIO_LRC_BYPASS REG_BIT(3)
+
+#define CHV_TX_DW0(ch, lane) _VLV_TX((ch), (lane), 0)
+#define CHV_TX_DW1(ch, lane) _VLV_TX((ch), (lane), 1)
+#define CHV_TX_DW2(ch, lane) _VLV_TX((ch), (lane), 2)
+#define CHV_TX_DW3(ch, lane) _VLV_TX((ch), (lane), 3)
+#define CHV_TX_DW4(ch, lane) _VLV_TX((ch), (lane), 4)
+#define CHV_TX_DW5(ch, lane) _VLV_TX((ch), (lane), 5)
+#define CHV_TX_DW6(ch, lane) _VLV_TX((ch), (lane), 6)
+#define CHV_TX_DW7(ch, lane) _VLV_TX((ch), (lane), 7)
+#define CHV_TX_DW8(ch, lane) _VLV_TX((ch), (lane), 8)
+#define CHV_TX_DW9(ch, lane) _VLV_TX((ch), (lane), 9)
+#define CHV_TX_DW10(ch, lane) _VLV_TX((ch), (lane), 10)
+
+#define CHV_TX_DW11(ch, lane) _VLV_TX((ch), (lane), 11)
+#define DPIO_FRC_LATENCY_MASK REG_GENMASK(10, 8)
+#define DPIO_FRC_LATENCY(x) REG_FIELD_PREP(DPIO_FRC_LATENCY_MASK, (x))
+
+#define CHV_TX_DW14(ch, lane) _VLV_TX((ch), (lane), 14)
+#define DPIO_UPAR REG_BIT(30)
+
+#endif /* __VLV_DPIO_PHY_REGS_H__ */
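As a usage illustration for the new header, the divider fields of VLV_PLL_DW3 pack into a single 32-bit register value via REG_FIELD_PREP and can be read back with REG_FIELD_GET. The sketch below re-creates that packing with simplified local helpers and entirely hypothetical divider values; it is not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for REG_GENMASK/REG_FIELD_PREP/REG_FIELD_GET
 * (32-bit only, GCC/Clang __builtin_ctz assumed). */
#define GENMASK_U32(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define FIELD_PREP_U32(m, v)	(((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET_U32(m, r)	(((r) & (m)) >> __builtin_ctz(m))

/* Field layout of VLV_PLL_DW3 as defined in the header above. */
#define K_DIV_MASK	GENMASK_U32(27, 24)
#define P1_DIV_MASK	GENMASK_U32(23, 21)
#define P2_DIV_MASK	GENMASK_U32(20, 16)
#define N_DIV_MASK	GENMASK_U32(15, 12)
#define M1_DIV_MASK	GENMASK_U32(10, 8)
#define M2_DIV_MASK	GENMASK_U32(7, 0)

int main(void)
{
	/* Hypothetical divider values, purely to show the packing. */
	uint32_t dw3 = FIELD_PREP_U32(K_DIV_MASK, 1) |
		       FIELD_PREP_U32(P1_DIV_MASK, 3) |
		       FIELD_PREP_U32(P2_DIV_MASK, 2) |
		       FIELD_PREP_U32(N_DIV_MASK, 1) |
		       FIELD_PREP_U32(M1_DIV_MASK, 2) |
		       FIELD_PREP_U32(M2_DIV_MASK, 0x7d);

	printf("VLV_PLL_DW3 = 0x%08x, m2 read back = 0x%02x\n",
	       (unsigned int)dw3,
	       (unsigned int)FIELD_GET_U32(M2_DIV_MASK, dw3));
	return 0;
}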
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 9b33b8a74d64..ee9923c7b115 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -85,20 +85,18 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 mask;
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ if (intel_de_wait_for_set(display, MIPI_GEN_FIFO_STAT(display, port),
mask, 100))
- drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n");
+ drm_err(display->drm, "DPI FIFOs are not empty\n");
}
-static void write_data(struct drm_i915_private *dev_priv,
+static void write_data(struct intel_display *display,
i915_reg_t reg,
const u8 *data, u32 len)
{
@@ -110,18 +108,18 @@ static void write_data(struct drm_i915_private *dev_priv,
for (j = 0; j < min_t(u32, len - i, 4); j++)
val |= *data++ << 8 * j;
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
}
}
-static void read_data(struct drm_i915_private *dev_priv,
+static void read_data(struct intel_display *display,
i915_reg_t reg,
u8 *data, u32 len)
{
u32 i, j;
for (i = 0; i < len; i += 4) {
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
for (j = 0; j < min_t(u32, len - i, 4); j++)
*data++ = val >> 8 * j;
@@ -132,8 +130,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
- struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_dsi *intel_dsi = intel_dsi_host->intel_dsi;
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
enum port port = intel_dsi_host->port;
struct mipi_dsi_packet packet;
ssize_t ret;
@@ -148,51 +146,51 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
header = packet.header;
if (msg->flags & MIPI_DSI_MSG_USE_LPM) {
- data_reg = MIPI_LP_GEN_DATA(port);
+ data_reg = MIPI_LP_GEN_DATA(display, port);
data_mask = LP_DATA_FIFO_FULL;
- ctrl_reg = MIPI_LP_GEN_CTRL(port);
+ ctrl_reg = MIPI_LP_GEN_CTRL(display, port);
ctrl_mask = LP_CTRL_FIFO_FULL;
} else {
- data_reg = MIPI_HS_GEN_DATA(port);
+ data_reg = MIPI_HS_GEN_DATA(display, port);
data_mask = HS_DATA_FIFO_FULL;
- ctrl_reg = MIPI_HS_GEN_CTRL(port);
+ ctrl_reg = MIPI_HS_GEN_CTRL(display, port);
ctrl_mask = HS_CTRL_FIFO_FULL;
}
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port),
data_mask, 50))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for HS/LP DATA FIFO !full\n");
- write_data(dev_priv, data_reg, packet.payload,
+ write_data(display, data_reg, packet.payload,
packet.payload_length);
}
if (msg->rx_len) {
- intel_de_write(dev_priv, MIPI_INTR_STAT(port),
+ intel_de_write(display, MIPI_INTR_STAT(display, port),
GEN_READ_DATA_AVAIL);
}
- if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port),
ctrl_mask, 50)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for HS/LP CTRL FIFO !full\n");
}
- intel_de_write(dev_priv, ctrl_reg,
+ intel_de_write(display, ctrl_reg,
header[2] << 16 | header[1] << 8 | header[0]);
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
+ if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port),
data_mask, 50))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for read data.\n");
- read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
+ read_data(display, data_reg, msg->rx_buf, msg->rx_len);
}
/* XXX: fix for reads and writes */
@@ -225,9 +223,7 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
enum port port)
{
- struct drm_encoder *encoder = &intel_dsi->base.base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 mask;
/* XXX: pipe, hs */
@@ -237,18 +233,18 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
cmd |= DPI_LP_MODE;
/* clear bit */
- intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+ intel_de_write(display, MIPI_INTR_STAT(display, port), SPL_PKT_SENT_INTERRUPT);
/* XXX: old code skips write if control unchanged */
- if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port)))
- drm_dbg_kms(&dev_priv->drm,
+ if (cmd == intel_de_read(display, MIPI_DPI_CONTROL(display, port)))
+ drm_dbg_kms(display->drm,
"Same special packet %02x twice in a row.\n", cmd);
- intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd);
+ intel_de_write(display, MIPI_DPI_CONTROL(display, port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port), mask, 100))
+ drm_err(display->drm,
"Video mode command 0x%08x send failed.\n", cmd);
return 0;
@@ -273,8 +269,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
- base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
@@ -329,7 +324,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
static bool glk_dsi_enable_io(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
bool cold_boot = false;
@@ -339,29 +334,30 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
* Power ON MIPI IO first and then write into IO reset and LP wake bits
*/
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE);
+ intel_de_rmw(display, MIPI_CTRL(display, port), 0, GLK_MIPIIO_ENABLE);
/* Put the IO into reset */
- intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
- u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ u32 tmp = intel_de_read(display, MIPI_DEVICE_READY(display, port));
+
+ intel_de_rmw(display, MIPI_CTRL(display, port),
GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0);
}
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
GLK_MIPIIO_PORT_POWERED, 20))
- drm_err(&dev_priv->drm, "MIPIO port is powergated\n");
+ drm_err(display->drm, "MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
cold_boot |=
- !(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY);
+ !(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY);
}
return cold_boot;
@@ -369,99 +365,100 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
static void glk_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
GLK_PHY_STATUS_PORT_READY, 20))
- drm_err(&dev_priv->drm, "PHY is not ON\n");
+ drm_err(display->drm, "PHY is not ON\n");
}
/* Get IO out of reset */
- intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED);
+ intel_de_rmw(display, MIPI_CTRL(display, PORT_A), 0, GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of Low power state*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY)) {
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, DEVICE_READY);
usleep_range(10, 15);
} else {
/* Enter ULPS */
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for ULPS active */
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_ULPS_NOT_ACTIVE, 20))
- drm_err(&dev_priv->drm, "ULPS not active\n");
+ drm_err(display->drm, "ULPS not active\n");
/* Exit ULPS */
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY);
/* Enter Normal Mode */
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK,
ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
- intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, port), GLK_LP_WAKE, 0);
}
}
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
GLK_DATA_LANE_STOP_STATE, 20))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ if (intel_de_wait_for_set(display, BXT_MIPI_PORT_CTRL(port),
AFE_LATCHOUT, 20))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"D-PHY not entering LP-11 state\n");
}
}
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD);
+ intel_de_rmw(display, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
/* Clear ULPS and set device ready */
for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ val = intel_de_read(display, MIPI_DEVICE_READY(display, port));
val &= ~ULPS_STATE_MASK;
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), val);
usleep_range(2000, 2500);
val |= DEVICE_READY;
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), val);
}
}
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_flisdsi_get(dev_priv);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
@@ -474,7 +471,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_ENTER);
usleep_range(2500, 3000);
@@ -482,14 +479,14 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
* Common bit for both MIPI Port A & MIPI Port C
* No similar bit in MIPI Port C reg
*/
- intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD);
+ intel_de_rmw(display, VLV_MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_EXIT);
usleep_range(2500, 3000);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY);
usleep_range(2500, 3000);
}
@@ -509,50 +506,50 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
/* Enter ULPS */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_rmw(display, MIPI_DEVICE_READY(display, port),
ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_PHY_STATUS_PORT_READY, 20))
- drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
+ drm_err(display->drm, "PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_MIPIIO_PORT_POWERED, 20))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"MIPI IO Port is not powergated\n");
}
}
static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
/* Put the IO into reset */
- intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
GLK_PHY_STATUS_PORT_READY, 20))
- drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
+ drm_err(display->drm, "PHY is not turning OFF\n");
}
/* Clear MIPI mode */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0);
+ intel_de_rmw(display, MIPI_CTRL(display, port), GLK_MIPIIO_ENABLE, 0);
}
static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -564,30 +561,31 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
{
return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port);
}
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
+ BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ intel_de_write(display, MIPI_DEVICE_READY(display, port),
DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
@@ -596,15 +594,15 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
- intel_de_wait_for_clear(dev_priv, port_ctrl,
+ intel_de_wait_for_clear(display, port_ctrl,
AFE_LATCHOUT, 30))
- drm_err(&dev_priv->drm, "DSI LP not going Low\n");
+ drm_err(display->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
- intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0);
+ intel_de_rmw(display, port_ctrl, LP_OUTPUT_HOLD, 0);
usleep_range(1000, 1500);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x00);
usleep_range(2000, 2500);
}
}
@@ -612,6 +610,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -622,11 +621,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIXEL_OVERLAP_CNT_MASK,
temp << BXT_PIXEL_OVERLAP_CNT_SHIFT);
} else {
- intel_de_rmw(dev_priv, VLV_CHICKEN_3,
+ intel_de_rmw(display, VLV_CHICKEN_3,
PIXEL_OVERLAP_CNT_MASK,
temp << PIXEL_OVERLAP_CNT_SHIFT);
}
@@ -636,7 +635,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
u32 temp;
- temp = intel_de_read(dev_priv, port_ctrl);
+ temp = intel_de_read(display, port_ctrl);
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
@@ -656,15 +655,15 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
temp |= DITHERING_ENABLE;
/* assert ip_tg_enable signal */
- intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE);
- intel_de_posting_read(dev_priv, port_ctrl);
+ intel_de_write(display, port_ctrl, temp | DPI_ENABLE);
+ intel_de_posting_read(display, port_ctrl);
}
}
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -672,11 +671,12 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
/* de-assert ip_tg_enable signal */
- intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);
- intel_de_posting_read(dev_priv, port_ctrl);
+ intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0);
+ intel_de_posting_read(display, port_ctrl);
}
}
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+
+static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
@@ -726,6 +726,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -733,7 +734,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
enum port port;
bool glk_cold_boot = false;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi_wait_panel_power_cycle(intel_dsi);
@@ -753,16 +754,16 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
- intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
+ intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
/* Power up DSI regulator */
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
+ intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
@@ -798,8 +799,8 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
*/
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_write(dev_priv,
- MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ intel_de_write(display,
+ MIPI_MAX_RETURN_PKT_SIZE(display, port), 8 * 4);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
} else {
@@ -871,11 +872,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
intel_crtc_vblank_off(old_crtc_state);
@@ -906,12 +908,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
if (IS_BROXTON(dev_priv)) {
/* Power down DSI regulator to save power */
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL,
+ intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL,
HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
- intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
+ intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
@@ -919,7 +921,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
@@ -935,13 +937,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
bool active = false;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
@@ -960,7 +963,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
- bool enabled = intel_de_read(dev_priv, port_ctrl) & DPI_ENABLE;
+ bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE;
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
@@ -969,27 +972,27 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
*/
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
- enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
+ enabled = intel_de_read(display, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
- u32 tmp = intel_de_read(dev_priv,
- MIPI_DSI_FUNC_PRG(port));
+ u32 tmp = intel_de_read(display,
+ MIPI_DSI_FUNC_PRG(display, port));
enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
}
if (!enabled)
continue;
- if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY))
continue;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ u32 tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
- if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C))
+ if (drm_WARN_ON(display->drm, tmp > PIPE_C))
continue;
*pipe = tmp;
@@ -1010,8 +1013,7 @@ out_put_power:
static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
@@ -1033,11 +1035,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
* encoder->get_hw_state() returns true.
*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+ if (intel_de_read(display, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
break;
}
- fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+ fmt = intel_de_read(display, MIPI_DSI_FUNC_PRG(display, port)) & VID_MODE_FORMAT_MASK;
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
@@ -1049,24 +1051,24 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
- intel_de_read(dev_priv,
+ intel_de_read(display,
BXT_MIPI_TRANS_HACTIVE(port));
adjusted_mode->crtc_vdisplay =
- intel_de_read(dev_priv,
+ intel_de_read(display,
BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
- intel_de_read(dev_priv,
+ intel_de_read(display,
BXT_MIPI_TRANS_VTOTAL(port));
hactive = adjusted_mode->crtc_hdisplay;
- hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port));
+ hfp = intel_de_read(display, MIPI_HFP_COUNT(display, port));
/*
* Meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes
*/
- hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port));
- hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port));
+ hsync = intel_de_read(display, MIPI_HSYNC_PADDING_COUNT(display, port));
+ hbp = intel_de_read(display, MIPI_HBP_COUNT(display, port));
/* horizontal values are in terms of high speed byte clock */
hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
@@ -1083,8 +1085,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
}
/* vertical values are in terms of lines */
- vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port));
- vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port));
+ vfp = intel_de_read(display, MIPI_VFP_COUNT(display, port));
+ vsync = intel_de_read(display, MIPI_VSYNC_PADDING_COUNT(display, port));
adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
@@ -1210,12 +1212,12 @@ static u16 txclkesc(u32 divider, unsigned int us)
}
}
-static void set_dsi_timings(struct drm_encoder *encoder,
+static void set_dsi_timings(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
@@ -1256,29 +1258,29 @@ static void set_dsi_timings(struct drm_encoder *encoder,
* vactive, as they are calculated per channel basis,
* whereas these values should be based on resolution.
*/
- intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port),
+ intel_de_write(display, BXT_MIPI_TRANS_HACTIVE(port),
adjusted_mode->crtc_hdisplay);
- intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port),
+ intel_de_write(display, BXT_MIPI_TRANS_VACTIVE(port),
adjusted_mode->crtc_vdisplay);
- intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port),
+ intel_de_write(display, BXT_MIPI_TRANS_VTOTAL(port),
adjusted_mode->crtc_vtotal);
}
- intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port),
+ intel_de_write(display, MIPI_HACTIVE_AREA_COUNT(display, port),
hactive);
- intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp);
+ intel_de_write(display, MIPI_HFP_COUNT(display, port), hfp);
/* meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes */
- intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port),
+ intel_de_write(display, MIPI_HSYNC_PADDING_COUNT(display, port),
hsync);
- intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp);
+ intel_de_write(display, MIPI_HBP_COUNT(display, port), hbp);
/* vertical values are in terms of lines */
- intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp);
- intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port),
+ intel_de_write(display, MIPI_VFP_COUNT(display, port), vfp);
+ intel_de_write(display, MIPI_VSYNC_PADDING_COUNT(display, port),
vsync);
- intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp);
+ intel_de_write(display, MIPI_VBP_COUNT(display, port), vbp);
}
}
@@ -1299,21 +1301,20 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
}
}
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;
- drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(crtc->pipe));
+ drm_dbg_kms(display->drm, "pipe %c\n", pipe_name(crtc->pipe));
mode_hdisplay = adjusted_mode->crtc_hdisplay;
@@ -1329,31 +1330,31 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
*/
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(display, MIPI_CTRL(display, PORT_A));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ intel_de_write(display, MIPI_CTRL(display, PORT_A),
tmp | ESCAPE_CLOCK_DIVIDER_1);
/* read request priority is per pipe */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
- intel_de_write(dev_priv, MIPI_CTRL(port),
+ intel_de_write(display, MIPI_CTRL(display, port),
tmp | READ_REQUEST_PRIORITY_HIGH);
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
enum pipe pipe = crtc->pipe;
- intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe));
}
/* XXX: why here, why like this? handling in irq handler?! */
- intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff);
- intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff);
+ intel_de_write(display, MIPI_INTR_STAT(display, port), 0xffffffff);
+ intel_de_write(display, MIPI_INTR_EN(display, port), 0xffffffff);
- intel_de_write(dev_priv, MIPI_DPHY_PARAM(port),
+ intel_de_write(display, MIPI_DPHY_PARAM(display, port),
intel_dsi->dphy_reg);
- intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port),
+ intel_de_write(display, MIPI_DPI_RESOLUTION(display, port),
adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
}
@@ -1381,7 +1382,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_write(display, MIPI_DSI_FUNC_PRG(display, port), val);
/* timeouts for recovery. one frame IIUC. if counter expires,
* EOT and stop state. */
@@ -1402,23 +1403,23 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode == BURST_MODE) {
- intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ intel_de_write(display, MIPI_HS_TX_TIMEOUT(display, port),
txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
} else {
- intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ intel_de_write(display, MIPI_HS_TX_TIMEOUT(display, port),
txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
}
- intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port),
+ intel_de_write(display, MIPI_LP_RX_TIMEOUT(display, port),
intel_dsi->lp_rx_timeout);
- intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port),
+ intel_de_write(display, MIPI_TURN_AROUND_TIMEOUT(display, port),
intel_dsi->turn_arnd_val);
- intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port),
+ intel_de_write(display, MIPI_DEVICE_RESET_TIMER(display, port),
intel_dsi->rst_timer_val);
/* dphy stuff */
/* in terms of low power clock */
- intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ intel_de_write(display, MIPI_INIT_COUNT(display, port),
txclkesc(intel_dsi->escape_clk_div, 100));
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
@@ -1429,16 +1430,16 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* getting used. So write the other port
* if not in dual link mode.
*/
- intel_de_write(dev_priv,
- MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A),
+ intel_de_write(display,
+ MIPI_INIT_COUNT(display, port == PORT_A ? PORT_C : PORT_A),
intel_dsi->init_count);
}
/* recovery disables */
- intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp);
+ intel_de_write(display, MIPI_EOT_DISABLE(display, port), tmp);
/* in terms of low power clock */
- intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ intel_de_write(display, MIPI_INIT_COUNT(display, port),
intel_dsi->init_count);
/* in terms of txbyteclkhs. actual high to low switch +
@@ -1446,7 +1447,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
- intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port),
+ intel_de_write(display, MIPI_HIGH_LOW_SWITCH_COUNT(display, port),
intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock.
@@ -1455,14 +1456,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
* ) / 105.???
*/
- intel_de_write(dev_priv, MIPI_LP_BYTECLK(port),
+ intel_de_write(display, MIPI_LP_BYTECLK(display, port),
intel_dsi->lp_byte_clk);
if (IS_GEMINILAKE(dev_priv)) {
- intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port),
+ intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port),
intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
- intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port),
+ intel_de_write(display, MIPI_CLK_LANE_TIMING(display, port),
intel_dsi->dphy_reg);
}
@@ -1471,10 +1472,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* this register in terms of byte clocks. based on dsi transfer
* rate and the number of lanes configured the time taken to
* transmit 16 long packets in a dsi stream varies. */
- intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port),
+ intel_de_write(display, MIPI_DBI_BW_CTRL(display, port),
intel_dsi->bw_timer);
- intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+ intel_de_write(display, MIPI_CLK_LANE_SWITCH_TIME_CNT(display, port),
intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi)) {
@@ -1502,13 +1503,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
break;
}
- intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port), fmt);
+ intel_de_write(display, MIPI_VIDEO_MODE_FORMAT(display, port), fmt);
}
}
}
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -1518,17 +1520,17 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
- intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(display, MIPI_EOT_DISABLE(display, port), CLOCKSTOP);
- intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0);
+ intel_de_rmw(display, MIPI_DSI_FUNC_PRG(display, port), VID_MODE_FORMAT_MASK, 0);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
+ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x1);
}
}
@@ -1592,8 +1594,7 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
@@ -1879,10 +1880,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
struct intel_dsi *intel_dsi;
- struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
struct drm_display_mode *current_mode;
const struct dmi_system_id *dmi_id;
enum port port;
@@ -1903,64 +1902,61 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
if (!intel_dsi)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(intel_dsi);
return;
}
- intel_encoder = &intel_dsi->base;
- encoder = &intel_encoder->base;
- intel_dsi->attached_connector = intel_connector;
-
- connector = &intel_connector->base;
+ encoder = &intel_dsi->base;
+ intel_dsi->attached_connector = connector;
- drm_encoder_init(&dev_priv->drm, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
- "DSI %c", port_name(port));
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs,
+ DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
- intel_encoder->compute_config = intel_dsi_compute_config;
- intel_encoder->pre_enable = intel_dsi_pre_enable;
+ encoder->compute_config = intel_dsi_compute_config;
+ encoder->pre_enable = intel_dsi_pre_enable;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- intel_encoder->enable = bxt_dsi_enable;
- intel_encoder->disable = intel_dsi_disable;
- intel_encoder->post_disable = intel_dsi_post_disable;
- intel_encoder->get_hw_state = intel_dsi_get_hw_state;
- intel_encoder->get_config = intel_dsi_get_config;
- intel_encoder->update_pipe = intel_backlight_update;
- intel_encoder->shutdown = intel_dsi_shutdown;
+ encoder->enable = bxt_dsi_enable;
+ encoder->disable = intel_dsi_disable;
+ encoder->post_disable = intel_dsi_post_disable;
+ encoder->get_hw_state = intel_dsi_get_hw_state;
+ encoder->get_config = intel_dsi_get_config;
+ encoder->update_pipe = intel_backlight_update;
+ encoder->shutdown = intel_dsi_shutdown;
- intel_connector->get_hw_state = intel_connector_get_hw_state;
+ connector->get_hw_state = intel_connector_get_hw_state;
- intel_encoder->port = port;
- intel_encoder->type = INTEL_OUTPUT_DSI;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI;
- intel_encoder->cloneable = 0;
+ encoder->port = port;
+ encoder->type = INTEL_OUTPUT_DSI;
+ encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ encoder->cloneable = 0;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- intel_encoder->pipe_mask = ~0;
+ encoder->pipe_mask = ~0;
else if (port == PORT_A)
- intel_encoder->pipe_mask = BIT(PIPE_A);
+ encoder->pipe_mask = BIT(PIPE_A);
else
- intel_encoder->pipe_mask = BIT(PIPE_B);
+ encoder->pipe_mask = BIT(PIPE_B);
intel_dsi->panel_power_off_time = ktime_get_boottime();
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(dev_priv, &connector->panel, NULL, NULL);
- if (intel_connector->panel.vbt.dsi.config->dual_link)
+ if (connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
else
intel_dsi->ports = BIT(port);
- if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
- intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+ if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
- if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
- intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+ if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1980,7 +1976,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
/* Use clock read-back from current hw-state for fastboot */
- current_mode = intel_encoder_current_mode(intel_encoder);
+ current_mode = intel_encoder_current_mode(encoder);
if (current_mode) {
drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
intel_dsi->pclk, current_mode->clock);
@@ -1996,22 +1992,22 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
vlv_dphy_param_init(intel_dsi);
intel_dsi_vbt_gpio_init(intel_dsi,
- intel_dsi_get_hw_state(intel_encoder, &pipe));
+ intel_dsi_get_hw_state(encoder, &pipe));
- drm_connector_init(&dev_priv->drm, connector, &intel_dsi_connector_funcs,
+ drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
- drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs);
- connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+ connector->base.display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
- intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_connector_attach_encoder(connector, encoder);
mutex_lock(&dev_priv->drm.mode_config.mutex);
- intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ intel_panel_add_vbt_lfp_fixed_mode(connector);
mutex_unlock(&dev_priv->drm.mode_config.mutex);
- if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+ if (!intel_panel_preferred_fixed_mode(connector)) {
drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
@@ -2024,18 +2020,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
quirk_func(intel_dsi);
}
- intel_panel_init(intel_connector, NULL);
+ intel_panel_init(connector, NULL);
- intel_backlight_setup(intel_connector, INVALID_PIPE);
+ intel_backlight_setup(connector, INVALID_PIPE);
- vlv_dsi_add_properties(intel_connector);
+ vlv_dsi_add_properties(connector);
return;
err_cleanup_connector:
- drm_connector_cleanup(&intel_connector->base);
+ drm_connector_cleanup(&connector->base);
err:
- drm_encoder_cleanup(&intel_encoder->base);
+ drm_encoder_cleanup(&encoder->base);
kfree(intel_dsi);
- kfree(intel_connector);
+ kfree(connector);
}
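
For readers following the conversion above, a condensed sketch of the new accessor pattern (illustrative only, not code from this patch; the function name is made up): register reads and writes now take the struct intel_display pointer, and the VLV/BXT DSI register macros take the same pointer so they can resolve the per-platform MMIO base.

static void example_mipi_ctrl_update(struct intel_encoder *encoder, enum port port)
{
	struct intel_display *display = to_intel_display(encoder);
	u32 tmp;

	/* old form: intel_de_read(dev_priv, MIPI_CTRL(port)) */
	tmp = intel_de_read(display, MIPI_CTRL(display, port));
	tmp &= ~READ_REQUEST_PRIORITY_MASK;
	/* old form: intel_de_write(dev_priv, MIPI_CTRL(port), ...) */
	intel_de_write(display, MIPI_CTRL(display, port),
		       tmp | READ_REQUEST_PRIORITY_HIGH);
}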
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index ae0a0b11bae3..70c5a13a3c75 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -365,13 +365,13 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
- u32 temp;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 temp;
- temp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ temp = intel_de_read(display, MIPI_CTRL(display, port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- intel_de_write(dev_priv, MIPI_CTRL(port),
+ intel_de_write(display, MIPI_CTRL(display, port),
temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
@@ -570,24 +570,24 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
/* Clear old configurations */
if (IS_BROXTON(dev_priv)) {
- tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
- intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp);
} else {
- intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
+ intel_de_rmw(display, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
- intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
+ intel_de_rmw(display, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
}
- intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(display, MIPI_EOT_DISABLE(display, port), CLOCKSTOP);
}
static void assert_dsi_pll(struct drm_i915_private *i915, bool state)
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
index abbe427e462e..c1126d170ec6 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
@@ -11,26 +11,23 @@
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
#define BXT_MIPI_BASE 0x60000
-#define _MIPI_MMIO_BASE(__i915) ((__i915)->display.dsi.mmio_base)
+#define _MIPI_MMIO_BASE(display) ((display)->dsi.mmio_base)
#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
-#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
+#define _MMIO_MIPI(base, port, a, c) _MMIO((base) + _MIPI_PORT(port, a, c))
/* BXT MIPI mode configure */
-#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
-#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
-#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
+#define _BXT_MIPIA_TRANS_HACTIVE 0xb0f8
+#define _BXT_MIPIC_TRANS_HACTIVE 0xb8f8
+#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
-#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
-#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
-#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
+#define _BXT_MIPIA_TRANS_VACTIVE 0xb0fc
+#define _BXT_MIPIC_TRANS_VACTIVE 0xb8fc
+#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
-#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
-#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
-#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \
- _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
+#define _BXT_MIPIA_TRANS_VTOTAL 0xb100
+#define _BXT_MIPIC_TRANS_VTOTAL 0xb900
+#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
#define STAP_SELECT (1 << 0)
@@ -38,14 +35,14 @@
#define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054)
#define HS_IO_CTRL_SELECT (1 << 0)
-#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
-#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
+#define _MIPIA_PORT_CTRL 0x61190
+#define _MIPIC_PORT_CTRL 0x61700
+#define VLV_MIPI_PORT_CTRL(port) _MMIO_MIPI(VLV_MIPI_BASE, port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
/* BXT port control */
-#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
-#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
-#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
+#define _BXT_MIPIA_PORT_CTRL 0xb0c0
+#define _BXT_MIPIC_PORT_CTRL 0xb8c0
+#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
@@ -87,20 +84,17 @@
#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
-#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
-#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
+#define _MIPIA_TEARING_CTRL 0x61194
+#define _MIPIC_TEARING_CTRL 0x61704
+#define VLV_MIPI_TEARING_CTRL(port) _MMIO_MIPI(VLV_MIPI_BASE, port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
-/* XXX: all bits reserved */
-#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
-
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb000)
-#define _MIPIC_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb800)
-#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
+#define _MIPIA_DEVICE_READY 0xb000
+#define _MIPIC_DEVICE_READY 0xb800
+#define MIPI_DEVICE_READY(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@@ -108,12 +102,12 @@
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
-#define _MIPIA_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb004)
-#define _MIPIC_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb804)
-#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
-#define _MIPIA_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb008)
-#define _MIPIC_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb808)
-#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
+#define _MIPIA_INTR_STAT 0xb004
+#define _MIPIC_INTR_STAT 0xb804
+#define MIPI_INTR_STAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
+#define _MIPIA_INTR_EN 0xb008
+#define _MIPIC_INTR_EN 0xb808
+#define MIPI_INTR_EN(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -147,9 +141,9 @@
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
-#define _MIPIA_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb00c)
-#define _MIPIC_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
+#define _MIPIA_DSI_FUNC_PRG 0xb00c
+#define _MIPIC_DSI_FUNC_PRG 0xb80c
+#define MIPI_DSI_FUNC_PRG(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -170,77 +164,77 @@
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb010)
-#define _MIPIC_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
+#define _MIPIA_HS_TX_TIMEOUT 0xb010
+#define _MIPIC_HS_TX_TIMEOUT 0xb810
+#define MIPI_HS_TX_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb014)
-#define _MIPIC_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
+#define _MIPIA_LP_RX_TIMEOUT 0xb014
+#define _MIPIC_LP_RX_TIMEOUT 0xb814
+#define MIPI_LP_RX_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb018)
-#define _MIPIC_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
+#define _MIPIA_TURN_AROUND_TIMEOUT 0xb018
+#define _MIPIC_TURN_AROUND_TIMEOUT 0xb818
+#define MIPI_TURN_AROUND_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
-#define _MIPIA_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb01c)
-#define _MIPIC_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
+#define _MIPIA_DEVICE_RESET_TIMER 0xb01c
+#define _MIPIC_DEVICE_RESET_TIMER 0xb81c
+#define MIPI_DEVICE_RESET_TIMER(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
-#define _MIPIA_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb020)
-#define _MIPIC_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb820)
-#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
+#define _MIPIA_DPI_RESOLUTION 0xb020
+#define _MIPIC_DPI_RESOLUTION 0xb820
+#define MIPI_DPI_RESOLUTION(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb024)
-#define _MIPIC_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
+#define _MIPIA_DBI_FIFO_THROTTLE 0xb024
+#define _MIPIC_DBI_FIFO_THROTTLE 0xb824
+#define MIPI_DBI_FIFO_THROTTLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb028)
-#define _MIPIC_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
+#define _MIPIA_HSYNC_PADDING_COUNT 0xb028
+#define _MIPIC_HSYNC_PADDING_COUNT 0xb828
+#define MIPI_HSYNC_PADDING_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
-#define _MIPIA_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb02c)
-#define _MIPIC_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb82c)
-#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
+#define _MIPIA_HBP_COUNT 0xb02c
+#define _MIPIC_HBP_COUNT 0xb82c
+#define MIPI_HBP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
-#define _MIPIA_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb030)
-#define _MIPIC_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb830)
-#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
+#define _MIPIA_HFP_COUNT 0xb030
+#define _MIPIC_HFP_COUNT 0xb830
+#define MIPI_HFP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
-#define _MIPIA_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb034)
-#define _MIPIC_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
+#define _MIPIA_HACTIVE_AREA_COUNT 0xb034
+#define _MIPIC_HACTIVE_AREA_COUNT 0xb834
+#define MIPI_HACTIVE_AREA_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
-#define _MIPIA_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb038)
-#define _MIPIC_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
+#define _MIPIA_VSYNC_PADDING_COUNT 0xb038
+#define _MIPIC_VSYNC_PADDING_COUNT 0xb838
+#define MIPI_VSYNC_PADDING_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
-#define _MIPIA_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb03c)
-#define _MIPIC_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb83c)
-#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
+#define _MIPIA_VBP_COUNT 0xb03c
+#define _MIPIC_VBP_COUNT 0xb83c
+#define MIPI_VBP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
-#define _MIPIA_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb040)
-#define _MIPIC_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb840)
-#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
+#define _MIPIA_VFP_COUNT 0xb040
+#define _MIPIC_VFP_COUNT 0xb840
+#define MIPI_VFP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb044)
-#define _MIPIC_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT 0xb044
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT 0xb844
+#define MIPI_HIGH_LOW_SWITCH_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
-#define _MIPIA_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb048)
-#define _MIPIC_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb848)
-#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
+#define _MIPIA_DPI_CONTROL 0xb048
+#define _MIPIC_DPI_CONTROL 0xb848
+#define MIPI_DPI_CONTROL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@@ -249,28 +243,27 @@
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
-#define _MIPIA_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb04c)
-#define _MIPIC_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb84c)
-#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
+#define _MIPIA_DPI_DATA 0xb04c
+#define _MIPIC_DPI_DATA 0xb84c
+#define MIPI_DPI_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
-#define _MIPIA_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb050)
-#define _MIPIC_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb850)
-#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
+#define _MIPIA_INIT_COUNT 0xb050
+#define _MIPIC_INIT_COUNT 0xb850
+#define MIPI_INIT_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb054)
-#define _MIPIC_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
- _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
+#define _MIPIA_MAX_RETURN_PKT_SIZE 0xb054
+#define _MIPIC_MAX_RETURN_PKT_SIZE 0xb854
+#define MIPI_MAX_RETURN_PKT_SIZE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb058)
-#define _MIPIC_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
+#define _MIPIA_VIDEO_MODE_FORMAT 0xb058
+#define _MIPIC_VIDEO_MODE_FORMAT 0xb858
+#define MIPI_VIDEO_MODE_FORMAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@@ -278,9 +271,9 @@
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
-#define _MIPIA_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb05c)
-#define _MIPIC_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb85c)
-#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
+#define _MIPIA_EOT_DISABLE 0xb05c
+#define _MIPIC_EOT_DISABLE 0xb85c
+#define MIPI_EOT_DISABLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
#define BXT_DPHY_DEFEATURE_EN (1 << 8)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
@@ -292,36 +285,36 @@
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
-#define _MIPIA_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb060)
-#define _MIPIC_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb860)
-#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
+#define _MIPIA_LP_BYTECLK 0xb060
+#define _MIPIC_LP_BYTECLK 0xb860
+#define MIPI_LP_BYTECLK(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
-#define _MIPIA_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb0a4)
-#define _MIPIC_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb8a4)
-#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
+#define _MIPIA_TLPX_TIME_COUNT 0xb0a4
+#define _MIPIC_TLPX_TIME_COUNT 0xb8a4
+#define MIPI_TLPX_TIME_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
-#define _MIPIA_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb098)
-#define _MIPIC_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb898)
-#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
+#define _MIPIA_CLK_LANE_TIMING 0xb098
+#define _MIPIC_CLK_LANE_TIMING 0xb898
+#define MIPI_CLK_LANE_TIMING(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb064)
-#define _MIPIC_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb864)
-#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
+#define _MIPIA_LP_GEN_DATA 0xb064
+#define _MIPIC_LP_GEN_DATA 0xb864
+#define MIPI_LP_GEN_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb068)
-#define _MIPIC_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb868)
-#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
-
-#define _MIPIA_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb06c)
-#define _MIPIC_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb86c)
-#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb070)
-#define _MIPIC_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb870)
-#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
+#define _MIPIA_HS_GEN_DATA 0xb068
+#define _MIPIC_HS_GEN_DATA 0xb868
+#define MIPI_HS_GEN_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL 0xb06c
+#define _MIPIC_LP_GEN_CTRL 0xb86c
+#define MIPI_LP_GEN_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL 0xb070
+#define _MIPIC_HS_GEN_CTRL 0xb870
+#define MIPI_HS_GEN_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@@ -332,9 +325,9 @@
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb074)
-#define _MIPIC_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb874)
-#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
+#define _MIPIA_GEN_FIFO_STAT 0xb074
+#define _MIPIC_GEN_FIFO_STAT 0xb874
+#define MIPI_GEN_FIFO_STAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -350,16 +343,16 @@
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb078)
-#define _MIPIC_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
+#define _MIPIA_HS_LS_DBI_ENABLE 0xb078
+#define _MIPIC_HS_LS_DBI_ENABLE 0xb878
+#define MIPI_HS_LP_DBI_ENABLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
-#define _MIPIA_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb080)
-#define _MIPIC_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb880)
-#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
+#define _MIPIA_DPHY_PARAM 0xb080
+#define _MIPIC_DPHY_PARAM 0xb880
+#define MIPI_DPHY_PARAM(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@@ -369,34 +362,34 @@
#define PREPARE_COUNT_SHIFT 0
#define PREPARE_COUNT_MASK (0x3f << 0)
-#define _MIPIA_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb084)
-#define _MIPIC_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb884)
-#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
+#define _MIPIA_DBI_BW_CTRL 0xb084
+#define _MIPIC_DBI_BW_CTRL 0xb884
+#define MIPI_DBI_BW_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb088)
-#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT 0xb088
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT 0xb888
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb08c)
-#define _MIPIC_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb88c)
-#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
+#define _MIPIA_STOP_STATE_STALL 0xb08c
+#define _MIPIC_STOP_STATE_STALL 0xb88c
+#define MIPI_STOP_STATE_STALL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb090)
-#define _MIPIC_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb890)
-#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb094)
-#define _MIPIC_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb894)
-#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
+#define _MIPIA_INTR_STAT_REG_1 0xb090
+#define _MIPIC_INTR_STAT_REG_1 0xb890
+#define MIPI_INTR_STAT_REG_1(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 0xb094
+#define _MIPIC_INTR_EN_REG_1 0xb894
+#define MIPI_INTR_EN_REG_1(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL(display) (_MIPI_MMIO_BASE(display) + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
@@ -409,9 +402,9 @@
/* MIPI adapter registers */
-#define _MIPIA_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb104)
-#define _MIPIC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb904)
-#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
+#define _MIPIA_CTRL 0xb104
+#define _MIPIC_CTRL 0xb904
+#define MIPI_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CTRL, _MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -442,41 +435,41 @@
#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
#define GLK_MIPIIO_ENABLE (1 << 0)
-#define _MIPIA_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb108)
-#define _MIPIC_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb908)
-#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
+#define _MIPIA_DATA_ADDRESS 0xb108
+#define _MIPIC_DATA_ADDRESS 0xb908
+#define MIPI_DATA_ADDRESS(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
-#define _MIPIA_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb10c)
-#define _MIPIC_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb90c)
-#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
+#define _MIPIA_DATA_LENGTH 0xb10c
+#define _MIPIC_DATA_LENGTH 0xb90c
+#define MIPI_DATA_LENGTH(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb110)
-#define _MIPIC_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb910)
-#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
+#define _MIPIA_COMMAND_ADDRESS 0xb110
+#define _MIPIC_COMMAND_ADDRESS 0xb910
+#define MIPI_COMMAND_ADDRESS(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
-#define _MIPIA_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb114)
-#define _MIPIC_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb914)
-#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
+#define _MIPIA_COMMAND_LENGTH 0xb114
+#define _MIPIC_COMMAND_LENGTH 0xb914
+#define MIPI_COMMAND_LENGTH(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb118)
-#define _MIPIC_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb918)
-#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+#define _MIPIA_READ_DATA_RETURN0 0xb118
+#define _MIPIC_READ_DATA_RETURN0 0xb918
+#define MIPI_READ_DATA_RETURN(display, port, n) _MMIO_MIPI(_MIPI_MMIO_BASE(display) + 4 * (n), port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb138)
-#define _MIPIC_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb938)
-#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
+#define _MIPIA_READ_DATA_VALID 0xb138
+#define _MIPIC_READ_DATA_VALID 0xb938
+#define MIPI_READ_DATA_VALID(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
#endif /* __VLV_DSI_REGS_H__ */
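
An illustrative expansion of the reworked register macros above, derived purely from the definitions in this header (not part of the patch): the per-port helpers now pass an explicit base to _MMIO_MIPI(), so the same offsets serve VLV, BXT and the display-struct-relative MIPI block.

/*
 * MIPI_CTRL(display, PORT_C)
 *   -> _MMIO_MIPI(_MIPI_MMIO_BASE(display), PORT_C, 0xb104, 0xb904)
 *   -> _MMIO((display)->dsi.mmio_base + 0xb904)
 *
 * BXT_MIPI_PORT_CTRL(PORT_A)
 *   -> _MMIO_MIPI(BXT_MIPI_BASE, PORT_A, 0xb0c0, 0xb8c0)
 *   -> _MMIO(0x60000 + 0xb0c0), i.e. the old absolute 0x6b0c0
 *
 * VLV_MIPI_PORT_CTRL(PORT_A)
 *   -> _MMIO(VLV_MIPI_BASE + 0x61190)
 */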
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dcbfe32fd30c..81f65cab1330 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -879,6 +879,7 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
struct drm_i915_gem_context_param *args)
{
+ struct drm_i915_private *i915 = fpriv->i915;
int ret = 0;
switch (args->param) {
@@ -904,6 +905,13 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
break;
+ case I915_CONTEXT_PARAM_LOW_LATENCY:
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ pc->user_flags |= BIT(UCONTEXT_LOW_LATENCY);
+ else
+ ret = -EINVAL;
+ break;
+
case I915_CONTEXT_PARAM_RECOVERABLE:
if (args->size)
ret = -EINVAL;
@@ -992,6 +1000,9 @@ static int intel_context_set_gem(struct intel_context *ce,
if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
ret = intel_context_reconfigure_sseu(ce, sseu);
+ if (test_bit(UCONTEXT_LOW_LATENCY, &ctx->user_flags))
+ __set_bit(CONTEXT_LOW_LATENCY, &ce->flags);
+
return ret;
}
@@ -1630,6 +1641,9 @@ i915_gem_create_context(struct drm_i915_private *i915,
if (vm)
ctx->vm = vm;
+ /* Assign early so intel_context_set_gem can access these flags */
+ ctx->user_flags = pc->user_flags;
+
mutex_init(&ctx->engines_mutex);
if (pc->num_user_engines >= 0) {
i915_gem_context_set_user_engines(ctx);
@@ -1652,8 +1666,6 @@ i915_gem_create_context(struct drm_i915_private *i915,
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(i915);
- ctx->user_flags = pc->user_flags;
-
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
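
A hypothetical userspace sketch of the new context parameter (not part of this patch; it assumes the matching I915_CONTEXT_PARAM_LOW_LATENCY definition in the uapi header). As the hunk above shows, the kernel only accepts the parameter when GuC submission is in use and returns -EINVAL otherwise.

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/*
 * Opt a newly created GEM context into the low-latency hint; the hunk
 * shown handles the proto-context path of CONTEXT_SETPARAM.
 */
static int i915_ctx_set_low_latency(int drm_fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_LOW_LATENCY,
		.value = 1,	/* value semantics assumed; the hunk only checks the param */
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}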
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 03bc7f9d191b..b6d97da63d1f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -338,6 +338,7 @@ struct i915_gem_context {
#define UCONTEXT_BANNABLE 2
#define UCONTEXT_RECOVERABLE 3
#define UCONTEXT_PERSISTENCE 4
+#define UCONTEXT_LOW_LATENCY 5
/**
* @flags: small set of booleans
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d3a771afb083..090724fa766c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2457,7 +2457,7 @@ static int eb_submit(struct i915_execbuffer *eb)
* The engine index is returned.
*/
static unsigned int
-gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
+gen8_dispatch_bsd_engine(struct drm_i915_private *i915,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2465,7 +2465,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- get_random_u32_below(dev_priv->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
+ get_random_u32_below(i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
return file_priv->bsd_engine;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3560a062d287..5d7446a48ae7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -284,7 +284,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
+ /* TODO: make DPT shrinkable when it has no bound vmas */
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
+ !obj->is_dpt;
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 0c5cdab278b6..1495b6074492 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -386,7 +386,7 @@ struct drm_i915_gem_object {
* and kernel mode driver for caching policy control after GEN12.
* In the meantime platform specific tables are created to translate
* i915_cache_level into pat index, for more details check the macros
- * defined i915/i915_pci.c, e.g. PVC_CACHELEVEL.
+ * defined in i915/i915_pci.c, e.g. TGL_CACHELEVEL.
* For backward compatibility, this field contains values exactly match
* the entries of enum i915_cache_level for pre-GEN12 platforms (See
* LEGACY_CACHELEVEL), so that the PTE encode functions for these
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 0ba955611dfb..8780aa243105 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_cache.h>
+#include <linux/vmalloc.h>
#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 38b72d86560f..c5e1c718a6d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -654,7 +654,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
-i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
+i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
const void *data, resource_size_t size)
{
struct drm_i915_gem_object *obj;
@@ -663,8 +663,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
resource_size_t offset;
int err;
- GEM_WARN_ON(IS_DGFX(dev_priv));
- obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
+ GEM_WARN_ON(IS_DGFX(i915));
+ obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
if (IS_ERR(obj))
return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 258381d1c054..dfe0db8bb1b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -14,14 +14,14 @@ struct drm_i915_gem_object;
#define i915_stolen_fb drm_mm_node
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
struct drm_mm_node *node);
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
@@ -31,7 +31,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
u16 instance);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size);
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index a049ca0b7980..d9eb84c1d2f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -343,12 +343,12 @@ int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_gem_object *obj;
int err;
- if (!to_gt(dev_priv)->ggtt->num_fences)
+ if (!to_gt(i915)->ggtt->num_fences)
return -EOPNOTSUPP;
obj = i915_gem_object_lookup(file, args->handle);
@@ -374,9 +374,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x;
else
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -427,11 +427,11 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_object *obj;
int err = -ENOENT;
- if (!to_gt(dev_priv)->ggtt->num_fences)
+ if (!to_gt(i915)->ggtt->num_fences)
return -EOPNOTSUPP;
rcu_read_lock();
@@ -447,10 +447,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
switch (args->tiling_mode) {
case I915_TILING_X:
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x;
break;
case I915_TILING_Y:
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
@@ -459,7 +459,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
else
args->phys_swizzle_mode = args->swizzle_mode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 61abfb505766..09b68713ab32 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -463,13 +463,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
struct drm_file *file)
{
static struct lock_class_key __maybe_unused lock_class;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object __maybe_unused *obj;
int __maybe_unused ret;
u32 __maybe_unused handle;
- if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) {
/* We cannot support coherent userptr objects on hw without
* LLC and broken snooping.
*/
@@ -501,7 +501,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- if (!to_gt(dev_priv)->vm->has_read_only)
+ if (!to_gt(i915)->vm->has_read_only)
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 3ff3d8889c6c..84d41e6ccf05 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -713,7 +713,7 @@ static int igt_ppgtt_huge_fill(void *arg)
{
struct drm_i915_private *i915 = arg;
unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
- bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+ bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long max_pages;
@@ -857,7 +857,7 @@ out:
static int igt_ppgtt_64K(void *arg)
{
struct drm_i915_private *i915 = arg;
- bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+ bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
@@ -1969,19 +1969,19 @@ int i915_gem_huge_page_mock_selftests(void)
SUBTEST(igt_mock_memory_region_huge_pages),
SUBTEST(igt_mock_ppgtt_misaligned_dma),
};
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
struct i915_ppgtt *ppgtt;
int err;
- dev_priv = mock_gem_device();
- if (!dev_priv)
+ i915 = mock_gem_device();
+ if (!i915)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
- RUNTIME_INFO(dev_priv)->ppgtt_size = 48;
+ RUNTIME_INFO(i915)->ppgtt_type = INTEL_PPGTT_FULL;
+ RUNTIME_INFO(i915)->ppgtt_size = 48;
- ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
+ ppgtt = i915_ppgtt_create(to_gt(i915), 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -2005,7 +2005,7 @@ int i915_gem_huge_page_mock_selftests(void)
out_put:
i915_vm_put(&ppgtt->vm);
out_unlock:
- mock_destroy_device(dev_priv);
+ mock_destroy_device(i915);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 10a7847f1b04..bac15196b4d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -117,7 +117,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
if (gen < 12)
return true;
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return false;
return HAS_DISPLAY(i915);
@@ -166,7 +166,7 @@ static int prepare_blit(const struct tiled_blits *t,
src_pitch = t->width; /* in dwords */
if (src->tiling == CLIENT_TILING_Y) {
src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
- if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
} else if (src->tiling == CLIENT_TILING_X) {
src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
@@ -177,7 +177,7 @@ static int prepare_blit(const struct tiled_blits *t,
dst_pitch = t->width; /* in dwords */
if (dst->tiling == CLIENT_TILING_Y) {
dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
- if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
} else if (dst->tiling == CLIENT_TILING_X) {
dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
@@ -365,7 +365,7 @@ static u64 tiled_offset(const struct intel_gt *gt,
v += x;
swizzle = gt->ggtt->bit_6_swizzle_x;
- } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
/* Y-major tiling layout is Tile4 for Xe_HP and beyond */
v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index d684a70f2c04..3527b8f446fe 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_selftest.h"
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "mock_context.h"
#include "mock_dmabuf.h"
@@ -155,6 +156,7 @@ static int verify_access(struct drm_i915_private *i915,
struct file *file;
u32 *vaddr;
int err = 0, i;
+ unsigned int mode;
file = mock_file(i915);
if (IS_ERR(file))
@@ -194,7 +196,8 @@ static int verify_access(struct drm_i915_private *i915,
if (err)
goto out_file;
- vaddr = i915_gem_object_pin_map_unlocked(native_obj, I915_MAP_WB);
+ mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, false);
+ vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto out_file;
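
A hedged, self-contained sketch of the map-type choice the selftest now delegates to intel_gt_coherent_map_type(): a write-back mapping is only safe when the CPU cache stays coherent with GPU access, otherwise fall back to write-combined. The predicate below is an illustrative stand-in, not the driver's exact rule.

#include <stdbool.h>
#include <stdio.h>

enum map_type { MAP_WB, MAP_WC };

/* Illustrative stand-in: WB only when CPU caches stay coherent with the GPU
 * (LLC or snooped platforms), WC otherwise. */
static enum map_type coherent_map_type(bool cache_coherent)
{
	return cache_coherent ? MAP_WB : MAP_WC;
}

int main(void)
{
	printf("coherent:     %s\n", coherent_map_type(true) == MAP_WB ? "WB" : "WC");
	printf("non-coherent: %s\n", coherent_map_type(false) == MAP_WB ? "WB" : "WC");
	return 0;
}
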
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index b2a5882b8f81..075657018739 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -4,6 +4,7 @@
* Copyright © 2016 Intel Corporation
*/
+#include <linux/vmalloc.h>
#include "mock_dmabuf.h"
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index e1bf13e3d307..e9f65f27b53f 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -189,9 +189,6 @@ static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
{
i915_reg_t reg = gen12_get_aux_inv_reg(engine);
- if (IS_PONTEVECCHIO(engine->i915))
- return false;
-
/*
* So far platforms supported by i915 having flat ccs do not require
* AUX invalidation. Check also whether the engine requires it.
@@ -743,21 +740,25 @@ static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
}
/* Wa_14014475959:dg2 */
-#define CCS_SEMAPHORE_PPHWSP_OFFSET 0x540
-static u32 ccs_semaphore_offset(struct i915_request *rq)
+/* Wa_16019325821 */
+/* Wa_14019159160 */
+#define HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET 0x540
+static u32 hold_switchout_semaphore_offset(struct i915_request *rq)
{
return i915_ggtt_offset(rq->context->state) +
- (LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
+ (LRC_PPHWSP_PN * PAGE_SIZE) + HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET;
}
/* Wa_14014475959:dg2 */
-static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
+/* Wa_16019325821 */
+/* Wa_14019159160 */
+static u32 *hold_switchout_emit_wa_busywait(struct i915_request *rq, u32 *cs)
{
int i;
*cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
MI_ATOMIC_MOVE;
- *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = hold_switchout_semaphore_offset(rq);
*cs++ = 0;
*cs++ = 1;
@@ -773,7 +774,7 @@ static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
- *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = hold_switchout_semaphore_offset(rq);
*cs++ = 0;
return cs;
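
The semaphore polled by the busy-wait above lives at a fixed offset inside the per-process HWSP page of the context image. A minimal arithmetic sketch of that address computation follows; only the 0x540 offset is taken from the hunk above, while the GGTT offset, page size and PPHWSP page index are hypothetical placeholders.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SZ	4096u	/* hypothetical 4 KiB page */
#define PPHWSP_PN	1u	/* hypothetical page index of the PPHWSP in the context image */
#define HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET 0x540u

int main(void)
{
	uint32_t ctx_ggtt_offset = 0x00100000u;	/* placeholder for i915_ggtt_offset(rq->context->state) */
	uint32_t sema = ctx_ggtt_offset +
			PPHWSP_PN * PAGE_SIZE_SZ +
			HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET;

	printf("semaphore address: 0x%08x\n", sema);	/* 0x00101540 with these placeholders */
	return 0;
}
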
@@ -790,8 +791,10 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
cs = gen12_emit_preempt_busywait(rq, cs);
/* Wa_14014475959:dg2 */
- if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
- cs = ccs_emit_wa_busywait(rq, cs);
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if (intel_engine_uses_wa_hold_switchout(rq->engine))
+ cs = hold_switchout_emit_wa_busywait(rq, cs);
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -827,7 +830,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
cs = gen12_emit_pipe_control(cs, 0,
PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
- if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
/* Wa_1409600907 */
flags |= PIPE_CONTROL_DEPTH_STALL;
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index fa46d2308b0e..398d60a66410 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -500,11 +500,11 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
}
static void
-xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- struct sgt_dma *iter,
- unsigned int pat_index,
- u32 flags)
+xehp_ppgtt_insert_huge(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ struct sgt_dma *iter,
+ unsigned int pat_index,
+ u32 flags)
{
const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
unsigned int rem = sg_dma_len(iter->sg);
@@ -741,8 +741,8 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma_res);
if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
- if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 50))
- xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
+ if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 55))
+ xehp_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
else
gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
} else {
@@ -781,11 +781,11 @@ static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}
-static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- unsigned int pat_index,
- u32 flags)
+static void xehp_ppgtt_insert_entry_lm(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ unsigned int pat_index,
+ u32 flags)
{
u64 idx = offset >> GEN8_PTE_SHIFT;
struct i915_page_directory * const pdp =
@@ -810,15 +810,15 @@ static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags);
}
-static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- unsigned int pat_index,
- u32 flags)
+static void xehp_ppgtt_insert_entry(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ unsigned int pat_index,
+ u32 flags)
{
if (flags & PTE_LM)
- return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset,
- pat_index, flags);
+ return xehp_ppgtt_insert_entry_lm(vm, addr, offset,
+ pat_index, flags);
return gen8_ppgtt_insert_entry(vm, addr, offset, pat_index, flags);
}
@@ -961,6 +961,9 @@ static int gen8_init_rsvd(struct i915_address_space *vm)
struct i915_vma *vma;
int ret;
+ if (!intel_gt_needs_wa_16018031267(vm->gt))
+ return 0;
+
/* The memory will be used only by GPU. */
obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
I915_BO_ALLOC_VOLATILE |
@@ -1042,7 +1045,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
if (HAS_64K_PAGES(gt->i915))
- ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry;
+ ppgtt->vm.insert_page = xehp_ppgtt_insert_entry;
else
ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d650beb8ed22..20b9b04ec1e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -263,8 +263,13 @@ static void signal_irq_work(struct irq_work *work)
i915_request_put(rq);
}
+ /* Lazy irq enabling after HW submission */
if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
intel_breadcrumbs_arm_irq(b);
+
+ /* And confirm that we still want irqs enabled before we yield */
+ if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
+ intel_breadcrumbs_disarm_irq(b);
}
struct intel_breadcrumbs *
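
Taken together with the simplified park path below, the change above moves irq disarming into the signaling worker itself. A driver-agnostic sketch of that arm/disarm ordering, using a stand-in structure rather than the real intel_breadcrumbs and assuming single-threaded access for illustration only:

#include <stdbool.h>

struct fake_breadcrumbs {
	bool irq_armed;		/* stand-in for b->irq_armed */
	int  active;		/* stand-in for atomic_read(&b->active) */
	bool have_signalers;	/* stand-in for !list_empty(&b->signalers) */
};

/* Sketch of the worker: arm lazily while requests still need signaling,
 * and disarm from the same worker once the user has parked (active == 0). */
static void fake_signal_work(struct fake_breadcrumbs *b)
{
	/* ... signal completed requests here ... */

	if (!b->irq_armed && b->have_signalers)
		b->irq_armed = true;	/* lazy irq enabling after HW submission */

	if (b->irq_armed && !b->active)
		b->irq_armed = false;	/* confirm irqs are still wanted before yielding */
}

int main(void)
{
	struct fake_breadcrumbs b = { .irq_armed = false, .active = 0, .have_signalers = false };

	fake_signal_work(&b);	/* parked and idle: stays disarmed */
	return b.irq_armed ? 1 : 0;
}
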
@@ -315,13 +320,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
return;
/* Kick the work once more to drain the signalers, and disarm the irq */
- irq_work_sync(&b->irq_work);
- while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
- local_irq_disable();
- signal_irq_work(&b->irq_work);
- local_irq_enable();
- cond_resched();
- }
+ irq_work_queue(&b->irq_work);
}
void intel_breadcrumbs_free(struct kref *kref)
@@ -404,7 +403,7 @@ static void insert_breadcrumb(struct i915_request *rq)
* the request as it may have completed and raised the interrupt as
* we were attaching it into the lists.
*/
- if (!b->irq_armed || __i915_request_is_complete(rq))
+ if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
irq_work_queue(&b->irq_work);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 7eccbd70d89f..ed95a7b57cbb 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -130,6 +130,7 @@ struct intel_context {
#define CONTEXT_PERMA_PIN 11
#define CONTEXT_IS_PARKING 12
#define CONTEXT_EXITING 13
+#define CONTEXT_LOW_LATENCY 14
struct {
u64 timeout_us;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 1ade568ffbfa..3b740ca25000 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -497,9 +497,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
engine->logical_mask = BIT(logical_instance);
__sprint_engine_name(engine);
- if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
- __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
- engine->class == RENDER_CLASS)
+ if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
+ __ffs(CCS_MASK(engine->gt) | RCS_MASK(engine->gt)) == engine->instance)
engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
/* features common between engines sharing EUs */
@@ -589,7 +588,7 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
* NB: The GuC API only supports 32bit values. However, the limit is further
* reduced due to internal calculations which would otherwise overflow.
*/
- if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
+ if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
@@ -610,7 +609,7 @@ u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
* NB: The GuC API only supports 32bit values. However, the limit is further
* reduced due to internal calculations which would otherwise overflow.
*/
- if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
+ if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
@@ -679,7 +678,7 @@ void intel_engines_release(struct intel_gt *gt)
*/
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
@@ -765,14 +764,14 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt)
* and bits have disable semantics.
*/
media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
- if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
+ if (MEDIA_VER_FULL(i915) < IP_VER(12, 55))
media_fuse = ~media_fuse;
vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
- if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
+ if (MEDIA_VER_FULL(i915) >= IP_VER(12, 55)) {
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
} else {
@@ -839,38 +838,6 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
}
}
-static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_gt_info *info = &gt->info;
- unsigned long meml3_mask;
- unsigned long quad;
-
- if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
- GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
- return;
-
- meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
- meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
-
- /*
- * Link Copy engines may be fused off according to meml3_mask. Each
- * bit is a quad that houses 2 Link Copy and two Sub Copy engines.
- */
- for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
- unsigned int instance = quad * 2 + 1;
- intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
- _BCS(instance));
-
- if (mask & info->engine_mask) {
- gt_dbg(gt, "bcs%u fused off\n", instance);
- gt_dbg(gt, "bcs%u fused off\n", instance + 1);
-
- info->engine_mask &= ~mask;
- }
- }
-}
-
/*
* Determine which engines are fused off in our particular hardware.
* Note that we have a catch-22 situation where we need to be able to access
@@ -889,7 +856,6 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
engine_mask_apply_media_fuses(gt);
engine_mask_apply_compute_fuses(gt);
- engine_mask_apply_copy_fuses(gt);
/*
* The only use of the GSC CS is to load and communicate with the GSC
@@ -908,6 +874,29 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
info->engine_mask &= ~BIT(GSC0);
}
+ /*
+	 * Do not create a command streamer for CCS slices beyond the first.
+	 * All workloads submitted to the first engine will be shared among
+	 * all the slices.
+	 *
+	 * Once the user is allowed to customize the CCS mode, this check
+	 * needs to be removed.
+ */
+ if (IS_DG2(gt->i915)) {
+ u8 first_ccs = __ffs(CCS_MASK(gt));
+
+ /*
+		 * Store the mask of active cslices before
+		 * changing the CCS engine configuration
+ */
+ gt->ccs.cslices = CCS_MASK(gt);
+
+		/* Mask off all the CCS engines */
+ info->engine_mask &= ~GENMASK(CCS3, CCS0);
+ /* Put back in the first CCS engine */
+ info->engine_mask |= BIT(_CCS(first_ccs));
+ }
+
return info->engine_mask;
}
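
A self-contained sketch of the mask manipulation performed above: drop every CCS engine from the engine mask and put back only the lowest-numbered one. The bit layout is simplified for illustration (a caller-supplied block of CCS bits) and the helper name is hypothetical; the real code goes through _CCS() and GENMASK(CCS3, CCS0).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: keep only the first CCS engine in an engine mask.
 * ccs_bits holds the positions of the CCS engines within engine_mask. */
static uint32_t keep_first_ccs(uint32_t engine_mask, uint32_t ccs_bits)
{
	uint32_t present = engine_mask & ccs_bits;
	uint32_t first = present & -present;	/* isolate the lowest set CCS bit */

	engine_mask &= ~ccs_bits;		/* mask off all the CCS engines */
	return engine_mask | first;		/* put back the first CCS engine */
}

int main(void)
{
	/* Example: CCS0..CCS3 occupy bits 4..7, and CCS1 plus CCS3 are present. */
	uint32_t mask = keep_first_ccs(0x000000a1u, 0x000000f0u);

	printf("engine mask: 0x%08x\n", mask);	/* 0x00000021: CCS1 kept, CCS3 dropped */
	return 0;
}
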
@@ -1193,7 +1182,6 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
- GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
regs = xehp_regs;
num = ARRAY_SIZE(xehp_regs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 96bdb93a948d..fb7bff27b45a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -279,9 +279,6 @@ static int __engine_park(struct intel_wakeref *wf)
intel_engine_park_heartbeat(engine);
intel_breadcrumbs_park(engine->breadcrumbs);
- /* Must be reset upon idling, or we may miss the busy wakeup. */
- GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
-
if (engine->park)
engine->park(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 960e6be2042f..ba55c059063d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -586,7 +586,7 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_RCS_REG_STATE BIT(9)
#define I915_ENGINE_HAS_EU_PRIORITY BIT(10)
#define I915_ENGINE_FIRST_RENDER_COMPUTE BIT(11)
-#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT BIT(12)
+#define I915_ENGINE_USES_WA_HOLD_SWITCHOUT BIT(12)
unsigned int flags;
/*
@@ -696,10 +696,12 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
}
/* Wa_14014475959:dg2 */
+/* Wa_16019325821 */
+/* Wa_14019159160 */
static inline bool
-intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine)
+intel_engine_uses_wa_hold_switchout(struct intel_engine_cs *engine)
{
- return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ return engine->flags & I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
}
#endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 42aade0faf2d..21829439e686 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -493,7 +493,7 @@ __execlists_schedule_in(struct i915_request *rq)
/* Use a fixed tag for OA and friends */
GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
ce->lrc.ccid = ce->tag;
- } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
/* We don't need a strict matching tag, just different values */
unsigned int tag = ffs(READ_ONCE(engine->context_tag));
@@ -613,7 +613,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
intel_engine_add_retire(engine, ce->timeline);
ccid = ce->lrc.ccid;
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
ccid >>= XEHP_SW_CTX_ID_SHIFT - 32;
ccid &= XEHP_MAX_CONTEXT_HW_ID;
} else {
@@ -1907,7 +1907,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
head, upper_32_bits(csb), lower_32_bits(csb));
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
promote = xehp_csb_parse(csb);
else if (GRAPHICS_VER(engine->i915) >= 12)
promote = gen12_csb_parse(csb);
@@ -2898,7 +2898,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine)
drm_err(&engine->i915->drm,
"engine '%s' resumed still in error: %08x\n",
engine->name, status);
- __intel_gt_reset(engine->gt, engine->mask);
+ intel_gt_reset_engine(engine);
}
/*
@@ -3272,6 +3272,9 @@ static void execlists_park(struct intel_engine_cs *engine)
{
cancel_timer(&engine->execlists.timer);
cancel_timer(&engine->execlists.preempt);
+
+ /* Reset upon idling, or we may delay the busy wakeup. */
+ WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
}
static void add_to_engine(struct i915_request *rq)
@@ -3479,7 +3482,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
}
}
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
if (intel_engine_has_preemption(engine))
engine->emit_bb_start = xehp_emit_bb_start;
else
@@ -3582,7 +3585,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
if (GRAPHICS_VER(engine->i915) >= 11 &&
- GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 50)) {
+ GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 55)) {
execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index ec1cbe229f0e..0d0a0dc9f610 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -231,11 +231,8 @@ static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
- struct intel_guc *guc = &gt->uc.guc;
-
- intel_guc_invalidate_tlb_guc(guc);
- }
+ with_intel_runtime_pm_if_active(uncore->rpm, wakeref)
+ intel_guc_invalidate_tlb_guc(gt_to_guc(gt));
}
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -246,7 +243,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
- if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc))
+ if (intel_guc_tlb_invalidation_is_available(gt_to_guc(gt)))
guc_ggtt_ct_invalidate(gt);
else if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(gt->uncore,
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index 6d440de8ba01..1e925c75fb08 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -103,19 +103,6 @@ static const struct gsc_def gsc_def_dg1[] = {
}
};
-static const struct gsc_def gsc_def_xehpsdv[] = {
- {
- /* HECI1 not enabled on the device. */
- },
- {
- .name = "mei-gscfi",
- .bar = DG1_GSC_HECI2_BASE,
- .bar_size = GSC_BAR_LENGTH,
- .use_polling = true,
- .slow_firmware = true,
- }
-};
-
static const struct gsc_def gsc_def_dg2[] = {
{
.name = "mei-gsc",
@@ -188,8 +175,6 @@ static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
if (IS_DG1(i915)) {
def = &gsc_def_dg1[intf_id];
- } else if (IS_XEHPSDV(i915)) {
- def = &gsc_def_xehpsdv[intf_id];
} else if (IS_DG2(i915)) {
def = &gsc_def_dg2[intf_id];
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index a425db5ed3a2..626b166e67ef 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -278,7 +278,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
intel_uncore_posting_read(uncore,
XELPMP_RING_FAULT_REG);
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
RING_FAULT_VALID, 0);
intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
@@ -403,7 +403,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
/* From GEN8 onwards we only have one 'All Engine Fault Register' */
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_check_faults(gt);
else if (GRAPHICS_VER(i915) >= 8)
gen8_check_faults(gt);
@@ -832,7 +832,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
/* Scrub all HW state upon release */
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
}
void intel_gt_driver_release(struct intel_gt *gt)
@@ -1024,6 +1024,12 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
return I915_MAP_WC;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 55), IP_VER(12, 71));
+}
+
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
{
return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
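
The new workaround helper above, like the many IP_VER(12, 50) to IP_VER(12, 55) substitutions throughout this series, relies on IP_VER() producing integers that compare in release order. A worked illustration, assuming the usual (ver << 8 | rel) encoding; the macro body here is an assumption for the example, not quoted from the i915 headers.

#include <stdio.h>

#define IP_VER(ver, rel) ((ver) << 8 | (rel))	/* assumed encoding: release in the low byte */

int main(void)
{
	printf("IP_VER(12, 50) = 0x%03x\n", IP_VER(12, 50));	/* 0xc32 */
	printf("IP_VER(12, 55) = 0x%03x\n", IP_VER(12, 55));	/* 0xc37 */
	printf("IP_VER(12, 71) = 0x%03x\n", IP_VER(12, 71));	/* 0xc47 */

	/* 12.50 < 12.55 < 12.71, so raising a lower bound from 12.50 to 12.55
	 * drops only the graphics IPs this series retires while keeping
	 * 12.55 and later behaviour unchanged. */
	return 0;
}
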
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 608f5c872928..b5e114d284ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -82,17 +82,18 @@ struct drm_printer;
##__VA_ARGS__); \
} while (0)
-#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
- IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 55), IP_VER(12, 71)) && \
- engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
-
static inline bool gt_is_root(struct intel_gt *gt)
{
return !gt->info.id;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt);
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt);
+#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
+ intel_gt_needs_wa_16018031267(engine->gt) && \
+ engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
@@ -123,6 +124,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
return guc_to_gt(guc)->i915;
}
+static inline struct intel_guc *gt_to_guc(struct intel_gt *gt)
+{
+ return &gt->uc.guc;
+}
+
void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
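
gt_to_guc() above completes an accessor pair with the pre-existing guc_to_gt()/guc_to_i915() helpers, so call sites no longer open-code &gt->uc.guc. A minimal, self-contained sketch of that pattern with illustrative struct layouts, not the real i915 types:

#include <stddef.h>

struct fake_guc { int state; };
struct fake_gt  { struct { struct fake_guc guc; } uc; };

/* Downward accessor: container to member. */
static struct fake_guc *gt_to_guc(struct fake_gt *gt)
{
	return &gt->uc.guc;
}

/* Upward accessor: member back to container (container_of-style arithmetic). */
static struct fake_gt *guc_to_gt(struct fake_guc *guc)
{
	return (struct fake_gt *)((char *)guc - offsetof(struct fake_gt, uc.guc));
}

int main(void)
{
	struct fake_gt gt = { .uc.guc.state = 1 };

	return guc_to_gt(gt_to_guc(&gt)) == &gt ? 0 : 1;	/* round trip */
}
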
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
new file mode 100644
index 000000000000..3c62a44e9106
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
+#include "intel_gt_regs.h"
+
+unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
+{
+ int cslice;
+ u32 mode = 0;
+ int first_ccs = __ffs(CCS_MASK(gt));
+
+ if (!IS_DG2(gt->i915))
+ return 0;
+
+ /* Build the value for the fixed CCS load balancing */
+ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
+ if (gt->ccs.cslices & BIT(cslice))
+ /*
+ * If available, assign the cslice
+ * to the first available engine...
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs);
+
+ else
+ /*
+			 * ... otherwise, mark the cslice as
+			 * unavailable so that no CCS dispatches to it
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice,
+ XEHP_CCS_MODE_CSLICE_MASK);
+ }
+
+ return mode;
+}
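
A worked example of the value intel_gt_apply_ccs_mode() builds, re-stating the XEHP_CCS_MODE_CSLICE() packing from the register header hunk further down (a 3-bit field per cslice, with 0x7 meaning no CCS dispatch); the cslice mask used here is hypothetical.

#include <stdio.h>

#define CSLICE_FIELD_MASK  0x7u		/* mirrors XEHP_CCS_MODE_CSLICE_MASK */
#define CSLICE_FIELD_WIDTH 3		/* ilog2(0x7 + 1) */
#define CSLICE(cslice, ccs) ((ccs) << ((cslice) * CSLICE_FIELD_WIDTH))

int main(void)
{
	unsigned int cslices = 0x5;	/* hypothetical: only cslice0 and cslice2 usable */
	unsigned int first_ccs = 0;	/* all work is routed to CCS0 */
	unsigned int mode = 0;
	int c;

	for (c = 0; c < 4; c++)
		mode |= CSLICE(c, (cslices & (1u << c)) ? first_ccs : CSLICE_FIELD_MASK);

	printf("CCS_MODE = 0x%x\n", mode);	/* 0xe38: cslices 1/3 parked, 0/2 -> CCS0 */
	return 0;
}
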
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
new file mode 100644
index 000000000000..55547f2ff426
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_CCS_MODE_H__
+#define __INTEL_GT_CCS_MODE_H__
+
+struct intel_gt;
+
+unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt);
+
+#endif /* __INTEL_GT_CCS_MODE_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 77fb57223465..ad4c51f18d3a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -68,9 +68,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
struct intel_gt *media_gt = gt->i915->media_gt;
if (instance == OTHER_GUC_INSTANCE)
- return guc_irq_handler(&gt->uc.guc, iir);
+ return guc_irq_handler(gt_to_guc(gt), iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
- return guc_irq_handler(&media_gt->uc.guc, iir);
+ return guc_irq_handler(gt_to_guc(media_gt), iir);
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(&gt->rps, iir);
@@ -442,7 +442,7 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
iir = raw_reg_read(regs, GEN8_GT_IIR(2));
if (likely(iir)) {
gen6_rps_irq_handler(&gt->rps, iir);
- guc_irq_handler(&gt->uc.guc, iir >> 16);
+ guc_irq_handler(gt_to_guc(gt), iir >> 16);
raw_reg_write(regs, GEN8_GT_IIR(2), iir);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index e253750a51c5..b8912bd6c08e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -57,51 +57,18 @@ static const struct intel_mmio_range icl_l3bank_steering_table[] = {
* are of a "GAM" subclass that has special rules. Thus we use a separate
* GAM table farther down for those.
*/
-static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
+static const struct intel_mmio_range dg2_mslice_steering_table[] = {
{ 0x00DD00, 0x00DDFF },
{ 0x00E900, 0x00FFFF }, /* 0xEA00 - OxEFFF is unused */
{},
};
-static const struct intel_mmio_range xehpsdv_gam_steering_table[] = {
- { 0x004000, 0x004AFF },
- { 0x00C800, 0x00CFFF },
- {},
-};
-
-static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
- { 0x00B000, 0x00B0FF },
- { 0x00D800, 0x00D8FF },
- {},
-};
-
static const struct intel_mmio_range dg2_lncf_steering_table[] = {
{ 0x00B000, 0x00B0FF },
{ 0x00D880, 0x00D8FF },
{},
};
-/*
- * We have several types of MCR registers on PVC where steering to (0,0)
- * will always provide us with a non-terminated value. We'll stick them
- * all in the same table for simplicity.
- */
-static const struct intel_mmio_range pvc_instance0_steering_table[] = {
- { 0x004000, 0x004AFF }, /* HALF-BSLICE */
- { 0x008800, 0x00887F }, /* CC */
- { 0x008A80, 0x008AFF }, /* TILEPSMI */
- { 0x00B000, 0x00B0FF }, /* HALF-BSLICE */
- { 0x00B100, 0x00B3FF }, /* L3BANK */
- { 0x00C800, 0x00CFFF }, /* HALF-BSLICE */
- { 0x00D800, 0x00D8FF }, /* HALF-BSLICE */
- { 0x00DD00, 0x00DDFF }, /* BSLICE */
- { 0x00E900, 0x00E9FF }, /* HALF-BSLICE */
- { 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */
- { 0x00F000, 0x00FFFF }, /* HALF-BSLICE */
- { 0x024180, 0x0241FF }, /* HALF-BSLICE */
- {},
-};
-
static const struct intel_mmio_range xelpg_instance0_steering_table[] = {
{ 0x000B00, 0x000BFF }, /* SQIDI */
{ 0x001000, 0x001FFF }, /* SQIDI */
@@ -185,22 +152,16 @@ void intel_gt_mcr_init(struct intel_gt *gt)
gt->steering_table[INSTANCE0] = xelpg_instance0_steering_table;
gt->steering_table[L3BANK] = xelpg_l3bank_steering_table;
gt->steering_table[DSS] = xelpg_dss_steering_table;
- } else if (IS_PONTEVECCHIO(i915)) {
- gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
} else if (IS_DG2(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+ gt->steering_table[MSLICE] = dg2_mslice_steering_table;
gt->steering_table[LNCF] = dg2_lncf_steering_table;
/*
* No need to hook up the GAM table since it has a dedicated
* steering control register on DG2 and can use implicit
* steering.
*/
- } else if (IS_XEHPSDV(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
- gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
- gt->steering_table[GAM] = xehpsdv_gam_steering_table;
} else if (GRAPHICS_VER(i915) >= 11 &&
- GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
+ GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) {
gt->steering_table[L3BANK] = icl_l3bank_steering_table;
gt->info.l3bank_mask =
~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
@@ -821,8 +782,6 @@ void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
for (int i = 0; i < NUM_STEERING_TYPES; i++)
if (gt->steering_table[i])
report_steering_type(p, gt, i, dump_table);
- } else if (IS_PONTEVECCHIO(gt->i915)) {
- report_steering_type(p, gt, INSTANCE0, dump_table);
} else if (HAS_MSLICE_STEERING(gt->i915)) {
report_steering_type(p, gt, MSLICE, dump_table);
report_steering_type(p, gt, LNCF, dump_table);
@@ -842,10 +801,7 @@ void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
unsigned int *group, unsigned int *instance)
{
- if (IS_PONTEVECCHIO(gt->i915)) {
- *group = dss / GEN_DSS_PER_CSLICE;
- *instance = dss % GEN_DSS_PER_CSLICE;
- } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
*group = dss / GEN_DSS_PER_GSLICE;
*instance = dss % GEN_DSS_PER_GSLICE;
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
index 01ac565a56a4..a67a4c35a4fa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
@@ -54,7 +54,7 @@ int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
* the topology, so we lookup the DSS ID directly in "slice 0."
*/
#define _HAS_SS(ss_, gt_, group_, instance_) ( \
- GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 50) ? \
+ GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 55) ? \
intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \
intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_))
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 220ac4f92edf..c08fdb65cc69 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -159,7 +159,7 @@ static bool reset_engines(struct intel_gt *gt)
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
return false;
- return __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ return intel_gt_reset_all_engines(gt) == 0;
}
static void gt_sanitize(struct intel_gt *gt, bool force)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 7114c116e928..4fcba42cfe34 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -392,10 +392,6 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_puts(p, "no P-state info available\n");
}
- drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
- drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
- drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
-
intel_runtime_pm_put(uncore->rpm, wakeref);
}
@@ -538,7 +534,7 @@ static bool rps_eval(void *data)
{
struct intel_gt *gt = data;
- if (intel_guc_slpc_is_used(&gt->uc.guc))
+ if (intel_guc_slpc_is_used(gt_to_guc(gt)))
return false;
else
return HAS_RPS(gt->i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 50962cfd1353..e42b3a5d4e63 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -718,44 +718,11 @@
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
#define VFUNIT_CLKGATE_DIS REG_BIT(20)
-#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
#define HSUNIT_CLKGATE_DIS REG_BIT(8)
#define VSUNIT_CLKGATE_DIS REG_BIT(3)
-#define UNSLCGCTL9440 _MMIO(0x9440)
-#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
-#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
-#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
-#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
-#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
-#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
-#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
-#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
-#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
-#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
-#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
-#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
-
-#define UNSLCGCTL9444 _MMIO(0x9444)
-#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
-#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
-#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
-#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
-#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
-#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
-#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
-#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
-#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
-#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
-#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
-#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
-#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
-#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
-#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
-#define LTCDD_CLKGATE_DIS REG_BIT(10)
-
#define GEN11_SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define XEHP_SLICE_UNIT_LEVEL_CLKGATE MCR_REG(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
@@ -765,9 +732,6 @@
#define L3_CLKGATE_DIS REG_BIT(16)
#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
-#define SCCGCTL94DC MCR_REG(0x94dc)
-#define CG3DDISURB REG_BIT(14)
-
#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
@@ -989,10 +953,6 @@
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1 << 19)
-#define XEHPC_LNCFMISCCFGREG0 MCR_REG(0xb01c)
-#define XEHPC_HOSTCACHEEN REG_BIT(1)
-#define XEHPC_OVRLSCCC REG_BIT(0)
-
#define GEN7_L3CNTLREG2 _MMIO(0xb020)
/* MOCS (Memory Object Control State) registers */
@@ -1046,20 +1006,9 @@
#define XEHP_L3SQCREG5 MCR_REG(0xb158)
#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
-#define MLTICTXCTL MCR_REG(0xb170)
-#define TDONRENDER REG_BIT(2)
-
#define XEHP_L3SCQREG7 MCR_REG(0xb188)
#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
-#define XEHPC_L3SCRUB MCR_REG(0xb18c)
-#define SCRUB_CL_DWNGRADE_SHARED REG_BIT(12)
-#define SCRUB_RATE_PER_BANK_MASK REG_GENMASK(2, 0)
-#define SCRUB_RATE_4B_PER_CLK REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
-
-#define L3SQCREG1_CCS0 MCR_REG(0xb200)
-#define FLUSHALLNONCOH REG_BIT(5)
-
#define GEN11_GLBLINVL _MMIO(0xb404)
#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5)
#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5)
@@ -1109,7 +1058,6 @@
#define XEHP_COMPCTX_TLB_INV_CR MCR_REG(0xcf04)
#define XELPMP_GSC_TLB_INV_CR _MMIO(0xcf04) /* media GT only */
-#define XEHP_MERT_MOD_CTRL MCR_REG(0xcf28)
#define RENDER_MOD_CTRL MCR_REG(0xcf2c)
#define COMP_MOD_CTRL MCR_REG(0xcf30)
#define XELPMP_GSC_MOD_CTRL _MMIO(0xcf30) /* media GT only */
@@ -1185,7 +1133,6 @@
#define EU_PERF_CNTL4 PERF_REG(0xe45c)
#define GEN9_ROW_CHICKEN4 MCR_REG(0xe48c)
-#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
#define XEHP_DIS_BBL_SYSPIPE REG_BIT(11)
#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
@@ -1202,7 +1149,6 @@
#define FLOW_CONTROL_ENABLE REG_BIT(15)
#define UGM_BACKUP_MODE REG_BIT(13)
#define MDQ_ARBITRATION_MODE REG_BIT(12)
-#define SYSTOLIC_DOP_CLOCK_GATING_DIS REG_BIT(10)
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8)
#define STALL_DOP_GATING_DISABLE REG_BIT(5)
#define THROTTLE_12_5 REG_GENMASK(4, 2)
@@ -1215,6 +1161,7 @@
#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12)
#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+#define XELPG_DISABLE_TDL_SVHS_GATING REG_BIT(1)
#define GEN12_DISABLE_DOP_GATING REG_BIT(0)
#define RT_CTRL MCR_REG(0xe530)
@@ -1477,8 +1424,14 @@
#define ECOBITS_PPGTT_CACHE4B (0 << 8)
#define GEN12_RCU_MODE _MMIO(0x14800)
+#define XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
#define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0)
+#define XEHP_CCS_MODE _MMIO(0x14804)
+#define XEHP_CCS_MODE_CSLICE_MASK REG_GENMASK(2, 0) /* CCS0-3 + rsvd */
+#define XEHP_CCS_MODE_CSLICE_WIDTH ilog2(XEHP_CCS_MODE_CSLICE_MASK + 1)
+#define XEHP_CCS_MODE_CSLICE(cslice, ccs) (ccs << (cslice * XEHP_CCS_MODE_CSLICE_WIDTH))
+
#define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
#define CHV_FGT_DISABLE_SS1 (1 << 11)
@@ -1679,11 +1632,6 @@
#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
-#define GT0_PACKAGE_ENERGY_STATUS _MMIO(0x250004)
-#define GT0_PACKAGE_RAPL_LIMIT _MMIO(0x250008)
-#define GT0_PACKAGE_POWER_SKU_UNIT _MMIO(0x250068)
-#define GT0_PLATFORM_ENERGY_STATUS _MMIO(0x25006c)
-
/*
* Standalone Media's non-engine GT registers are located at their regular GT
* offsets plus 0x380000. This extra offset is stored inside the intel_uncore
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index c0b202223940..d7784650e4d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -442,7 +442,7 @@ static ssize_t slpc_ignore_eff_freq_show(struct kobject *kobj,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
return sysfs_emit(buff, "%u\n", slpc->ignore_eff_freq);
}
@@ -452,7 +452,7 @@ static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj,
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
int err;
u32 val;
@@ -573,7 +573,6 @@ static ssize_t media_freq_factor_show(struct kobject *kobj,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
intel_wakeref_t wakeref;
u32 mode;
@@ -581,20 +580,12 @@ static ssize_t media_freq_factor_show(struct kobject *kobj,
* Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by
* GuC. GEN6_RPNSWREQ:13 value 0 represents 1:2 and 1 represents 1:1
*/
- if (IS_XEHPSDV(gt->i915) &&
- slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) {
- /*
- * For XEHPSDV dynamic mode GEN6_RPNSWREQ:13 does not contain
- * the media_ratio_mode, just return the cached media ratio
- */
- mode = slpc->media_ratio_mode;
- } else {
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
- mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
- SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
- SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
- }
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
+
+ mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode));
}
@@ -604,7 +595,7 @@ static ssize_t media_freq_factor_store(struct kobject *kobj,
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
u32 factor, mode;
int err;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index def7dd0eb6f1..cfdd2ad5e954 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -207,6 +207,14 @@ struct intel_gt {
[MAX_ENGINE_INSTANCE + 1];
enum intel_submission_method submission_method;
+ struct {
+ /*
+		 * Mask of the non-fused CCS slices
+		 * to be used for load balancing
+ */
+ intel_engine_mask_t cslices;
+ } ccs;
+
/*
* Default address space (either GGTT or ppGTT depending on arch).
*
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 7811a8c9da06..30b128b1fde7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -680,7 +680,7 @@ void setup_private_pat(struct intel_gt *gt)
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
xelpg_setup_private_ppat(gt);
- else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_setup_private_ppat(gt);
else if (GRAPHICS_VER(i915) >= 12)
tgl_setup_private_ppat(uncore);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7c367ba8d9dc..b387146ede98 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -546,47 +546,6 @@ static const u8 gen12_rcs_offsets[] = {
END
};
-static const u8 xehp_rcs_offsets[] = {
- NOP(1),
- LRI(13, POSTED),
- REG16(0x244),
- REG(0x034),
- REG(0x030),
- REG(0x038),
- REG(0x03c),
- REG(0x168),
- REG(0x140),
- REG(0x110),
- REG(0x1c0),
- REG(0x1c4),
- REG(0x1c8),
- REG(0x180),
- REG16(0x2b4),
-
- NOP(5),
- LRI(9, POSTED),
- REG16(0x3a8),
- REG16(0x28c),
- REG16(0x288),
- REG16(0x284),
- REG16(0x280),
- REG16(0x27c),
- REG16(0x278),
- REG16(0x274),
- REG16(0x270),
-
- LRI(3, POSTED),
- REG(0x1b0),
- REG16(0x5a8),
- REG16(0x5ac),
-
- NOP(6),
- LRI(1, 0),
- REG(0x0c8),
-
- END
-};
-
static const u8 dg2_rcs_offsets[] = {
NOP(1),
LRI(15, POSTED),
@@ -695,8 +654,6 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
return mtl_rcs_offsets;
else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return dg2_rcs_offsets;
- else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
- return xehp_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 11)
@@ -719,7 +676,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x70;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x60;
@@ -733,7 +690,7 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x80;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x70;
@@ -748,7 +705,7 @@ static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x84;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x74;
@@ -795,7 +752,7 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
/*
* Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
* simply to match the RCS context image layout.
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 576e5ef0289b..6f7af4077135 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -35,9 +35,9 @@ static bool engine_supports_migration(struct intel_engine_cs *engine)
return true;
}
-static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
- struct i915_page_table *pt,
- void *data)
+static void xehp_toggle_pdes(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data)
{
struct insert_pte_data *d = data;
@@ -52,9 +52,9 @@ static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
d->offset += SZ_2M;
}
-static void xehpsdv_insert_pte(struct i915_address_space *vm,
- struct i915_page_table *pt,
- void *data)
+static void xehp_insert_pte(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data)
{
struct insert_pte_data *d = data;
@@ -120,7 +120,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
* 512 entry layout using 4K GTT pages. The other two windows just map
* lmem pages and must use the new compact 32 entry layout using 64K GTT
* pages, which ensures we can address any lmem object that the user
- * throws at us. We then also use the xehpsdv_toggle_pdes as a way of
+ * throws at us. We then also use the xehp_toggle_pdes as a way of
* just toggling the PDE bit(GEN12_PDE_64K) for us, to enable the
* compact layout for each of these page-tables, that fall within the
* [CHUNK_SIZE, 3 * CHUNK_SIZE) range.
@@ -209,12 +209,12 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
/* Now allow the GPU to rewrite the PTE via its own ppGTT */
if (HAS_64K_PAGES(gt->i915)) {
vm->vm.foreach(&vm->vm, base, d.offset - base,
- xehpsdv_insert_pte, &d);
+ xehp_insert_pte, &d);
d.offset = base + CHUNK_SZ;
vm->vm.foreach(&vm->vm,
d.offset,
2 * CHUNK_SZ,
- xehpsdv_toggle_pdes, &d);
+ xehp_toggle_pdes, &d);
} else {
vm->vm.foreach(&vm->vm, base, d.offset - base,
insert_pte, &d);
@@ -925,7 +925,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
ring_sz = XY_FAST_COLOR_BLT_DW;
else if (ver >= 8)
ring_sz = 8;
@@ -936,7 +936,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
if (IS_ERR(cs))
return PTR_ERR(cs);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
(XY_FAST_COLOR_BLT_DW - 2);
*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 25c1023eb5f9..d791d63d49b4 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -53,7 +53,6 @@ struct drm_i915_mocs_table {
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
-#define PVC_NUM_MOCS_ENTRIES 3
#define MTL_NUM_MOCS_ENTRIES 16
/* (e)LLC caching options */
@@ -367,31 +366,6 @@ static const struct drm_i915_mocs_entry gen12_mocs_table[] = {
L3_3_WB),
};
-static const struct drm_i915_mocs_entry xehpsdv_mocs_table[] = {
- /* wa_1608975824 */
- MOCS_ENTRY(0, 0, L3_3_WB | L3_LKUP(1)),
-
- /* UC - Coherent; GO:L3 */
- MOCS_ENTRY(1, 0, L3_1_UC | L3_LKUP(1)),
- /* UC - Coherent; GO:Memory */
- MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
- /* UC - Non-Coherent; GO:Memory */
- MOCS_ENTRY(3, 0, L3_1_UC | L3_GLBGO(1)),
- /* UC - Non-Coherent; GO:L3 */
- MOCS_ENTRY(4, 0, L3_1_UC),
-
- /* WB */
- MOCS_ENTRY(5, 0, L3_3_WB | L3_LKUP(1)),
-
- /* HW Reserved - SW program but never use. */
- MOCS_ENTRY(48, 0, L3_3_WB | L3_LKUP(1)),
- MOCS_ENTRY(49, 0, L3_1_UC | L3_LKUP(1)),
- MOCS_ENTRY(60, 0, L3_1_UC),
- MOCS_ENTRY(61, 0, L3_1_UC),
- MOCS_ENTRY(62, 0, L3_1_UC),
- MOCS_ENTRY(63, 0, L3_1_UC),
-};
-
static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
/* UC - Coherent; GO:L3 */
MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
@@ -404,17 +378,6 @@ static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
-static const struct drm_i915_mocs_entry pvc_mocs_table[] = {
- /* Error */
- MOCS_ENTRY(0, 0, L3_3_WB),
-
- /* UC */
- MOCS_ENTRY(1, 0, L3_1_UC),
-
- /* WB */
- MOCS_ENTRY(2, 0, L3_3_WB),
-};
-
static const struct drm_i915_mocs_entry mtl_mocs_table[] = {
/* Error - Reserved for Non-Use */
MOCS_ENTRY(0,
@@ -501,25 +464,12 @@ static unsigned int get_mocs_settings(struct drm_i915_private *i915,
table->n_entries = MTL_NUM_MOCS_ENTRIES;
table->uc_index = 9;
table->unused_entries_index = 1;
- } else if (IS_PONTEVECCHIO(i915)) {
- table->size = ARRAY_SIZE(pvc_mocs_table);
- table->table = pvc_mocs_table;
- table->n_entries = PVC_NUM_MOCS_ENTRIES;
- table->uc_index = 1;
- table->wb_index = 2;
- table->unused_entries_index = 2;
} else if (IS_DG2(i915)) {
table->size = ARRAY_SIZE(dg2_mocs_table);
table->table = dg2_mocs_table;
table->uc_index = 1;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->unused_entries_index = 3;
- } else if (IS_XEHPSDV(i915)) {
- table->size = ARRAY_SIZE(xehpsdv_mocs_table);
- table->table = xehpsdv_mocs_table;
- table->uc_index = 2;
- table->n_entries = GEN9_NUM_MOCS_ENTRIES;
- table->unused_entries_index = 5;
} else if (IS_DG1(i915)) {
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
@@ -670,7 +620,7 @@ static void init_l3cc_table(struct intel_gt *gt,
intel_gt_mcr_lock(gt, &flags);
for_each_l3cc(l3cc, table, i)
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55))
intel_gt_mcr_multicast_write_fw(gt, XEHP_LNCFCMOCS(i), l3cc);
else
intel_uncore_write_fw(gt->uncore, GEN9_LNCFCMOCS(i), l3cc);
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 8f4b3c8af09c..c864d101faf9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -109,7 +109,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
* thus allowing GuC to control RC6 entry/exit fully instead.
* We will not set the HW ENABLE and EI bits
*/
- if (!intel_guc_rc_enable(&gt->uc.guc))
+ if (!intel_guc_rc_enable(gt_to_guc(gt)))
rc6->ctl_enable = GEN6_RC_CTL_RC6_ENABLE;
else
rc6->ctl_enable =
@@ -569,7 +569,7 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
struct intel_gt *gt = rc6_to_gt(rc6);
/* Take control of RC6 back from GuC */
- intel_guc_rc_disable(&gt->uc.guc);
+ intel_guc_rc_disable(gt_to_guc(gt));
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (GRAPHICS_VER(i915) >= 9)
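Throughout this series, open-coded &gt->uc.guc lookups are replaced with gt_to_guc(gt). The helper itself is not part of this excerpt; presumably it is a trivial accessor in intel_gt.h along these lines:

/* Hedged sketch of the accessor assumed by these conversions. */
static inline struct intel_guc *gt_to_guc(struct intel_gt *gt)
{
	return &gt->uc.guc;
}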
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index c8e9aa41fdea..6161f7a3ff70 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -764,7 +764,7 @@ wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask)
HECI_H_GS1_ER_PREP, 0);
}
-int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+static int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
@@ -879,8 +879,17 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- /* For GuC mode, ensure submission is disabled before stopping ring */
- intel_uc_reset_prepare(&gt->uc);
+ /*
+ * For GuC mode with submission enabled, ensure submission
+ * is disabled before stopping the rings.
+ *
+ * For GuC mode with submission disabled, do not sanitize GuC here;
+ * that is done after the engine reset. reset_prepare() is followed
+ * by an engine reset which, in this mode, requires GuC to process
+ * any CSB FIFO entries generated by the resets.
+ */
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ intel_uc_reset_prepare(&gt->uc);
for_each_engine(engine, gt, id) {
if (intel_engine_pm_get_if_awake(engine))
@@ -978,7 +987,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
/* Even if the GPU reset fails, it should still stop the engines */
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
for_each_engine(engine, gt, id)
engine->submit_request = nop_submit_request;
@@ -1089,7 +1098,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
/* We must reset pending GPU events before restoring our submission */
ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ ok = intel_gt_reset_all_engines(gt) == 0;
if (!ok) {
/*
* Warn CI about the unrecoverable wedged condition.
@@ -1133,10 +1142,10 @@ static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
int err, i;
- err = __intel_gt_reset(gt, ALL_ENGINES);
+ err = intel_gt_reset_all_engines(gt);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(10 * (i + 1));
- err = __intel_gt_reset(gt, ALL_ENGINES);
+ err = intel_gt_reset_all_engines(gt);
}
if (err)
return err;
@@ -1227,6 +1236,9 @@ void intel_gt_reset(struct intel_gt *gt,
intel_overlay_reset(gt->i915);
+ /* sanitize uC after engine reset */
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ intel_uc_reset_prepare(&gt->uc);
/*
* Next we need to restore the context, but we don't use those
* yet either...
@@ -1270,7 +1282,30 @@ error:
goto finish;
}
-static int intel_gt_reset_engine(struct intel_engine_cs *engine)
+/**
+ * intel_gt_reset_all_engines() - Reset all engines in the given gt.
+ * @gt: the GT to reset all engines for.
+ *
+ * This function resets all engines within the given gt.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int intel_gt_reset_all_engines(struct intel_gt *gt)
+{
+ return __intel_gt_reset(gt, ALL_ENGINES);
+}
+
+/**
+ * intel_gt_reset_engine() - Reset a specific engine within a gt.
+ * @engine: engine to be reset.
+ *
+ * This function resets the specified engine within a gt.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
return __intel_gt_reset(engine->gt, engine->mask);
}
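Condensing the intel_reset.c changes above: the uC preparation now brackets the engine reset depending on whether GuC submission is in use. A minimal sketch of the resulting ordering, using only helpers visible in this patch (an illustration, not a drop-in replacement for intel_gt_reset()):

static void example_reset_ordering(struct intel_gt *gt)
{
	/* With GuC submission: disable submission before stopping the rings. */
	if (intel_uc_uses_guc_submission(&gt->uc))
		intel_uc_reset_prepare(&gt->uc);

	intel_gt_reset_all_engines(gt);

	/*
	 * Without GuC submission: sanitize the uC only after the engine
	 * reset, so GuC can still process any CSB FIFO entries the resets
	 * generate.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc))
		intel_uc_reset_prepare(&gt->uc);
}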
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index f615b30b81c5..c00de353075c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -54,7 +54,8 @@ int intel_gt_terminally_wedged(struct intel_gt *gt);
void intel_gt_set_wedged_on_init(struct intel_gt *gt);
void intel_gt_set_wedged_on_fini(struct intel_gt *gt);
-int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
+int intel_gt_reset_engine(struct intel_engine_cs *engine);
+int intel_gt_reset_all_engines(struct intel_gt *gt);
int intel_reset_guc(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 4feef874e6d6..c9cb2a391942 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -52,7 +52,7 @@ static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- return &gt->uc.guc.slpc;
+ return &gt_to_guc(gt)->slpc;
}
static bool rps_uses_slpc(struct intel_rps *rps)
@@ -1013,6 +1013,10 @@ void intel_rps_boost(struct i915_request *rq)
if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
return;
+ /* Waitboost is not needed for contexts marked with a Freq hint */
+ if (test_bit(CONTEXT_LOW_LATENCY, &rq->context->flags))
+ return;
+
/* Serializes with i915_request_retire() */
if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
@@ -1086,11 +1090,7 @@ static u32 intel_rps_read_state_cap(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
- if (IS_PONTEVECCHIO(i915))
- return intel_uncore_read(uncore, PVC_RP_STATE_CAP);
- else if (IS_XEHPSDV(i915))
- return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
- else if (IS_GEN9_LP(i915))
+ if (IS_GEN9_LP(i915))
return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
else
return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 6a3246240e81..c8fadf58d836 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -214,13 +214,8 @@ static void xehp_sseu_info_init(struct intel_gt *gt)
int num_compute_regs, num_geometry_regs;
int eu;
- if (IS_PONTEVECCHIO(gt->i915)) {
- num_geometry_regs = 0;
- num_compute_regs = 2;
- } else {
- num_geometry_regs = 1;
- num_compute_regs = 1;
- }
+ num_geometry_regs = 1;
+ num_compute_regs = 1;
/*
* The concept of slice has been removed in Xe_HP. To be compatible
@@ -642,7 +637,7 @@ void intel_sseu_info_init(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_sseu_info_init(gt);
else if (GRAPHICS_VER(i915) >= 12)
gen12_sseu_info_init(gt);
@@ -851,7 +846,7 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
{
if (sseu->max_slices == 0)
drm_printf(p, "Unavailable\n");
- else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
sseu_print_xehp_topology(sseu, p);
else
sseu_print_hsw_topology(sseu, p);
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
index 4bb13d1890e3..756e9ebbc725 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.c
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
@@ -132,7 +132,7 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
return;
with_intel_gt_pm_if_awake(gt, wakeref) {
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index d67d44611c28..5a0f1b279a80 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -10,12 +10,15 @@
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
+#include "display/intel_fbc_regs.h"
+
/**
* DOC: Hardware workarounds
*
@@ -51,7 +54,8 @@
* registers belonging to BCS, VCS or VECS should be implemented in
* xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
* engine's MMIO range but that are part of of the common RCS/CCS reset domain
- * should be implemented in general_render_compute_wa_init().
+ * should be implemented in general_render_compute_wa_init(). Settings
+ * related to CCS load balancing should be added in ccs_engine_wa_mode().
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
@@ -258,12 +262,6 @@ wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
}
static void
-wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
-{
- wa_mcr_write_clr_set(wal, reg, ~0, set);
-}
-
-static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
wa_write_clr_set(wal, reg, set, set);
@@ -918,12 +916,8 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_ctx_workarounds_init(engine, wal);
- else if (IS_PONTEVECCHIO(i915))
- ; /* noop; none at this time */
else if (IS_DG2(i915))
dg2_ctx_workarounds_init(engine, wal);
- else if (IS_XEHPSDV(i915))
- ; /* noop; none at this time */
else if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 12)
@@ -1350,9 +1344,6 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
gt->steering_table[MSLICE] = NULL;
}
- if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0))
- gt->steering_table[GAM] = NULL;
-
slice = __ffs(slice_mask);
subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
GEN_DSS_PER_GSLICE;
@@ -1380,20 +1371,6 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- unsigned int dss;
-
- /*
- * Setup implicit steering for COMPUTE and DSS ranges to the first
- * non-fused-off DSS. All other types of MCR registers will be
- * explicitly steered.
- */
- dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
- __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
-}
-
-static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
@@ -1520,76 +1497,6 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- xehp_init_mcr(gt, wal);
-
- /* Wa_1409757795:xehpsdv */
- wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
-
- /* Wa_18011725039:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
- wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
- wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
- }
-
- /* Wa_16011155590:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
- TSGUNIT_CLKGATE_DIS);
-
- /* Wa_14011780169:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
- wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
- GAMTLBVDBOX7_CLKGATE_DIS |
- GAMTLBVDBOX6_CLKGATE_DIS |
- GAMTLBVDBOX5_CLKGATE_DIS |
- GAMTLBVDBOX4_CLKGATE_DIS |
- GAMTLBVDBOX3_CLKGATE_DIS |
- GAMTLBVDBOX2_CLKGATE_DIS |
- GAMTLBVDBOX1_CLKGATE_DIS |
- GAMTLBVDBOX0_CLKGATE_DIS |
- GAMTLBKCR_CLKGATE_DIS |
- GAMTLBGUC_CLKGATE_DIS |
- GAMTLBBLT_CLKGATE_DIS);
- wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
- GAMTLBGFXA1_CLKGATE_DIS |
- GAMTLBCOMPA0_CLKGATE_DIS |
- GAMTLBCOMPA1_CLKGATE_DIS |
- GAMTLBCOMPB0_CLKGATE_DIS |
- GAMTLBCOMPB1_CLKGATE_DIS |
- GAMTLBCOMPC0_CLKGATE_DIS |
- GAMTLBCOMPC1_CLKGATE_DIS |
- GAMTLBCOMPD0_CLKGATE_DIS |
- GAMTLBCOMPD1_CLKGATE_DIS |
- GAMTLBMERT_CLKGATE_DIS |
- GAMTLBVEBOX3_CLKGATE_DIS |
- GAMTLBVEBOX2_CLKGATE_DIS |
- GAMTLBVEBOX1_CLKGATE_DIS |
- GAMTLBVEBOX0_CLKGATE_DIS);
- }
-
- /* Wa_16012725990:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);
-
- /* Wa_14011060649:xehpsdv */
- wa_14011060649(gt, wal);
-
- /* Wa_14012362059:xehpsdv */
- wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
-
- /* Wa_14014368820:xehpsdv */
- wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
- INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
-
- /* Wa_14010670810:xehpsdv */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
-}
-
-static void
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
xehp_init_mcr(gt, wal);
@@ -1632,27 +1539,10 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- pvc_init_mcr(gt, wal);
-
- /* Wa_14015795083 */
- wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
-
- /* Wa_18018781329 */
- wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
-
- /* Wa_16016694945 */
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
-}
-
-static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Wa_14018575942 / Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
@@ -1724,12 +1614,6 @@ static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
}
- if (IS_PONTEVECCHIO(gt->i915)) {
- wa_mcr_write(wal, XEHPC_L3SCRUB,
- SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
- }
-
if (IS_DG2(gt->i915)) {
wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
@@ -1754,12 +1638,8 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_gt_workarounds_init(gt, wal);
- else if (IS_PONTEVECCHIO(i915))
- pvc_gt_workarounds_init(gt, wal);
else if (IS_DG2(i915))
dg2_gt_workarounds_init(gt, wal);
- else if (IS_XEHPSDV(i915))
- xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
dg1_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 12)
@@ -2177,30 +2057,6 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
}
}
-static void blacklist_trtt(struct intel_engine_cs *engine)
-{
- struct i915_wa_list *w = &engine->whitelist;
-
- /*
- * Prevent read/write access to [0x4400, 0x4600) which covers
- * the TRTT range across all engines. Note that normally userspace
- * cannot access the other engines' trtt control, but for simplicity
- * we cover the entire range on each engine.
- */
- whitelist_reg_ext(w, _MMIO(0x4400),
- RING_FORCE_TO_NONPRIV_DENY |
- RING_FORCE_TO_NONPRIV_RANGE_64);
- whitelist_reg_ext(w, _MMIO(0x4500),
- RING_FORCE_TO_NONPRIV_DENY |
- RING_FORCE_TO_NONPRIV_RANGE_64);
-}
-
-static void pvc_whitelist_build(struct intel_engine_cs *engine)
-{
- /* Wa_16014440446:pvc */
- blacklist_trtt(engine);
-}
-
static void xelpg_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
@@ -2227,12 +2083,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
; /* none yet */
else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_whitelist_build(engine);
- else if (IS_PONTEVECCHIO(i915))
- pvc_whitelist_build(engine);
else if (IS_DG2(i915))
dg2_whitelist_build(engine);
- else if (IS_XEHPSDV(i915))
- ; /* none needed */
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 11)
@@ -2813,10 +2665,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
static void
ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
- if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
- /* Wa_14014999345:pvc */
- wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
- }
+ /* placeholder; no CCS engine specific workarounds at present */
}
/*
@@ -2849,10 +2698,34 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
THREAD_EX_ARB_MODE_RR_AFTER_DEP);
- if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
+static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ struct intel_gt *gt = engine->gt;
+ u32 mode;
+
+ if (!IS_DG2(gt->i915))
+ return;
+
+ /*
+ * Wa_14019159160: This workaround, along with others, makes load
+ * balancing among the CCS slices impractical, so the architectural
+ * decision was made to disable automatic CCS load balancing
+ * entirely.
+ */
+ wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
+
+ /*
+ * With automatic load balancing disabled, all slices must be
+ * assigned to a single CCS; we refer to this as CCS mode 1.
+ */
+ mode = intel_gt_apply_ccs_mode(gt);
+ wa_masked_en(wal, XEHP_CCS_MODE, mode);
+}
+
/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
@@ -2891,10 +2764,14 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74)))
+ IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) {
/* Wa_14017856879 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
+ /* Wa_14020495402 */
+ wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, XELPG_DISABLE_TDL_SVHS_GATING);
+ }
+
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
/*
@@ -2922,21 +2799,15 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
- IS_PONTEVECCHIO(i915) ||
IS_DG2(i915)) {
/* Wa_22014226127 */
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
}
- if (IS_PONTEVECCHIO(i915) || IS_DG2(i915)) {
+ if (IS_DG2(i915)) {
/* Wa_14015227452:dg2,pvc */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
- /* Wa_16015675438:dg2,pvc */
- wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
- }
-
- if (IS_DG2(i915)) {
/*
* Wa_16011620976:dg2_g11
* Wa_22015475538:dg2
@@ -2972,22 +2843,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
0 /* write-only, so skip validation */,
true);
}
-
- if (IS_XEHPSDV(i915)) {
- /* Wa_1409954639 */
- wa_mcr_masked_en(wal,
- GEN8_ROW_CHICKEN,
- SYSTOLIC_DOP_CLOCK_GATING_DIS);
-
- /* Wa_1607196519 */
- wa_mcr_masked_en(wal,
- GEN9_ROW_CHICKEN4,
- GEN12_DISABLE_GRF_CLEAR);
-
- /* Wa_14010449647:xehpsdv */
- wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
- }
}
static void
@@ -3003,8 +2858,10 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
* to a single RCS/CCS engine's workaround list since
* they're reset as part of the general render domain reset.
*/
- if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
general_render_compute_wa_init(engine, wal);
+ ccs_engine_wa_mode(engine, wal);
+ }
if (engine->class == COMPUTE_CLASS)
ccs_engine_wa_init(engine, wal);
@@ -3068,7 +2925,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
const struct i915_range *mcr_ranges;
int i;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
mcr_ranges = mcr_ranges_xehp;
else if (GRAPHICS_VER(i915) >= 12)
mcr_ranges = mcr_ranges_gen12;
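ccs_engine_wa_mode() above relies on intel_gt_apply_ccs_mode() (declared in the newly included intel_gt_ccs_mode.h, which is not part of this excerpt) to produce the XEHP_CCS_MODE value that steers every compute slice to a single CCS engine. A hedged sketch of what such a value conceptually looks like; the CCS_MODE_CSLICE* field helpers below are hypothetical and used only for illustration:

#include <linux/bits.h>

/* Hypothetical per-cslice field encoding, for illustration only. */
#define CCS_MODE_CSLICE_WIDTH		4
#define CCS_MODE_CSLICE(cslice, v)	((v) << ((cslice) * CCS_MODE_CSLICE_WIDTH))
#define CCS_MODE_CSLICE_DISABLED	0xf

static u32 example_ccs_mode(unsigned long cslice_mask, u32 first_ccs_instance)
{
	u32 mode = 0;
	unsigned int cslice;

	for (cslice = 0; cslice < 4; cslice++) {
		if (cslice_mask & BIT(cslice))
			/* route this compute slice to the single chosen CCS */
			mode |= CCS_MODE_CSLICE(cslice, first_ccs_instance);
		else
			/* fused-off slice: mark as disabled */
			mode |= CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_DISABLED);
	}

	return mode;
}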
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 0dd4d00ee894..9ce8ff1c04fe 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -319,7 +319,7 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_add(rq);
timeout = 0;
- intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
+ intel_wedge_on_timeout(&w, gt, HZ / 5 /* 200ms */)
timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
if (intel_gt_is_wedged(gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index f40de408cd3a..2cfc23c58e90 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -281,7 +281,7 @@ static int igt_atomic_reset(void *arg)
awake = reset_prepare(gt);
p->critical_section_begin();
- err = __intel_gt_reset(gt, ALL_ENGINES);
+ err = intel_gt_reset_all_engines(gt);
p->critical_section_end();
reset_finish(gt, awake);
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 302d0540295d..4ecc4ae74a54 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -53,7 +53,7 @@ static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
static int slpc_set_freq(struct intel_gt *gt, u32 freq)
{
int err;
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
err = slpc_set_max_freq(slpc, freq);
if (err) {
@@ -182,7 +182,7 @@ static int vary_min_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
static int slpc_power(struct intel_gt *gt, struct intel_engine_cs *engine)
{
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
struct {
u64 power;
int freq;
@@ -262,7 +262,7 @@ static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
static int run_test(struct intel_gt *gt, int test_type)
{
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index bccc3a1200bc..1fb6ff77fd89 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
+#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "gem/i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
index 811add10c30d..c34674e797c6 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -207,6 +207,27 @@ struct slpc_shared_data {
u8 reserved_mode_definition[4096];
} __packed;
+struct slpc_context_frequency_request {
+ u32 frequency_request:16;
+ u32 reserved:12;
+ u32 is_compute:1;
+ u32 ignore_busyness:1;
+ u32 is_minimum:1;
+ u32 is_predefined:1;
+} __packed;
+
+#define SLPC_CTX_FREQ_REQ_IS_COMPUTE REG_BIT(28)
+
+struct slpc_optimized_strategies {
+ u32 compute:1;
+ u32 async_flip:1;
+ u32 media:1;
+ u32 vsync_flip:1;
+ u32 reserved:28;
+} __packed;
+
+#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0)
+
/**
* DOC: SLPC H2G MESSAGE FORMAT
*
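The new slpc_context_frequency_request layout above is mirrored by a flat register-style define so the submission code can OR the compute hint straight into the context policy. A small sanity-check sketch (not part of the patch), assuming the usual little-endian bitfield layout of the GuC ABI structs: frequency_request occupies bits 0-15 and reserved bits 16-27, so is_compute lands on bit 28, matching SLPC_CTX_FREQ_REQ_IS_COMPUTE.

#include <linux/bits.h>
#include <linux/build_bug.h>

/* bit 28 is the is_compute bitfield, given the 16 + 12 lower bits */
static_assert(SLPC_CTX_FREQ_REQ_IS_COMPUTE == BIT(28));
static_assert(SLPC_OPTIMIZED_STRATEGY_COMPUTE == BIT(0));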
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index dabeaf4f245f..00d6402333f8 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -36,6 +36,7 @@ enum intel_guc_load_status {
INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR = 0x75,
INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
INTEL_GUC_LOAD_STATUS_READY = 0xF0,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index 58012edd4eb0..525587cfe1af 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -29,9 +29,9 @@
*/
#define GUC_KLV_LEN_MIN 1u
-#define GUC_KLV_0_KEY (0xffff << 16)
-#define GUC_KLV_0_LEN (0xffff << 0)
-#define GUC_KLV_n_VALUE (0xffffffff << 0)
+#define GUC_KLV_0_KEY (0xffffu << 16)
+#define GUC_KLV_0_LEN (0xffffu << 0)
+#define GUC_KLV_n_VALUE (0xffffffffu << 0)
/**
* DOC: GuC Self Config KLVs
@@ -101,4 +101,11 @@ enum {
GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5,
};
+/*
+ * Workaround keys:
+ */
+enum {
+ GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE = 0x9001,
+};
+
#endif /* _ABI_GUC_KLVS_ABI_H */
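The added 'u' suffixes above avoid shifting a plain signed int into its sign bit (0xffff << 16 otherwise overflows int). A hedged sketch of how a zero-length workaround KLV header dword is packed with these masks, mirroring the guc_waklv_ra_mode() addition later in this patch:

#include <linux/bitfield.h>

/* Pack a key with zero value dwords, as guc_waklv_ra_mode() does. */
static u32 example_waklv_header(void)
{
	return FIELD_PREP(GUC_KLV_0_KEY, GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE) |
	       FIELD_PREP(GUC_KLV_0_LEN, 0);
}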
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index e2e42b3e0d5d..3b69bc6616bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -298,7 +298,7 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
- intel_guc_write_barrier(&gt->uc.guc);
+ intel_guc_write_barrier(gt_to_guc(gt));
i915_gem_object_unpin_map(gsc->fw.obj);
@@ -351,7 +351,7 @@ static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
void *vaddr;
int err;
- err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_VER_PKT_SZ * 2,
+ err = intel_guc_allocate_and_map_vma(gt_to_guc(gt), GSC_VER_PKT_SZ * 2,
&vma, &vaddr);
if (err) {
gt_err(gt, "failed to allocate vma for GSC version query\n");
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index 40817ebcca71..a7d5465655f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -358,7 +358,8 @@ static int proxy_channel_alloc(struct intel_gsc_uc *gsc)
void *vaddr;
int err;
- err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_PROXY_CHANNEL_SIZE,
+ err = intel_guc_allocate_and_map_vma(gt_to_guc(gt),
+ GSC_PROXY_CHANNEL_SIZE,
&vma, &vaddr);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2b450c43bbd7..5e60a34692af 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -286,7 +286,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
/* Wa_22012773006:gen11,gen12 < XeHP */
if (GRAPHICS_VER(gt->i915) >= 11 &&
- GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 55))
flags |= GUC_WA_POLLCS;
/* Wa_14014475959 */
@@ -294,6 +294,11 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
IS_DG2(gt->i915))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ flags |= GUC_WA_RCS_CCS_SWITCHOUT;
+
/*
* Wa_14012197797
* Wa_22011391025
@@ -315,15 +320,12 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
if (IS_DG2_G11(gt->i915))
flags |= GUC_WA_CONTEXT_ISOLATION;
- /* Wa_16015675438 */
- if (!RCS_MASK(gt))
- flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
-
- /* Wa_14018913170 */
- if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) {
- if (IS_DG2(gt->i915) || IS_METEORLAKE(gt->i915) || IS_PONTEVECCHIO(gt->i915))
- flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
- }
+ /*
+ * Wa_14018913170: Applicable to all platforms supported by i915, so
+ * there is no need to test for individual platforms explicitly.
+ */
+ if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
+ flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
return flags;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index be70c46604b4..57b903132776 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -204,6 +204,8 @@ struct intel_guc {
struct guc_mmio_reg *ads_regset;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @ads_waklv_size: size of workaround KLVs */
+ u32 ads_waklv_size;
/** @ads_capture_size: size of register lists in the ADS used for error capture */
u32 ads_capture_size;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index f7372f736a77..c606bb5e3b7b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -46,6 +46,10 @@
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
+ * | w/a KLVs |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
* | capture lists |
* +---------------------------------------+
* | padding |
@@ -88,6 +92,11 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}
+static u32 guc_ads_waklv_size(struct intel_guc *guc)
+{
+ return PAGE_ALIGN(guc->ads_waklv_size);
+}
+
static u32 guc_ads_capture_size(struct intel_guc *guc)
{
return PAGE_ALIGN(guc->ads_capture_size);
@@ -113,7 +122,7 @@ static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
-static u32 guc_ads_capture_offset(struct intel_guc *guc)
+static u32 guc_ads_waklv_offset(struct intel_guc *guc)
{
u32 offset;
@@ -123,6 +132,16 @@ static u32 guc_ads_capture_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
+static u32 guc_ads_capture_offset(struct intel_guc *guc)
+{
+ u32 offset;
+
+ offset = guc_ads_waklv_offset(guc) +
+ guc_ads_waklv_size(guc);
+
+ return PAGE_ALIGN(offset);
+}
+
static u32 guc_ads_private_data_offset(struct intel_guc *guc)
{
u32 offset;
@@ -393,7 +412,7 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
/* add in local MOCS registers */
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++)
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false);
else
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
@@ -503,7 +522,7 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
-#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
+#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55) ? \
XEHP_LR_HW_CONTEXT_SIZE : \
LR_HW_CONTEXT_SIZE)
#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
@@ -796,6 +815,65 @@ engine_instance_list:
return PAGE_ALIGN(total_size);
}
+/* Wa_14019159160 */
+static u32 guc_waklv_ra_mode(struct intel_guc *guc, u32 offset, u32 remain)
+{
+ u32 size;
+ u32 klv_entry[] = {
+ /* 16:16 key/length */
+ FIELD_PREP(GUC_KLV_0_KEY, GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE) |
+ FIELD_PREP(GUC_KLV_0_LEN, 0),
+ /* 0 dwords data */
+ };
+
+ size = sizeof(klv_entry);
+ GEM_BUG_ON(remain < size);
+
+ iosys_map_memcpy_to(&guc->ads_map, offset, klv_entry, size);
+
+ return size;
+}
+
+static void guc_waklv_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 offset, addr_ggtt, remain, size;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return;
+
+ if (GUC_FIRMWARE_VER(guc) < MAKE_GUC_VER(70, 10, 0))
+ return;
+
+ GEM_BUG_ON(iosys_map_is_null(&guc->ads_map));
+ offset = guc_ads_waklv_offset(guc);
+ remain = guc_ads_waklv_size(guc);
+
+ /* Wa_14019159160 */
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ size = guc_waklv_ra_mode(guc, offset, remain);
+ offset += size;
+ remain -= size;
+ }
+
+ size = guc_ads_waklv_size(guc) - remain;
+ if (!size)
+ return;
+
+ offset = guc_ads_waklv_offset(guc);
+ addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+
+ ads_blob_write(guc, ads.wa_klv_addr_lo, addr_ggtt);
+ ads_blob_write(guc, ads.wa_klv_addr_hi, 0);
+ ads_blob_write(guc, ads.wa_klv_size, size);
+}
+
+static int guc_prep_waklv(struct intel_guc *guc)
+{
+ /* Fudge something chunky for now: */
+ return PAGE_SIZE;
+}
+
static void __guc_ads_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -843,6 +921,9 @@ static void __guc_ads_init(struct intel_guc *guc)
/* MMIO save/restore list */
guc_mmio_reg_state_init(guc);
+ /* Workaround KLV list */
+ guc_waklv_init(guc);
+
/* Private Data */
ads_blob_write(guc, ads.private_data, base +
guc_ads_private_data_offset(guc));
@@ -886,6 +967,12 @@ int intel_guc_ads_create(struct intel_guc *guc)
return ret;
guc->ads_capture_size = ret;
+ /* And don't forget the workaround KLVs: */
+ ret = guc_prep_waklv(guc);
+ if (ret < 0)
+ return ret;
+ guc->ads_waklv_size = ret;
+
/* Now the total size can be determined: */
size = guc_ads_blob_size(guc);
@@ -961,7 +1048,7 @@ u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u8 guc_class = engine_class_to_guc_class(engine->class);
size_t offset = offsetof(struct __guc_ads_blob,
engine_usage.engines[guc_class][ilog2(engine->logical_mask)]);
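guc_waklv_init() above is written so that further workaround KLVs can be chained after the RA-mode entry by repeating the offset/remain bookkeeping. A hedged sketch of how a second entry would be appended; guc_waklv_example_entry() is hypothetical and merely stands in for a future emitter with the same (guc, offset, remain) -> size contract:

static void example_append_waklvs(struct intel_guc *guc)
{
	u32 offset = guc_ads_waklv_offset(guc);
	u32 remain = guc_ads_waklv_size(guc);
	u32 size;

	/* Wa_14019159160, exactly as in guc_waklv_init() */
	size = guc_waklv_ra_mode(guc, offset, remain);
	offset += size;
	remain -= size;

	/* hypothetical additional workaround KLV, emitted the same way */
	size = guc_waklv_example_entry(guc, offset, remain);
	offset += size;
	remain -= size;
}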
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index a1cd40d80517..9547fff672bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -51,6 +51,7 @@
{ RING_ESR(0), 0, 0, "ESR" }, \
{ RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \
{ RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \
+ { RING_EIR(0), 0, 0, "EIR" }, \
{ RING_IPEIR(0), 0, 0, "IPEIR" }, \
{ RING_IPEHR(0), 0, 0, "IPEHR" }, \
{ RING_INSTPS(0), 0, 0, "INSTPS" }, \
@@ -80,9 +81,6 @@
{ GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \
{ GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" }
-#define COMMON_BASE_HAS_EU \
- { EIR, 0, 0, "EIR" }
-
#define COMMON_BASE_RENDER \
{ GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" }
@@ -105,7 +103,6 @@ static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
/* XE_LP Render / Compute Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = {
- COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
COMMON_GEN12BASE_RENDER,
};
@@ -148,7 +145,6 @@ static const struct __guc_mmio_reg_descr gen8_global_regs[] = {
};
static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = {
- COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
};
@@ -1441,7 +1437,7 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
if (!cap || !ee->engine)
return -ENODEV;
- guc = &ee->engine->gt->uc.guc;
+ guc = gt_to_guc(ee->engine->gt);
i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
ee->engine->name);
@@ -1543,7 +1539,7 @@ bool intel_guc_capture_is_matching_engine(struct intel_gt *gt,
if (!gt || !ce || !engine)
return false;
- guc = &gt->uc.guc;
+ guc = gt_to_guc(gt);
if (!guc->capture)
return false;
@@ -1573,7 +1569,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
if (!gt || !ee || !ce)
return;
- guc = &gt->uc.guc;
+ guc = gt_to_guc(gt);
if (!guc->capture)
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 52332bb14339..23f54c84cbab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -26,7 +26,7 @@ static void guc_prepare_xfer(struct intel_gt *gt)
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
GUC_ENABLE_MIA_CLOCK_GATING;
- if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 55))
shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
GUC_ENABLE_MIA_CACHING;
@@ -115,6 +115,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool
case INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
*success = false;
return true;
}
@@ -241,6 +242,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
ret = -EPERM;
break;
+ case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ guc_info(guc, "invalid w/a KLV entry\n");
+ ret = -EINVAL;
+ break;
+
case INTEL_GUC_LOAD_STATUS_HWCONFIG_START:
guc_info(guc, "still extracting hwconfig table.\n");
ret = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 8ae1846431da..14797e80bc92 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -96,8 +96,9 @@
#define GUC_WA_GAM_CREDITS BIT(10)
#define GUC_WA_DUAL_QUEUE BIT(11)
#define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13)
-#define GUC_WA_CONTEXT_ISOLATION BIT(15)
#define GUC_WA_PRE_PARSER BIT(14)
+#define GUC_WA_CONTEXT_ISOLATION BIT(15)
+#define GUC_WA_RCS_CCS_SWITCHOUT BIT(16)
#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
#define GUC_WA_POLLCS BIT(18)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
@@ -430,7 +431,10 @@ struct guc_ads {
u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
- u32 reserved[14];
+ u32 wa_klv_addr_lo;
+ u32 wa_klv_addr_hi;
+ u32 wa_klv_size;
+ u32 reserved[11];
} __packed;
/* Engine usage stats */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index cc9569af7f0c..b67a15f74276 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -111,7 +111,7 @@ static bool has_table(struct drm_i915_private *i915)
static int guc_hwconfig_init(struct intel_gt *gt)
{
struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret;
if (!has_table(gt->i915))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 3e681ab6fbf9..706fffca698b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -537,6 +537,20 @@ int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
return ret;
}
+int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_STRATEGIES,
+ val);
+
+ return ret;
+}
+
int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -711,6 +725,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+ /* Enable SLPC Optimized Strategy for compute */
+ intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 6ac6503c39d4..1cb5fd44f05c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -45,5 +45,6 @@ void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
+int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f3dcae4b9d45..0eaa1064242c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -398,7 +398,7 @@ static inline void set_context_guc_id_invalid(struct intel_context *ce)
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
{
- return &ce->engine->gt->uc.guc;
+ return gt_to_guc(ce->engine->gt);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
@@ -1246,7 +1246,7 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine,
static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
@@ -1311,7 +1311,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
struct intel_gt *gt = engine->gt;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
u64 total, gt_stamp_saved;
unsigned long flags;
u32 reset_count;
@@ -1403,14 +1403,17 @@ static void guc_cancel_busyness_worker(struct intel_guc *guc)
* Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
* every possible call stack is unfeasible. It would be too intrusive to many
* areas that really don't care about the GuC backend. However, there is the
- * 'reset_in_progress' flag available, so just use that.
+ * I915_RESET_BACKOFF flag, and gt->reset.mutex can be tested with
+ * mutex_is_locked(). So just use those. Note that testing both is required
+ * due to the hideously complex nature of the i915 driver's reset code paths.
*
* And note that in the case of a reset occurring during driver unload
- * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set
- * is fine because there is another cancel in _finish (when the reset flag is
- * not).
+ * (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the
+ * reset flag/mutex are set) is fine because there is another explicit cancel in
+ * intel_guc_submission_fini (when the reset flag/mutex are not).
*/
- if (guc_to_gt(guc)->uc.reset_in_progress)
+ if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) ||
+ test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags))
cancel_delayed_work(&guc->timestamp.work);
else
cancel_delayed_work_sync(&guc->timestamp.work);
@@ -1424,8 +1427,6 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
- guc_cancel_busyness_worker(guc);
-
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
@@ -1576,7 +1577,7 @@ static void guc_fini_engine_stats(struct intel_guc *guc)
void intel_guc_busyness_park(struct intel_gt *gt)
{
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
if (!guc_submission_initialized(guc))
return;
@@ -1603,7 +1604,7 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
unsigned long flags;
ktime_t unused;
@@ -2004,13 +2005,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
- /*
- * Ensure the busyness worker gets cancelled even on a fatal wedge.
- * Note that reset_prepare is not allowed to because it confuses lockdep.
- */
- if (guc_submission_initialized(guc))
- guc_cancel_busyness_worker(guc);
-
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
!intel_guc_is_fw_running(guc) ||
@@ -2136,6 +2130,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
if (!guc->submission_initialized)
return;
+ guc_fini_engine_stats(guc);
guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
@@ -2194,7 +2189,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
static void guc_submit_request(struct i915_request *rq)
{
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
- struct intel_guc *guc = &rq->engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(rq->engine->gt);
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
@@ -2220,11 +2215,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
order_base_2(ce->parallel.number_children
+ 1));
else
- ret = ida_simple_get(&guc->submission_state.guc_ids,
- NUMBER_MULTI_LRC_GUC_ID(guc),
- guc->submission_state.num_guc_ids,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL |
- __GFP_NOWARN);
+ ret = ida_alloc_range(&guc->submission_state.guc_ids,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ guc->submission_state.num_guc_ids - 1,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(ret < 0))
return ret;
@@ -2247,8 +2241,8 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
+ 1));
} else {
--guc->submission_state.guc_ids_in_use;
- ida_simple_remove(&guc->submission_state.guc_ids,
- ce->guc_id.id);
+ ida_free(&guc->submission_state.guc_ids,
+ ce->guc_id.id);
}
clr_ctx_id_mapping(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
@@ -2645,6 +2639,7 @@ MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
+MAKE_CONTEXT_POLICY_ADD(slpc_ctx_freq_req, SLPM_GT_FREQUENCY)
#undef MAKE_CONTEXT_POLICY_ADD
@@ -2660,10 +2655,11 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct context_policy policy;
u32 execution_quantum;
u32 preemption_timeout;
+ u32 slpc_ctx_freq_req = 0;
unsigned long flags;
int ret;
@@ -2675,11 +2671,15 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
execution_quantum = engine->props.timeslice_duration_ms * 1000;
preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+ if (ce->flags & BIT(CONTEXT_LOW_LATENCY))
+ slpc_ctx_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
+
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
__guc_context_policy_add_execution_quantum(&policy, execution_quantum);
__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_policy_add_slpc_ctx_freq_req(&policy, slpc_ctx_freq_req);
if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
__guc_context_policy_add_preempt_to_idle(&policy, 1);
@@ -2736,7 +2736,7 @@ static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
static void prepare_context_registration_info_v69(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 ctx_id = ce->guc_id.id;
struct guc_lrc_desc_v69 *desc;
struct intel_context *child;
@@ -2805,7 +2805,7 @@ static void prepare_context_registration_info_v70(struct intel_context *ce,
struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 ctx_id = ce->guc_id.id;
GEM_BUG_ON(!engine->mask);
@@ -2868,7 +2868,7 @@ static int try_context_registration(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
intel_wakeref_t wakeref;
u32 ctx_id = ce->guc_id.id;
bool context_registered;
@@ -4496,7 +4496,13 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
if (engine->class == COMPUTE_CLASS)
if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_DG2(engine->i915))
- engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
+
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
+ IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
/*
* TODO: GuC supports timeslicing and semaphores as well, but they're
@@ -4507,7 +4513,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
*/
engine->emit_bb_start = gen8_emit_bb_start;
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
engine->emit_bb_start = xehp_emit_bb_start;
}
@@ -4549,7 +4555,7 @@ static void guc_sched_engine_destroy(struct kref *kref)
int intel_guc_submission_setup(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
/*
* The setup relies on several assumptions (e.g. irqs always enabled)
@@ -5308,7 +5314,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
void intel_guc_find_hung_context(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct intel_context *ce;
struct i915_request *rq;
unsigned long index;
@@ -5370,7 +5376,7 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct intel_context *ce;
unsigned long index;
unsigned long flags;
@@ -5822,7 +5828,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
if (!ve)
return ERR_PTR(-ENOMEM);
- guc = &siblings[0]->gt->uc.guc;
+ guc = gt_to_guc(siblings[0]->gt);
ve->base.i915 = siblings[0]->i915;
ve->base.gt = siblings[0]->gt;
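The guc_id allocation hunk above also converts from the deprecated ida_simple_get() to ida_alloc_range(). The subtle part is the range convention: ida_simple_get() takes an exclusive 'end', while ida_alloc_range() takes an inclusive 'max', which is why the new call passes num_guc_ids - 1. A minimal sketch of the equivalence:

#include <linux/idr.h>

static int example_alloc_guc_id(struct ida *ida, unsigned int min, unsigned int limit)
{
	/* old: ida_simple_get(ida, min, limit, GFP_KERNEL) -- 'limit' exclusive */
	return ida_alloc_range(ida, min, limit - 1, GFP_KERNEL); /* 'max' inclusive */
}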
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 0945b177d5f9..2d9152eb7282 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -385,7 +385,7 @@ int intel_huc_init(struct intel_huc *huc)
if (HAS_ENGINE(gt, GSC0)) {
struct i915_vma *vma;
- vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2);
+ vma = intel_guc_allocate_vma(gt_to_guc(gt), PXP43_HUC_AUTH_INOUT_SIZE * 2);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
huc_info(huc, "Failed to allocate heci pkt\n");
@@ -540,7 +540,7 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret;
if (!intel_uc_fw_is_loaded(&huc->fw))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 6dfe5d9456c6..7a63abf8f644 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -50,10 +50,6 @@ static void uc_expand_default_options(struct intel_uc *uc)
/* Default: enable HuC authentication and GuC submission */
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
-
- /* XEHPSDV and PVC do not use HuC */
- if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
- i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}
/* Reset GuC providing us with fresh state for both GuC and HuC.
@@ -637,6 +633,10 @@ void intel_uc_reset_finish(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
+ /*
+ * NB: The wedge code path results in prepare -> prepare -> finish -> finish.
+ * So this function is sometimes called with the in-progress flag not set.
+ */
uc->reset_in_progress = false;
/* Firmware expected to be running when this function is called */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 756093eaf2ad..d80278eb45d7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -807,7 +807,7 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
struct intel_uc_fw_file *huc_selected)
{
- struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
+ struct intel_uc_fw_file *guc_selected = &gt_to_guc(gt)->fw.file_selected;
struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
bool new_huc, new_guc;
@@ -1209,7 +1209,7 @@ static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
* since its GGTT offset will be GuC accessible.
*/
GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
- vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
+ vma = intel_guc_allocate_vma(gt_to_guc(gt), PAGE_SIZE);
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index c900aac85adb..68feb55654f7 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -144,7 +144,7 @@ err:
static int intel_guc_steal_guc_ids(void *arg)
{
struct intel_gt *gt = arg;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret, sv, context_index = 0;
intel_wakeref_t wakeref;
struct intel_engine_cs *engine;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d4a3f3e093b0..4be8cb65fb7e 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -50,6 +50,7 @@
#include "trace.h"
#include "display/intel_display.h"
+#include "display/intel_sprite_regs.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e0c5dfb788eb..2b7df7fcf369 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -36,8 +36,10 @@
#include "i915_reg.h"
#include "gvt.h"
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_display.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_sprite_regs.h"
static int get_edp_pipe(struct intel_vgpu *vgpu)
{
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 313efdabee57..4140da68aabb 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -34,11 +34,14 @@
*/
#include <uapi/drm/drm_fourcc.h>
-#include "i915_drv.h"
+
#include "gvt.h"
+#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
+#include "display/intel_sprite_regs.h"
+
#define PRIMARY_FORMAT_NUM 16
struct pixel_format {
int drm_format; /* Pixel format in DRM definition */
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 4dd52ac2043e..221a3ae81baf 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -30,6 +30,7 @@
#include <linux/firmware.h>
#include <linux/crc32.h>
+#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "gvt.h"
@@ -50,21 +51,7 @@ struct gvt_firmware_header {
#define dev_to_drm_minor(d) dev_get_drvdata((d))
-static ssize_t
-gvt_firmware_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t offset, size_t count)
-{
- memcpy(buf, attr->private + offset, count);
- return count;
-}
-
-static struct bin_attribute firmware_attr = {
- .attr = {.name = "gvt_firmware", .mode = (S_IRUSR)},
- .read = gvt_firmware_read,
- .write = NULL,
- .mmap = NULL,
-};
+static BIN_ATTR_SIMPLE_ADMIN_RO(gvt_firmware);
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
@@ -107,10 +94,10 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
crc32_start = offsetof(struct gvt_firmware_header, version);
h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
- firmware_attr.size = size;
- firmware_attr.private = firmware;
+ bin_attr_gvt_firmware.size = size;
+ bin_attr_gvt_firmware.private = firmware;
- ret = device_create_bin_file(&pdev->dev, &firmware_attr);
+ ret = device_create_bin_file(&pdev->dev, &bin_attr_gvt_firmware);
if (ret) {
vfree(firmware);
return ret;
@@ -122,8 +109,8 @@ static void clean_firmware_sysfs(struct intel_gvt *gvt)
{
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
- device_remove_bin_file(&pdev->dev, &firmware_attr);
- vfree(firmware_attr.private);
+ device_remove_bin_file(&pdev->dev, &bin_attr_gvt_firmware);
+ vfree(bin_attr_gvt_firmware.private);
}
/**
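
The firmware.c hunk drops the hand-rolled gvt_firmware_read()/firmware_attr pair in favour of BIN_ATTR_SIMPLE_ADMIN_RO(gvt_firmware), which declares a root-read-only bin_attribute named bin_attr_gvt_firmware whose read path, judging by the callback it replaces, copies straight out of attr->private. A stand-alone sketch of that "serve a prebuilt buffer from ->private" read, using user-space stand-ins rather than the sysfs API:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    /* Stand-in for a binary sysfs attribute: size plus backing buffer. */
    struct bin_attr_stub {
            size_t size;
            void *private;
    };

    /* Same contract as the removed gvt_firmware_read(): copy from ->private,
     * clamped to the attribute size. */
    static size_t simple_read(const struct bin_attr_stub *attr, char *buf,
                              size_t offset, size_t count)
    {
            if (offset >= attr->size)
                    return 0;
            if (count > attr->size - offset)
                    count = attr->size - offset;
            memcpy(buf, (const char *)attr->private + offset, count);
            return count;
    }

    int main(void)
    {
            char fw[] = "GVT";
            struct bin_attr_stub attr = { .size = sizeof(fw), .private = fw };
            char out[4] = { 0 };

            assert(simple_read(&attr, out, 0, sizeof(out)) == sizeof(fw));
            assert(strcmp(out, "GVT") == 0);
            return 0;
    }
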
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 094fca9b0e73..58cca4906f41 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -39,6 +39,7 @@
#include "trace.h"
#include "gt/intel_gt_regs.h"
+#include <linux/vmalloc.h>
#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index efcb00472be2..22fbddbe3e23 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -41,6 +41,7 @@
#include "gvt.h"
#include "i915_pvinfo.h"
#include "intel_mchbar_regs.h"
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
@@ -49,9 +50,11 @@
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sprite_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
+#include <linux/vmalloc.h>
/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS _MMIO(0xc7200)
@@ -2763,15 +2766,15 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
+ MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
+ MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
+ MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 5b5def6ddef7..e16e0d4c9534 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -33,10 +33,12 @@
*
*/
+#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_dpio_phy.h"
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 08ad1bd651f1..63c751ca4119 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -34,6 +34,7 @@
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
+#include <linux/vmalloc.h>
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 990eaa029d9c..bc717cf544e4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,6 +30,7 @@
#include <linux/sort.h>
#include <linux/string_helpers.h>
+#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include "display/intel_display_params.h"
@@ -156,18 +157,6 @@ static const char *i915_cache_level_str(struct drm_i915_gem_object *obj)
case 4: return " WB (2-Way Coh)";
default: return " not defined";
}
- } else if (IS_PONTEVECCHIO(i915)) {
- switch (obj->pat_index) {
- case 0: return " UC";
- case 1: return " WC";
- case 2: return " WT";
- case 3: return " WB";
- case 4: return " WT (CLOS1)";
- case 5: return " WB (CLOS1)";
- case 6: return " WT (CLOS2)";
- case 7: return " WT (CLOS2)";
- default: return " not defined";
- }
} else if (GRAPHICS_VER(i915) >= 12) {
switch (obj->pat_index) {
case 0: return " WB";
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 8bca02025e09..33d2dcb0de65 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -4,6 +4,7 @@
*/
#include <linux/kernel.h>
+#include <linux/debugfs.h>
#include "i915_debugfs_params.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 9ee902d5b72c..161b21eff694 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -202,7 +202,7 @@ static void sanitize_gpu(struct drm_i915_private *i915)
unsigned int i;
for_each_gt(gt, i915, i)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
}
}
@@ -800,7 +800,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_cleanup_modeset2;
ret = intel_pxp_init(i915);
- if (ret != -ENODEV)
+ if (ret && ret != -ENODEV)
drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
ret = intel_display_driver_probe(i915);
@@ -920,27 +920,6 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-/**
- * i915_driver_lastclose - clean up after all DRM clients have exited
- * @dev: DRM device
- *
- * Take care of cleaning up after all DRM clients have exited. In the
- * mode setting case, we want to restore the kernel's initial mode (just
- * in case the last client left us in a bad state).
- *
- * Additionally, in the non-mode setting case, we'll tear down the GTT
- * and DMA structures, since the kernel won't be using them, and clean
- * up any GEM state.
- */
-static void i915_driver_lastclose(struct drm_device *dev)
-{
- struct drm_i915_private *i915 = to_i915(dev);
-
- intel_fbdev_restore_mode(i915);
-
- vga_switcheroo_process_delayed_switch();
-}
-
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1831,7 +1810,6 @@ static const struct drm_driver i915_drm_driver = {
DRIVER_SYNCOBJ_TIMELINE,
.release = i915_driver_release,
.open = i915_driver_open,
- .lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
.show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo),
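
The intel_pxp_init() change tightens the probe-time diagnostic: previously any return other than -ENODEV was logged at debug level, including a successful 0, which produced a misleading "pxp init failed with 0" message. With "ret && ret != -ENODEV" only real failures are reported, while -ENODEV (PXP not present on this platform) stays silent. A small sketch of that filter, with a hypothetical log helper standing in for drm_dbg():

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical logging helper standing in for drm_dbg(). */
    #define log_dbg(fmt, ...) fprintf(stderr, fmt "\n", __VA_ARGS__)

    static void report_pxp_init(int ret)
    {
            /* Log only genuine failures; success (0) and "not present"
             * (-ENODEV) are both expected, quiet outcomes. */
            if (ret && ret != -ENODEV)
                    log_dbg("pxp init failed with %d", ret);
    }

    int main(void)
    {
            report_pxp_init(0);        /* silent: success */
            report_pxp_init(-ENODEV);  /* silent: PXP not supported here */
            report_pxp_init(-EIO);     /* logged: real failure */
            return 0;
    }
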
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e81b3b2858ac..ee0d7d5f135d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -235,25 +235,17 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
- bool display_irqs_enabled;
-
/* Sideband mailbox protection */
struct mutex sb_lock;
struct pm_qos_request sb_qos;
/** Cached value of IMR to avoid reads in updating the bitfield */
- union {
- u32 irq_mask;
- u32 de_irq_mask[I915_MAX_PIPES];
- };
- u32 pipestat_irq_mask[I915_MAX_PIPES];
+ u32 irq_mask;
bool preserve_bios_swizzle;
unsigned int fsb_freq, mem_freq, is_ddr3;
- unsigned int skl_preferred_vco_freq;
- unsigned int max_dotclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
@@ -350,9 +342,6 @@ struct drm_i915_private {
struct intel_pxp *pxp;
- /* For i915gm/i945gm vblank irq workaround */
- u8 vblank_enabled;
-
bool irq_enabled;
struct i915_pmu pmu;
@@ -544,9 +533,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_DG1(i915) IS_PLATFORM(i915, INTEL_DG1)
#define IS_ALDERLAKE_S(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_S)
#define IS_ALDERLAKE_P(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_P)
-#define IS_XEHPSDV(i915) IS_PLATFORM(i915, INTEL_XEHPSDV)
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
-#define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
#define IS_LUNARLAKE(i915) 0
@@ -621,17 +608,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_TIGERLAKE_UY(i915) \
IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
-#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
- (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
-
-#define IS_PVC_BD_STEP(__i915, since, until) \
- (IS_PONTEVECCHIO(__i915) && \
- IS_BASEDIE_STEP(__i915, since, until))
-
-#define IS_PVC_CT_STEP(__i915, since, until) \
- (IS_PONTEVECCHIO(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
#define IS_LP(i915) (INTEL_INFO(i915)->is_lp)
#define IS_GEN9_LP(i915) (GRAPHICS_VER(i915) == 9 && IS_LP(i915))
#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_LP(i915))
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 5c3fec63cb4c..a62405787e77 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -155,12 +155,18 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
*/
value = 1;
break;
+ case I915_PARAM_HAS_CONTEXT_FREQ_HINT:
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ value = 1;
+ else
+ value = -EINVAL;
+ break;
case I915_PARAM_HAS_CONTEXT_ISOLATION:
value = intel_engines_has_context_isolation(i915);
break;
case I915_PARAM_SLICE_MASK:
/* Not supported from Xe_HP onward; use topology queries */
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return -EINVAL;
value = sseu->slice_mask;
@@ -169,7 +175,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_PARAM_SUBSLICE_MASK:
/* Not supported from Xe_HP onward; use topology queries */
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return -EINVAL;
/* Only copy bits from the first slice */
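
Several hunks in this series bump version gates from IP_VER(12, 50) to IP_VER(12, 55). Assuming IP_VER() packs the release number into the low byte, roughly (ver << 8 | rel), the standalone check below shows why the new cutoff still admits 12.55 (DG2) and everything later, while no longer matching the 12.50 graphics IP whose platform support is removed here:

    #include <assert.h>

    /* Assumed packing, mirroring how the driver compares full GT IP versions. */
    #define IP_VER(ver, rel)        ((ver) << 8 | (rel))

    int main(void)
    {
            /* 12.50 no longer passes a ">= IP_VER(12, 55)" gate ... */
            assert(!(IP_VER(12, 50) >= IP_VER(12, 55)));
            /* ... while 12.55 and later full versions still do. */
            assert(IP_VER(12, 55) >= IP_VER(12, 55));
            assert(IP_VER(12, 70) >= IP_VER(12, 55));
            assert(IP_VER(20, 0)  >= IP_VER(12, 55));
            return 0;
    }
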
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a0b784ebaddd..625b3c024540 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -28,6 +28,7 @@
*/
#include <linux/ascii85.h>
+#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
@@ -1245,8 +1246,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
if (MEDIA_VER(i915) >= 13 && engine->gt->type == GT_MEDIA)
ee->fault_reg = intel_uncore_read(engine->uncore,
XELPMP_RING_FAULT_REG);
-
- else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
XEHP_RING_FAULT_REG);
else if (GRAPHICS_VER(i915) >= 12)
@@ -1852,7 +1852,7 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt)
if (GRAPHICS_VER(i915) == 7)
gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
XEHP_FAULT_TLB_DATA0);
gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 8c3f443c8347..49db3e09826c 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -72,12 +72,13 @@ hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
struct intel_uncore *uncore = ddat->uncore;
intel_wakeref_t wakeref;
- mutex_lock(&hwmon->hwmon_lock);
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(uncore->rpm, wakeref)
intel_uncore_rmw(uncore, reg, clear, set);
- mutex_unlock(&hwmon->hwmon_lock);
+ mutex_unlock(&hwmon->hwmon_lock);
+ }
}
/*
@@ -136,20 +137,21 @@ hwm_energy(struct hwm_drvdata *ddat, long *energy)
else
rgaddr = hwmon->rg.energy_status_all;
- mutex_lock(&hwmon->hwmon_lock);
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(uncore->rpm, wakeref)
reg_val = intel_uncore_read(uncore, rgaddr);
- if (reg_val >= ei->reg_val_prev)
- ei->accum_energy += reg_val - ei->reg_val_prev;
- else
- ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
- ei->reg_val_prev = reg_val;
+ if (reg_val >= ei->reg_val_prev)
+ ei->accum_energy += reg_val - ei->reg_val_prev;
+ else
+ ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+ ei->reg_val_prev = reg_val;
- *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
- hwmon->scl_shift_energy);
- mutex_unlock(&hwmon->hwmon_lock);
+ *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
+ hwmon->scl_shift_energy);
+ mutex_unlock(&hwmon->hwmon_lock);
+ }
}
static ssize_t
@@ -404,6 +406,7 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
/* Block waiting for GuC reset to complete when needed */
for (;;) {
+ wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
mutex_lock(&hwmon->hwmon_lock);
prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
@@ -417,14 +420,13 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
}
mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
schedule();
}
finish_wait(&ddat->waitq, &wait);
if (ret)
- goto unlock;
-
- wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+ goto exit;
/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
if (val == PL1_DISABLE) {
@@ -444,9 +446,8 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
exit:
- intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
-unlock:
mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
return ret;
}
@@ -738,12 +739,6 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS;
hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
- } else if (IS_XEHPSDV(i915)) {
- hwmon->rg.pkg_power_sku_unit = GT0_PACKAGE_POWER_SKU_UNIT;
- hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
- hwmon->rg.pkg_rapl_limit = GT0_PACKAGE_RAPL_LIMIT;
- hwmon->rg.energy_status_all = GT0_PLATFORM_ENERGY_STATUS;
- hwmon->rg.energy_status_tile = GT0_PACKAGE_ENERGY_STATUS;
} else {
hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
@@ -792,7 +787,7 @@ void i915_hwmon_register(struct drm_i915_private *i915)
if (!IS_DGFX(i915))
return;
- hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
+ hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
return;
@@ -818,14 +813,12 @@ void i915_hwmon_register(struct drm_i915_private *i915)
hwm_get_preregistration_info(i915);
/* hwmon_dev points to device hwmon<i> */
- hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
- ddat,
- &hwm_chip_info,
- hwm_groups);
- if (IS_ERR(hwmon_dev)) {
- i915->hwmon = NULL;
- return;
- }
+ hwmon_dev = hwmon_device_register_with_info(dev, ddat->name,
+ ddat,
+ &hwm_chip_info,
+ hwm_groups);
+ if (IS_ERR(hwmon_dev))
+ goto err;
ddat->hwmon_dev = hwmon_dev;
@@ -838,16 +831,36 @@ void i915_hwmon_register(struct drm_i915_private *i915)
if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0))
continue;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat_gt->name,
- ddat_gt,
- &hwm_gt_chip_info,
- NULL);
+ hwmon_dev = hwmon_device_register_with_info(dev, ddat_gt->name,
+ ddat_gt,
+ &hwm_gt_chip_info,
+ NULL);
if (!IS_ERR(hwmon_dev))
ddat_gt->hwmon_dev = hwmon_dev;
}
+ return;
+err:
+ i915_hwmon_unregister(i915);
}
void i915_hwmon_unregister(struct drm_i915_private *i915)
{
- fetch_and_zero(&i915->hwmon);
+ struct i915_hwmon *hwmon = i915->hwmon;
+ struct intel_gt *gt;
+ int i;
+
+ if (!hwmon)
+ return;
+
+ for_each_gt(gt, i915, i)
+ if (hwmon->ddat_gt[i].hwmon_dev)
+ hwmon_device_unregister(hwmon->ddat_gt[i].hwmon_dev);
+
+ if (hwmon->ddat.hwmon_dev)
+ hwmon_device_unregister(hwmon->ddat.hwmon_dev);
+
+ mutex_destroy(&hwmon->hwmon_lock);
+
+ kfree(i915->hwmon);
+ i915->hwmon = NULL;
}
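
The hwmon rework establishes a single ordering everywhere: acquire the runtime-PM wakeref first, then hwmon_lock, and release in the reverse order, instead of taking the wakeref while already holding the mutex. A compilable sketch of that ordering with pthread and placeholder runtime-PM calls (not the i915 API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hwmon_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Placeholders for intel_runtime_pm_get()/put(): they just trace the order. */
    static int pm_get(void)   { puts("rpm get"); return 1; }
    static void pm_put(int w) { (void)w; puts("rpm put"); }

    static void locked_rmw(void)
    {
            int wakeref = pm_get();          /* 1. wake the device            */

            pthread_mutex_lock(&hwmon_lock); /* 2. then serialise hwmon state */
            puts("rmw register");
            pthread_mutex_unlock(&hwmon_lock);

            pm_put(wakeref);                 /* 3. drop in reverse order      */
    }

    int main(void)
    {
            locked_rmw();
            return 0;
    }
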
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8130f043693b..678d632ed043 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -702,7 +702,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
gen5_gt_irq_reset(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -767,7 +767,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -784,7 +784,7 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
gen5_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -838,7 +838,7 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
+ if (dev_priv->display.irq.display_irqs_enabled)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index ba82277254b7..cc41974cee74 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -25,6 +25,8 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/cpufeature.h>
+#include <linux/bug.h>
+#include <linux/build_bug.h>
#include <asm/fpu/api.h>
#include "i915_memcpy.h"
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index de43048543e8..8c00169e3ab7 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -108,9 +108,6 @@ i915_param_named_unsafe(guc_firmware_path, charp, 0400,
i915_param_named_unsafe(huc_firmware_path, charp, 0400,
"HuC firmware path to use instead of the default one");
-i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
- "DMC firmware path to use instead of the default one");
-
i915_param_named_unsafe(gsc_firmware_path, charp, 0400,
"GSC firmware path to use instead of the default one");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 1315d7fac850..2eb3f2115ff2 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -51,7 +51,6 @@ struct drm_printer;
param(int, guc_log_level, -1, 0400) \
param(char *, guc_firmware_path, NULL, 0400) \
param(char *, huc_firmware_path, NULL, 0400) \
- param(char *, dmc_firmware_path, NULL, 0400) \
param(char *, gsc_firmware_path, NULL, 0400) \
param(bool, memtest, false, 0400) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 8b4fdeabb12a..405ca17a990b 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -38,6 +38,9 @@
#include "i915_reg.h"
#include "intel_pci_config.h"
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for device info");
+
#define PLATFORM(x) .platform = (x)
#define GEN(x) \
.__runtime.graphics.ip.ver = (x), \
@@ -59,14 +62,6 @@
[I915_CACHE_WT] = 2, \
}
-#define PVC_CACHELEVEL \
- .cachelevel_to_pat = { \
- [I915_CACHE_NONE] = 0, \
- [I915_CACHE_LLC] = 3, \
- [I915_CACHE_L3_LLC] = 3, \
- [I915_CACHE_WT] = 2, \
- }
-
#define MTL_CACHELEVEL \
.cachelevel_to_pat = { \
[I915_CACHE_NONE] = 2, \
@@ -705,8 +700,6 @@ static const struct intel_device_info adl_p_info = {
I915_GTT_PAGE_SIZE_2M
#define XE_HP_FEATURES \
- .__runtime.graphics.ip.ver = 12, \
- .__runtime.graphics.ip.rel = 50, \
XE_HP_PAGE_SIZES, \
TGL_CACHELEVEL, \
.dma_mask_size = 46, \
@@ -730,32 +723,12 @@ static const struct intel_device_info adl_p_info = {
.__runtime.ppgtt_size = 48, \
.__runtime.ppgtt_type = INTEL_PPGTT_FULL
-#define XE_HPM_FEATURES \
- .__runtime.media.ip.ver = 12, \
- .__runtime.media.ip.rel = 50
-
-__maybe_unused
-static const struct intel_device_info xehpsdv_info = {
- XE_HP_FEATURES,
- XE_HPM_FEATURES,
- DGFX_FEATURES,
- PLATFORM(INTEL_XEHPSDV),
- .has_64k_pages = 1,
- .has_media_ratio_mode = 1,
- .platform_engine_mask =
- BIT(RCS0) | BIT(BCS0) |
- BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
- BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
- BIT(VCS4) | BIT(VCS5) | BIT(VCS6) | BIT(VCS7) |
- BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
- .require_force_probe = 1,
-};
-
#define DG2_FEATURES \
XE_HP_FEATURES, \
- XE_HPM_FEATURES, \
DGFX_FEATURES, \
+ .__runtime.graphics.ip.ver = 12, \
.__runtime.graphics.ip.rel = 55, \
+ .__runtime.media.ip.ver = 12, \
.__runtime.media.ip.rel = 55, \
PLATFORM(INTEL_DG2), \
.has_64k_pages = 1, \
@@ -778,33 +751,6 @@ static const struct intel_device_info ats_m_info = {
.tuning_thread_rr_after_dep = 1,
};
-#define XE_HPC_FEATURES \
- XE_HP_FEATURES, \
- .dma_mask_size = 52, \
- .has_3d_pipeline = 0, \
- .has_guc_deprivilege = 1, \
- .has_l3_ccs_read = 1, \
- .has_mslice_steering = 0, \
- .has_one_eu_per_fuse_bit = 1
-
-__maybe_unused
-static const struct intel_device_info pvc_info = {
- XE_HPC_FEATURES,
- XE_HPM_FEATURES,
- DGFX_FEATURES,
- .__runtime.graphics.ip.rel = 60,
- .__runtime.media.ip.rel = 60,
- PLATFORM(INTEL_PONTEVECCHIO),
- .has_flat_ccs = 0,
- .max_pat_index = 7,
- .platform_engine_mask =
- BIT(BCS0) |
- BIT(VCS0) |
- BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
- .require_force_probe = 1,
- PVC_CACHELEVEL,
-};
-
static const struct intel_gt_definition xelpmp_extra_gt[] = {
{
.type = GT_MEDIA,
@@ -842,6 +788,8 @@ static const struct intel_device_info mtl_info = {
#undef PLATFORM
+__diag_pop();
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
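
The i915_pci.c hunks bracket the device-info tables with __diag_push()/__diag_ignore_all("-Woverride-init", ...)/__diag_pop(), the kernel's portable wrappers around the compiler's diagnostic pragmas, so that intentional field overrides between the stacked GEN()/*_FEATURES initializer macros no longer trip the warning. The same bracketing written with the raw GCC pragma, on a hypothetical struct:

    /* Hypothetical device-info table; the overrides below are intentional. */
    struct info {
            int ver;
            int rel;
            int dgfx;
    };

    #define BASE_FEATURES   .ver = 12, .rel = 50, .dgfx = 0
    #define DG2_FEATURES    BASE_FEATURES, .rel = 55, .dgfx = 1  /* overrides .rel, .dgfx */

    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Woverride-init"

    static const struct info dg2_info = {
            DG2_FEATURES,   /* later initializers deliberately win */
    };

    #pragma GCC diagnostic pop

    int main(void)
    {
            return dg2_info.rel == 55 ? 0 : 1;
    }
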
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index bd9d812b1afa..0b1cd4c7a525 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -292,7 +292,7 @@ static u32 i915_perf_stream_paranoid = true;
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
#define OAREPORT_REASON_CLK_RATIO (1<<5)
-#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
*
@@ -817,7 +817,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
*/
if (oa_report_ctx_invalid(stream, report) &&
- GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) {
+ GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 55)) {
ctx_id = INVALID_CTX_ID;
oa_context_id_squash(stream, report32);
}
@@ -1419,7 +1419,7 @@ static int gen12_get_render_context_id(struct i915_perf_stream *stream)
mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
(GEN12_GUC_SW_CTX_ID_SHIFT - 32);
- } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 55)) {
ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
(XEHP_SW_CTX_ID_SHIFT - 32);
@@ -2881,11 +2881,11 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
int ret;
/*
- * Wa_1508761755:xehpsdv, dg2
+ * Wa_1508761755
* EU NOA signals behave incorrectly if EU clock gating is enabled.
* Disable thread stall DOP gating and EU DOP gating.
*/
- if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
+ if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
@@ -2911,7 +2911,7 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
/*
* Initialize Super Queue Internal Cnt Register
* Set PMON Enable in order to collect valid metrics.
- * Enable byets per clock reporting in OA for XEHPSDV onward.
+ * Enable bytes per clock reporting in OA.
*/
sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
@@ -2971,10 +2971,9 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
u32 sqcnt1;
/*
- * Wa_1508761755:xehpsdv, dg2
- * Enable thread stall DOP gating and EU DOP gating.
+ * Wa_1508761755: Enable thread stall DOP gating and EU DOP gating.
*/
- if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
+ if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
_MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
@@ -4123,7 +4122,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
props->hold_preemption = !!value;
break;
case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
- if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
+ if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 55)) {
drm_dbg(&perf->i915->drm,
"SSEU config not supported on gfx %x\n",
GRAPHICS_VER_FULL(perf->i915));
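
The Wa_1508761755 hunks poke GEN8_ROW_CHICKEN through _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE(). These "masked" registers carry a write-enable mask in the upper 16 bits and the new values in the lower 16, so one write can flip a single bit without a read-modify-write. The sketch below shows the assumed encoding, mirroring the driver's _MASKED_FIELD convention; the bit value used is illustrative only:

    #include <assert.h>
    #include <stdint.h>

    /* Assumed encoding: high half selects which bits the write touches,
     * low half carries the new values for those bits. */
    #define MASKED_FIELD(mask, value)   (((uint32_t)(mask) << 16) | (value))
    #define MASKED_BIT_ENABLE(bit)      MASKED_FIELD((bit), (bit))
    #define MASKED_BIT_DISABLE(bit)     MASKED_FIELD((bit), 0)

    #define STALL_DOP_GATING_DISABLE    (1u << 5)   /* illustrative bit position */

    int main(void)
    {
            uint32_t en  = MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE);
            uint32_t dis = MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE);

            /* Both writes select the same bit; only the value half differs. */
            assert((en  >> 16) == STALL_DOP_GATING_DISABLE);
            assert((dis >> 16) == STALL_DOP_GATING_DISABLE);
            assert((en  & 0xffff) == STALL_DOP_GATING_DISABLE);
            assert((dis & 0xffff) == 0);
            return 0;
    }
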
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3baa2f54a86e..14d9ec0ed777 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -105,7 +105,7 @@ static int query_geometry_subslices(struct drm_i915_private *i915,
struct intel_engine_cs *engine;
struct i915_engine_class_instance classinstance;
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return -ENODEV;
classinstance = *((struct i915_engine_class_instance *)&query_item->flags);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e00557e1a57f..e22a82a5ddd7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -195,367 +195,6 @@
#define DPIO_SFR_BYPASS (1 << 1)
#define DPIO_CMNRST (1 << 0)
-/*
- * Per pipe/PLL DPIO regs
- */
-#define _VLV_PLL_DW3_CH0 0x800c
-#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
-#define DPIO_POST_DIV_DAC 0
-#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
-#define DPIO_POST_DIV_LVDS1 2
-#define DPIO_POST_DIV_LVDS2 3
-#define DPIO_K_SHIFT (24) /* 4 bits */
-#define DPIO_P1_SHIFT (21) /* 3 bits */
-#define DPIO_P2_SHIFT (16) /* 5 bits */
-#define DPIO_N_SHIFT (12) /* 4 bits */
-#define DPIO_ENABLE_CALIBRATION (1 << 11)
-#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
-#define DPIO_M2DIV_MASK 0xff
-#define _VLV_PLL_DW3_CH1 0x802c
-#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
-
-#define _VLV_PLL_DW5_CH0 0x8014
-#define DPIO_REFSEL_OVERRIDE 27
-#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
-#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
-#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
-#define DPIO_PLL_REFCLK_SEL_MASK 3
-#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
-#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
-#define _VLV_PLL_DW5_CH1 0x8034
-#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
-
-#define _VLV_PLL_DW7_CH0 0x801c
-#define _VLV_PLL_DW7_CH1 0x803c
-#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
-
-#define _VLV_PLL_DW8_CH0 0x8040
-#define _VLV_PLL_DW8_CH1 0x8060
-#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
-
-#define VLV_PLL_DW9_BCAST 0xc044
-#define _VLV_PLL_DW9_CH0 0x8044
-#define _VLV_PLL_DW9_CH1 0x8064
-#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
-
-#define _VLV_PLL_DW10_CH0 0x8048
-#define _VLV_PLL_DW10_CH1 0x8068
-#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
-
-#define _VLV_PLL_DW11_CH0 0x804c
-#define _VLV_PLL_DW11_CH1 0x806c
-#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
-
-/* Spec for ref block start counts at DW10 */
-#define VLV_REF_DW13 0x80ac
-
-#define VLV_CMN_DW0 0x8100
-
-/*
- * Per DDI channel DPIO regs
- */
-
-#define _VLV_PCS_DW0_CH0 0x8200
-#define _VLV_PCS_DW0_CH1 0x8400
-#define DPIO_PCS_TX_LANE2_RESET (1 << 16)
-#define DPIO_PCS_TX_LANE1_RESET (1 << 7)
-#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1 << 3)
-#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
-
-#define _VLV_PCS01_DW0_CH0 0x200
-#define _VLV_PCS23_DW0_CH0 0x400
-#define _VLV_PCS01_DW0_CH1 0x2600
-#define _VLV_PCS23_DW0_CH1 0x2800
-#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
-#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
-
-#define _VLV_PCS_DW1_CH0 0x8204
-#define _VLV_PCS_DW1_CH1 0x8404
-#define CHV_PCS_REQ_SOFTRESET_EN (1 << 23)
-#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1 << 22)
-#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21)
-#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
-#define DPIO_PCS_CLK_SOFT_RESET (1 << 5)
-#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
-
-#define _VLV_PCS01_DW1_CH0 0x204
-#define _VLV_PCS23_DW1_CH0 0x404
-#define _VLV_PCS01_DW1_CH1 0x2604
-#define _VLV_PCS23_DW1_CH1 0x2804
-#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1)
-#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1)
-
-#define _VLV_PCS_DW8_CH0 0x8220
-#define _VLV_PCS_DW8_CH1 0x8420
-#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20)
-#define CHV_PCS_USEDCLKCHANNEL (1 << 21)
-#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
-
-#define _VLV_PCS01_DW8_CH0 0x0220
-#define _VLV_PCS23_DW8_CH0 0x0420
-#define _VLV_PCS01_DW8_CH1 0x2620
-#define _VLV_PCS23_DW8_CH1 0x2820
-#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
-#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
-
-#define _VLV_PCS_DW9_CH0 0x8224
-#define _VLV_PCS_DW9_CH1 0x8424
-#define DPIO_PCS_TX2MARGIN_MASK (0x7 << 13)
-#define DPIO_PCS_TX2MARGIN_000 (0 << 13)
-#define DPIO_PCS_TX2MARGIN_101 (1 << 13)
-#define DPIO_PCS_TX1MARGIN_MASK (0x7 << 10)
-#define DPIO_PCS_TX1MARGIN_000 (0 << 10)
-#define DPIO_PCS_TX1MARGIN_101 (1 << 10)
-#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
-
-#define _VLV_PCS01_DW9_CH0 0x224
-#define _VLV_PCS23_DW9_CH0 0x424
-#define _VLV_PCS01_DW9_CH1 0x2624
-#define _VLV_PCS23_DW9_CH1 0x2824
-#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
-#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
-
-#define _CHV_PCS_DW10_CH0 0x8228
-#define _CHV_PCS_DW10_CH1 0x8428
-#define DPIO_PCS_SWING_CALC_TX0_TX2 (1 << 30)
-#define DPIO_PCS_SWING_CALC_TX1_TX3 (1 << 31)
-#define DPIO_PCS_TX2DEEMP_MASK (0xf << 24)
-#define DPIO_PCS_TX2DEEMP_9P5 (0 << 24)
-#define DPIO_PCS_TX2DEEMP_6P0 (2 << 24)
-#define DPIO_PCS_TX1DEEMP_MASK (0xf << 16)
-#define DPIO_PCS_TX1DEEMP_9P5 (0 << 16)
-#define DPIO_PCS_TX1DEEMP_6P0 (2 << 16)
-#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
-
-#define _VLV_PCS01_DW10_CH0 0x0228
-#define _VLV_PCS23_DW10_CH0 0x0428
-#define _VLV_PCS01_DW10_CH1 0x2628
-#define _VLV_PCS23_DW10_CH1 0x2828
-#define VLV_PCS01_DW10(port) _PORT(port, _VLV_PCS01_DW10_CH0, _VLV_PCS01_DW10_CH1)
-#define VLV_PCS23_DW10(port) _PORT(port, _VLV_PCS23_DW10_CH0, _VLV_PCS23_DW10_CH1)
-
-#define _VLV_PCS_DW11_CH0 0x822c
-#define _VLV_PCS_DW11_CH1 0x842c
-#define DPIO_TX2_STAGGER_MASK(x) ((x) << 24)
-#define DPIO_LANEDESKEW_STRAP_OVRD (1 << 3)
-#define DPIO_LEFT_TXFIFO_RST_MASTER (1 << 1)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0)
-#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
-
-#define _VLV_PCS01_DW11_CH0 0x022c
-#define _VLV_PCS23_DW11_CH0 0x042c
-#define _VLV_PCS01_DW11_CH1 0x262c
-#define _VLV_PCS23_DW11_CH1 0x282c
-#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
-#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
-
-#define _VLV_PCS01_DW12_CH0 0x0230
-#define _VLV_PCS23_DW12_CH0 0x0430
-#define _VLV_PCS01_DW12_CH1 0x2630
-#define _VLV_PCS23_DW12_CH1 0x2830
-#define VLV_PCS01_DW12(ch) _PORT(ch, _VLV_PCS01_DW12_CH0, _VLV_PCS01_DW12_CH1)
-#define VLV_PCS23_DW12(ch) _PORT(ch, _VLV_PCS23_DW12_CH0, _VLV_PCS23_DW12_CH1)
-
-#define _VLV_PCS_DW12_CH0 0x8230
-#define _VLV_PCS_DW12_CH1 0x8430
-#define DPIO_TX2_STAGGER_MULT(x) ((x) << 20)
-#define DPIO_TX1_STAGGER_MULT(x) ((x) << 16)
-#define DPIO_TX1_STAGGER_MASK(x) ((x) << 8)
-#define DPIO_LANESTAGGER_STRAP_OVRD (1 << 6)
-#define DPIO_LANESTAGGER_STRAP(x) ((x) << 0)
-#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
-
-#define _VLV_PCS_DW14_CH0 0x8238
-#define _VLV_PCS_DW14_CH1 0x8438
-#define VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
-
-#define _VLV_PCS_DW23_CH0 0x825c
-#define _VLV_PCS_DW23_CH1 0x845c
-#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
-
-#define _VLV_TX_DW2_CH0 0x8288
-#define _VLV_TX_DW2_CH1 0x8488
-#define DPIO_SWING_MARGIN000_SHIFT 16
-#define DPIO_SWING_MARGIN000_MASK (0xff << DPIO_SWING_MARGIN000_SHIFT)
-#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
-#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
-
-#define _VLV_TX_DW3_CH0 0x828c
-#define _VLV_TX_DW3_CH1 0x848c
-/* The following bit for CHV phy */
-#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1 << 27)
-#define DPIO_SWING_MARGIN101_SHIFT 16
-#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
-#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
-
-#define _VLV_TX_DW4_CH0 0x8290
-#define _VLV_TX_DW4_CH1 0x8490
-#define DPIO_SWING_DEEMPH9P5_SHIFT 24
-#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
-#define DPIO_SWING_DEEMPH6P0_SHIFT 16
-#define DPIO_SWING_DEEMPH6P0_MASK (0xff << DPIO_SWING_DEEMPH6P0_SHIFT)
-#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
-
-#define _VLV_TX3_DW4_CH0 0x690
-#define _VLV_TX3_DW4_CH1 0x2a90
-#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
-
-#define _VLV_TX_DW5_CH0 0x8294
-#define _VLV_TX_DW5_CH1 0x8494
-#define DPIO_TX_OCALINIT_EN (1 << 31)
-#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
-
-#define _VLV_TX_DW11_CH0 0x82ac
-#define _VLV_TX_DW11_CH1 0x84ac
-#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
-
-#define _VLV_TX_DW14_CH0 0x82b8
-#define _VLV_TX_DW14_CH1 0x84b8
-#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
-
-/* CHV dpPhy registers */
-#define _CHV_PLL_DW0_CH0 0x8000
-#define _CHV_PLL_DW0_CH1 0x8180
-#define CHV_PLL_DW0(ch) _PIPE(ch, _CHV_PLL_DW0_CH0, _CHV_PLL_DW0_CH1)
-
-#define _CHV_PLL_DW1_CH0 0x8004
-#define _CHV_PLL_DW1_CH1 0x8184
-#define DPIO_CHV_N_DIV_SHIFT 8
-#define DPIO_CHV_M1_DIV_BY_2 (0 << 0)
-#define CHV_PLL_DW1(ch) _PIPE(ch, _CHV_PLL_DW1_CH0, _CHV_PLL_DW1_CH1)
-
-#define _CHV_PLL_DW2_CH0 0x8008
-#define _CHV_PLL_DW2_CH1 0x8188
-#define CHV_PLL_DW2(ch) _PIPE(ch, _CHV_PLL_DW2_CH0, _CHV_PLL_DW2_CH1)
-
-#define _CHV_PLL_DW3_CH0 0x800c
-#define _CHV_PLL_DW3_CH1 0x818c
-#define DPIO_CHV_FRAC_DIV_EN (1 << 16)
-#define DPIO_CHV_FIRST_MOD (0 << 8)
-#define DPIO_CHV_SECOND_MOD (1 << 8)
-#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
-#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0)
-#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
-
-#define _CHV_PLL_DW6_CH0 0x8018
-#define _CHV_PLL_DW6_CH1 0x8198
-#define DPIO_CHV_GAIN_CTRL_SHIFT 16
-#define DPIO_CHV_INT_COEFF_SHIFT 8
-#define DPIO_CHV_PROP_COEFF_SHIFT 0
-#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
-
-#define _CHV_PLL_DW8_CH0 0x8020
-#define _CHV_PLL_DW8_CH1 0x81A0
-#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0
-#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0)
-#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
-
-#define _CHV_PLL_DW9_CH0 0x8024
-#define _CHV_PLL_DW9_CH1 0x81A4
-#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */
-#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1)
-#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
-#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
-
-#define _CHV_CMN_DW0_CH0 0x8100
-#define DPIO_ALLDL_POWERDOWN_SHIFT_CH0 19
-#define DPIO_ANYDL_POWERDOWN_SHIFT_CH0 18
-#define DPIO_ALLDL_POWERDOWN (1 << 1)
-#define DPIO_ANYDL_POWERDOWN (1 << 0)
-
-#define _CHV_CMN_DW5_CH0 0x8114
-#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
-#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
-#define CHV_BUFRIGHTENA1_FORCE (3 << 20)
-#define CHV_BUFRIGHTENA1_MASK (3 << 20)
-#define CHV_BUFLEFTENA1_DISABLE (0 << 22)
-#define CHV_BUFLEFTENA1_NORMAL (1 << 22)
-#define CHV_BUFLEFTENA1_FORCE (3 << 22)
-#define CHV_BUFLEFTENA1_MASK (3 << 22)
-
-#define _CHV_CMN_DW13_CH0 0x8134
-#define _CHV_CMN_DW0_CH1 0x8080
-#define DPIO_CHV_S1_DIV_SHIFT 21
-#define DPIO_CHV_P1_DIV_SHIFT 13 /* 3 bits */
-#define DPIO_CHV_P2_DIV_SHIFT 8 /* 5 bits */
-#define DPIO_CHV_K_DIV_SHIFT 4
-#define DPIO_PLL_FREQLOCK (1 << 1)
-#define DPIO_PLL_LOCK (1 << 0)
-#define CHV_CMN_DW13(ch) _PIPE(ch, _CHV_CMN_DW13_CH0, _CHV_CMN_DW0_CH1)
-
-#define _CHV_CMN_DW14_CH0 0x8138
-#define _CHV_CMN_DW1_CH1 0x8084
-#define DPIO_AFC_RECAL (1 << 14)
-#define DPIO_DCLKP_EN (1 << 13)
-#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */
-#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */
-#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */
-#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */
-#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */
-#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
-
-#define _CHV_CMN_DW19_CH0 0x814c
-#define _CHV_CMN_DW6_CH1 0x8098
-#define DPIO_ALLDL_POWERDOWN_SHIFT_CH1 30 /* CL2 DW6 only */
-#define DPIO_ANYDL_POWERDOWN_SHIFT_CH1 29 /* CL2 DW6 only */
-#define DPIO_DYNPWRDOWNEN_CH1 (1 << 28) /* CL2 DW6 only */
-#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
-
-#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
-
-#define CHV_CMN_DW28 0x8170
-#define DPIO_CL1POWERDOWNEN (1 << 23)
-#define DPIO_DYNPWRDOWNEN_CH0 (1 << 22)
-#define DPIO_SUS_CLK_CONFIG_ON (0 << 0)
-#define DPIO_SUS_CLK_CONFIG_CLKREQ (1 << 0)
-#define DPIO_SUS_CLK_CONFIG_GATE (2 << 0)
-#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ (3 << 0)
-
-#define CHV_CMN_DW30 0x8178
-#define DPIO_CL2_LDOFUSE_PWRENB (1 << 6)
-#define DPIO_LRC_BYPASS (1 << 3)
-
-#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
- (lane) * 0x200 + (offset))
-
-#define CHV_TX_DW0(ch, lane) _TXLANE(ch, lane, 0x80)
-#define CHV_TX_DW1(ch, lane) _TXLANE(ch, lane, 0x84)
-#define CHV_TX_DW2(ch, lane) _TXLANE(ch, lane, 0x88)
-#define CHV_TX_DW3(ch, lane) _TXLANE(ch, lane, 0x8c)
-#define CHV_TX_DW4(ch, lane) _TXLANE(ch, lane, 0x90)
-#define CHV_TX_DW5(ch, lane) _TXLANE(ch, lane, 0x94)
-#define CHV_TX_DW6(ch, lane) _TXLANE(ch, lane, 0x98)
-#define CHV_TX_DW7(ch, lane) _TXLANE(ch, lane, 0x9c)
-#define CHV_TX_DW8(ch, lane) _TXLANE(ch, lane, 0xa0)
-#define CHV_TX_DW9(ch, lane) _TXLANE(ch, lane, 0xa4)
-#define CHV_TX_DW10(ch, lane) _TXLANE(ch, lane, 0xa8)
-#define CHV_TX_DW11(ch, lane) _TXLANE(ch, lane, 0xac)
-#define DPIO_FRC_LATENCY_SHFIT 8
-#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
-#define DPIO_UPAR_SHIFT 30
-
-/* BXT PHY registers */
-#define _BXT_PHY0_BASE 0x6C000
-#define _BXT_PHY1_BASE 0x162000
-#define _BXT_PHY2_BASE 0x163000
-#define BXT_PHY_BASE(phy) \
- _PICK_EVEN_2RANGES(phy, 1, \
- _BXT_PHY0_BASE, _BXT_PHY0_BASE, \
- _BXT_PHY1_BASE, _BXT_PHY2_BASE)
-
-#define _BXT_PHY(phy, reg) \
- _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
-
-#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
- (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
- (reg_ch1) - _BXT_PHY0_BASE))
-#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
- _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
-
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define MIPIO_RST_CTRL (1 << 2)
@@ -577,250 +216,6 @@
_PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
_PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
-/* BXT PHY PLL registers */
-#define _PORT_PLL_A 0x46074
-#define _PORT_PLL_B 0x46078
-#define _PORT_PLL_C 0x4607c
-#define PORT_PLL_ENABLE REG_BIT(31)
-#define PORT_PLL_LOCK REG_BIT(30)
-#define PORT_PLL_REF_SEL REG_BIT(27)
-#define PORT_PLL_POWER_ENABLE REG_BIT(26)
-#define PORT_PLL_POWER_STATE REG_BIT(25)
-#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
-
-#define _PORT_PLL_EBB_0_A 0x162034
-#define _PORT_PLL_EBB_0_B 0x6C034
-#define _PORT_PLL_EBB_0_C 0x6C340
-#define PORT_PLL_P1_MASK REG_GENMASK(15, 13)
-#define PORT_PLL_P1(p1) REG_FIELD_PREP(PORT_PLL_P1_MASK, (p1))
-#define PORT_PLL_P2_MASK REG_GENMASK(12, 8)
-#define PORT_PLL_P2(p2) REG_FIELD_PREP(PORT_PLL_P2_MASK, (p2))
-#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PLL_EBB_0_B, \
- _PORT_PLL_EBB_0_C)
-
-#define _PORT_PLL_EBB_4_A 0x162038
-#define _PORT_PLL_EBB_4_B 0x6C038
-#define _PORT_PLL_EBB_4_C 0x6C344
-#define PORT_PLL_RECALIBRATE REG_BIT(14)
-#define PORT_PLL_10BIT_CLK_ENABLE REG_BIT(13)
-#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PLL_EBB_4_B, \
- _PORT_PLL_EBB_4_C)
-
-#define _PORT_PLL_0_A 0x162100
-#define _PORT_PLL_0_B 0x6C100
-#define _PORT_PLL_0_C 0x6C380
-/* PORT_PLL_0_A */
-#define PORT_PLL_M2_INT_MASK REG_GENMASK(7, 0)
-#define PORT_PLL_M2_INT(m2_int) REG_FIELD_PREP(PORT_PLL_M2_INT_MASK, (m2_int))
-/* PORT_PLL_1_A */
-#define PORT_PLL_N_MASK REG_GENMASK(11, 8)
-#define PORT_PLL_N(n) REG_FIELD_PREP(PORT_PLL_N_MASK, (n))
-/* PORT_PLL_2_A */
-#define PORT_PLL_M2_FRAC_MASK REG_GENMASK(21, 0)
-#define PORT_PLL_M2_FRAC(m2_frac) REG_FIELD_PREP(PORT_PLL_M2_FRAC_MASK, (m2_frac))
-/* PORT_PLL_3_A */
-#define PORT_PLL_M2_FRAC_ENABLE REG_BIT(16)
-/* PORT_PLL_6_A */
-#define PORT_PLL_GAIN_CTL_MASK REG_GENMASK(18, 16)
-#define PORT_PLL_GAIN_CTL(x) REG_FIELD_PREP(PORT_PLL_GAIN_CTL_MASK, (x))
-#define PORT_PLL_INT_COEFF_MASK REG_GENMASK(12, 8)
-#define PORT_PLL_INT_COEFF(x) REG_FIELD_PREP(PORT_PLL_INT_COEFF_MASK, (x))
-#define PORT_PLL_PROP_COEFF_MASK REG_GENMASK(3, 0)
-#define PORT_PLL_PROP_COEFF(x) REG_FIELD_PREP(PORT_PLL_PROP_COEFF_MASK, (x))
-/* PORT_PLL_8_A */
-#define PORT_PLL_TARGET_CNT_MASK REG_GENMASK(9, 0)
-#define PORT_PLL_TARGET_CNT(x) REG_FIELD_PREP(PORT_PLL_TARGET_CNT_MASK, (x))
-/* PORT_PLL_9_A */
-#define PORT_PLL_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1)
-#define PORT_PLL_LOCK_THRESHOLD(x) REG_FIELD_PREP(PORT_PLL_LOCK_THRESHOLD_MASK, (x))
-/* PORT_PLL_10_A */
-#define PORT_PLL_DCO_AMP_OVR_EN_H REG_BIT(27)
-#define PORT_PLL_DCO_AMP_MASK REG_GENMASK(13, 10)
-#define PORT_PLL_DCO_AMP(x) REG_FIELD_PREP(PORT_PLL_DCO_AMP_MASK, (x))
-#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
- _PORT_PLL_0_B, \
- _PORT_PLL_0_C)
-#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
- (idx) * 4)
-
-/* BXT PHY common lane registers */
-#define _PORT_CL1CM_DW0_A 0x162000
-#define _PORT_CL1CM_DW0_BC 0x6C000
-#define PHY_POWER_GOOD (1 << 16)
-#define PHY_RESERVED (1 << 7)
-#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
-
-#define _PORT_CL1CM_DW9_A 0x162024
-#define _PORT_CL1CM_DW9_BC 0x6C024
-#define IREF0RC_OFFSET_SHIFT 8
-#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
-
-#define _PORT_CL1CM_DW10_A 0x162028
-#define _PORT_CL1CM_DW10_BC 0x6C028
-#define IREF1RC_OFFSET_SHIFT 8
-#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
-
-#define _PORT_CL1CM_DW28_A 0x162070
-#define _PORT_CL1CM_DW28_BC 0x6C070
-#define OCL1_POWER_DOWN_EN (1 << 23)
-#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22)
-#define SUS_CLK_CONFIG 0x3
-#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
-
-#define _PORT_CL1CM_DW30_A 0x162078
-#define _PORT_CL1CM_DW30_BC 0x6C078
-#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
-#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
-
-/* The spec defines this only for BXT PHY0, but lets assume that this
- * would exist for PHY1 too if it had a second channel.
- */
-#define _PORT_CL2CM_DW6_A 0x162358
-#define _PORT_CL2CM_DW6_BC 0x6C358
-#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
-#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
-
-/* BXT PHY Ref registers */
-#define _PORT_REF_DW3_A 0x16218C
-#define _PORT_REF_DW3_BC 0x6C18C
-#define GRC_DONE (1 << 22)
-#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
-
-#define _PORT_REF_DW6_A 0x162198
-#define _PORT_REF_DW6_BC 0x6C198
-#define GRC_CODE_SHIFT 24
-#define GRC_CODE_MASK (0xFF << GRC_CODE_SHIFT)
-#define GRC_CODE_FAST_SHIFT 16
-#define GRC_CODE_FAST_MASK (0xFF << GRC_CODE_FAST_SHIFT)
-#define GRC_CODE_SLOW_SHIFT 8
-#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
-#define GRC_CODE_NOM_MASK 0xFF
-#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
-
-#define _PORT_REF_DW8_A 0x1621A0
-#define _PORT_REF_DW8_BC 0x6C1A0
-#define GRC_DIS (1 << 15)
-#define GRC_RDY_OVRD (1 << 1)
-#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
-
-/* BXT PHY PCS registers */
-#define _PORT_PCS_DW10_LN01_A 0x162428
-#define _PORT_PCS_DW10_LN01_B 0x6C428
-#define _PORT_PCS_DW10_LN01_C 0x6C828
-#define _PORT_PCS_DW10_GRP_A 0x162C28
-#define _PORT_PCS_DW10_GRP_B 0x6CC28
-#define _PORT_PCS_DW10_GRP_C 0x6CE28
-#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW10_LN01_B, \
- _PORT_PCS_DW10_LN01_C)
-#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW10_GRP_B, \
- _PORT_PCS_DW10_GRP_C)
-
-#define TX2_SWING_CALC_INIT (1 << 31)
-#define TX1_SWING_CALC_INIT (1 << 30)
-
-#define _PORT_PCS_DW12_LN01_A 0x162430
-#define _PORT_PCS_DW12_LN01_B 0x6C430
-#define _PORT_PCS_DW12_LN01_C 0x6C830
-#define _PORT_PCS_DW12_LN23_A 0x162630
-#define _PORT_PCS_DW12_LN23_B 0x6C630
-#define _PORT_PCS_DW12_LN23_C 0x6CA30
-#define _PORT_PCS_DW12_GRP_A 0x162c30
-#define _PORT_PCS_DW12_GRP_B 0x6CC30
-#define _PORT_PCS_DW12_GRP_C 0x6CE30
-#define LANESTAGGER_STRAP_OVRD (1 << 6)
-#define LANE_STAGGER_MASK 0x1F
-#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW12_LN01_B, \
- _PORT_PCS_DW12_LN01_C)
-#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW12_LN23_B, \
- _PORT_PCS_DW12_LN23_C)
-#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_PCS_DW12_GRP_B, \
- _PORT_PCS_DW12_GRP_C)
-
-/* BXT PHY TX registers */
-#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
- ((lane) & 1) * 0x80)
-
-#define _PORT_TX_DW2_LN0_A 0x162508
-#define _PORT_TX_DW2_LN0_B 0x6C508
-#define _PORT_TX_DW2_LN0_C 0x6C908
-#define _PORT_TX_DW2_GRP_A 0x162D08
-#define _PORT_TX_DW2_GRP_B 0x6CD08
-#define _PORT_TX_DW2_GRP_C 0x6CF08
-#define BXT_PORT_TX_DW2_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW2_LN0_B, \
- _PORT_TX_DW2_LN0_C)
-#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW2_GRP_B, \
- _PORT_TX_DW2_GRP_C)
-#define MARGIN_000_SHIFT 16
-#define MARGIN_000 (0xFF << MARGIN_000_SHIFT)
-#define UNIQ_TRANS_SCALE_SHIFT 8
-#define UNIQ_TRANS_SCALE (0xFF << UNIQ_TRANS_SCALE_SHIFT)
-
-#define _PORT_TX_DW3_LN0_A 0x16250C
-#define _PORT_TX_DW3_LN0_B 0x6C50C
-#define _PORT_TX_DW3_LN0_C 0x6C90C
-#define _PORT_TX_DW3_GRP_A 0x162D0C
-#define _PORT_TX_DW3_GRP_B 0x6CD0C
-#define _PORT_TX_DW3_GRP_C 0x6CF0C
-#define BXT_PORT_TX_DW3_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW3_LN0_B, \
- _PORT_TX_DW3_LN0_C)
-#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW3_GRP_B, \
- _PORT_TX_DW3_GRP_C)
-#define SCALE_DCOMP_METHOD (1 << 26)
-#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
-
-#define _PORT_TX_DW4_LN0_A 0x162510
-#define _PORT_TX_DW4_LN0_B 0x6C510
-#define _PORT_TX_DW4_LN0_C 0x6C910
-#define _PORT_TX_DW4_GRP_A 0x162D10
-#define _PORT_TX_DW4_GRP_B 0x6CD10
-#define _PORT_TX_DW4_GRP_C 0x6CF10
-#define BXT_PORT_TX_DW4_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW4_LN0_B, \
- _PORT_TX_DW4_LN0_C)
-#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW4_GRP_B, \
- _PORT_TX_DW4_GRP_C)
-#define DEEMPH_SHIFT 24
-#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
-
-#define _PORT_TX_DW5_LN0_A 0x162514
-#define _PORT_TX_DW5_LN0_B 0x6C514
-#define _PORT_TX_DW5_LN0_C 0x6C914
-#define _PORT_TX_DW5_GRP_A 0x162D14
-#define _PORT_TX_DW5_GRP_B 0x6CD14
-#define _PORT_TX_DW5_GRP_C 0x6CF14
-#define BXT_PORT_TX_DW5_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW5_LN0_B, \
- _PORT_TX_DW5_LN0_C)
-#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
- _PORT_TX_DW5_GRP_B, \
- _PORT_TX_DW5_GRP_C)
-#define DCC_DELAY_RANGE_1 (1 << 9)
-#define DCC_DELAY_RANGE_2 (1 << 8)
-
-#define _PORT_TX_DW14_LN0_A 0x162538
-#define _PORT_TX_DW14_LN0_B 0x6C538
-#define _PORT_TX_DW14_LN0_C 0x6C938
-#define LATENCY_OPTIM_SHIFT 30
-#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
-#define BXT_PORT_TX_DW14_LN(phy, ch, lane) \
- _MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW14_LN0_B, \
- _PORT_TX_DW14_LN0_C) + \
- _BXT_LANE_OFFSET(lane))
-
/* UAIMI scratch pad register 1 */
#define UAIMI_SPR1 _MMIO(0x4F074)
/* SKL VccIO mask */
@@ -1228,22 +623,6 @@
#define I915_ASLE_INTERRUPT (1 << 0)
#define I915_BSD_USER_INTERRUPT (1 << 25)
-#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000)
-#define I915_HDMI_LPE_AUDIO_SIZE 0x1000
-
-/* DisplayPort Audio w/ LPE */
-#define VLV_AUD_CHICKEN_BIT_REG _MMIO(VLV_DISPLAY_BASE + 0x62F38)
-#define VLV_CHICKEN_BIT_DBG_ENABLE (1 << 0)
-
-#define _VLV_AUD_PORT_EN_B_DBG (VLV_DISPLAY_BASE + 0x62F20)
-#define _VLV_AUD_PORT_EN_C_DBG (VLV_DISPLAY_BASE + 0x62F30)
-#define _VLV_AUD_PORT_EN_D_DBG (VLV_DISPLAY_BASE + 0x62F34)
-#define VLV_AUD_PORT_EN_DBG(port) _MMIO_PORT3((port) - PORT_B, \
- _VLV_AUD_PORT_EN_B_DBG, \
- _VLV_AUD_PORT_EN_C_DBG, \
- _VLV_AUD_PORT_EN_D_DBG)
-#define VLV_AMP_MUTE (1 << 1)
-
#define GEN6_BSD_RNCID _MMIO(0x12198)
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
@@ -1264,109 +643,6 @@
#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1 << 4) /* Default */
#define GEN7_FF_DS_SCHED_HW (0x0 << 4)
-/*
- * Framebuffer compression (915+ only)
- */
-
-#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
-#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
-#define FBC_CONTROL _MMIO(0x3208)
-#define FBC_CTL_EN REG_BIT(31)
-#define FBC_CTL_PERIODIC REG_BIT(30)
-#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
-#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
-#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
-#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
-#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */
-#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
-#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
-#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
-#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
-#define FBC_COMMAND _MMIO(0x320c)
-#define FBC_CMD_COMPRESS REG_BIT(0)
-#define FBC_STATUS _MMIO(0x3210)
-#define FBC_STAT_COMPRESSING REG_BIT(31)
-#define FBC_STAT_COMPRESSED REG_BIT(30)
-#define FBC_STAT_MODIFIED REG_BIT(29)
-#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0)
-#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */
-#define FBC_CTL_FENCE_DBL REG_BIT(4)
-#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2)
-#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0)
-#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1)
-#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2)
-#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3)
-#define FBC_CTL_CPU_FENCE_EN REG_BIT(1)
-#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0)
-#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane))
-#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */
-#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */
-#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1)
-#define FBC_MOD_NUM_VALID REG_BIT(0)
-#define FBC_TAG(i)			_MMIO(0x3300 + (i) * 4) /* 49 registers */
-#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */
-#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0)
-#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1)
-#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2)
-#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3)
-
-#define FBC_LL_SIZE (1536)
-
-/* Framebuffer compression for GM45+ */
-#define DPFC_CB_BASE _MMIO(0x3200)
-#define ILK_DPFC_CB_BASE(fbc_id) _MMIO_PIPE((fbc_id), 0x43200, 0x43240)
-#define DPFC_CONTROL _MMIO(0x3208)
-#define ILK_DPFC_CONTROL(fbc_id) _MMIO_PIPE((fbc_id), 0x43208, 0x43248)
-#define DPFC_CTL_EN REG_BIT(31)
-#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */
-#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane))
-#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */
-#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */
-#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane))
-#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */
-#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */
-#define DPFC_CTL_PLANE_BINDING_MASK REG_GENMASK(12, 11) /* lnl+ */
-#define DPFC_CTL_PLANE_BINDING(plane_id) REG_FIELD_PREP(DPFC_CTL_PLANE_BINDING_MASK, (plane_id))
-#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */
-#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */
-#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */
-#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6)
-#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0)
-#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1)
-#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2)
-#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
-#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence))
-#define DPFC_RECOMP_CTL _MMIO(0x320c)
-#define ILK_DPFC_RECOMP_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x4320c, 0x4324c)
-#define DPFC_RECOMP_STALL_EN REG_BIT(27)
-#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16)
-#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0)
-#define DPFC_STATUS _MMIO(0x3210)
-#define ILK_DPFC_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43210, 0x43250)
-#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16)
-#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0)
-#define DPFC_STATUS2 _MMIO(0x3214)
-#define ILK_DPFC_STATUS2(fbc_id) _MMIO_PIPE((fbc_id), 0x43214, 0x43254)
-#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0)
-#define DPFC_FENCE_YOFF _MMIO(0x3218)
-#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258)
-#define DPFC_CHICKEN _MMIO(0x3224)
-#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264)
-#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
-#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
-#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
-#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */
-#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
-
-#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268)
-#define FBC_STRIDE_OVERRIDE REG_BIT(15)
-#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
-#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
-
-#define ILK_FBC_RT_BASE _MMIO(0x2128)
-#define ILK_FBC_RT_VALID REG_BIT(0)
-#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
-
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
#define ILK_FBCQ_DIS REG_BIT(22)
#define ILK_PABSTRETCH_DIS REG_BIT(21)
@@ -1382,37 +658,18 @@
#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2)
#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3)
-
-/*
- * Framebuffer compression for Sandybridge
- *
- * The following two registers are of type GTTMMADR
- */
-#define SNB_DPFC_CTL_SA _MMIO(0x100100)
-#define SNB_DPFC_FENCE_EN REG_BIT(29)
-#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0)
-#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence))
-#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
-
-/* Framebuffer compression for Ivybridge */
-#define IVB_FBC_RT_BASE _MMIO(0x7020)
-#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
-
#define IPS_CTL _MMIO(0x43408)
#define IPS_ENABLE REG_BIT(31)
#define IPS_FALSE_COLOR REG_BIT(4)
-#define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384)
-#define FBC_REND_NUKE REG_BIT(2)
-#define FBC_REND_CACHE_CLEAN REG_BIT(1)
-
/*
* Clock control & power management
*/
-#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
-#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018)
-#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030)
-#define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
+#define _DPLL_A 0x6014
+#define _DPLL_B 0x6018
+#define _CHV_DPLL_C 0x6030
+#define DPLL(pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
#define VGA0 _MMIO(0x6000)
#define VGA1 _MMIO(0x6004)
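
The DPLL()/DPLL_MD() conversion above moves the display MMIO base out of the per-pipe offset defines and into a single _MMIO_BASE_PIPE3() lookup. A standalone sketch of the selection that lookup performs, as a simplified re-implementation for illustration only (the 0x180000 value is the VLV/CHV display MMIO base, used here purely as an example; the real macro presumably lives alongside the other _MMIO_* helpers):

#include <stdint.h>
#include <stdio.h>

/* Simplified model of _MMIO_BASE_PIPE3(base, pipe, a, b, c): choose the
 * per-pipe offset, then add the device's display MMIO base. */
static uint32_t mmio_base_pipe3(uint32_t base, int pipe,
				uint32_t a, uint32_t b, uint32_t c)
{
	const uint32_t off[3] = { a, b, c };

	return base + off[pipe];
}

int main(void)
{
	/* Offsets taken from the _DPLL_* defines above. */
	printf("DPLL(pipe B) -> %#x\n",
	       mmio_base_pipe3(0x180000, 1, 0x6014, 0x6018, 0x6030));
	return 0;
}
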
@@ -1508,10 +765,11 @@
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c)
-#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020)
-#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c)
-#define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
+#define _DPLL_A_MD 0x601c
+#define _DPLL_B_MD 0x6020
+#define _CHV_DPLL_C_MD 0x603c
+#define DPLL_MD(pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \
+ (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -1716,42 +974,10 @@
#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
-/*
- * Palette regs
- */
-#define _PALETTE_A 0xa000
-#define _PALETTE_B 0xa800
-#define _CHV_PALETTE_C 0xc000
-/* 8bit mode / i965+ 10.6 interpolated mode ldw/udw */
-#define PALETTE_RED_MASK REG_GENMASK(23, 16)
-#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
-#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
-/* pre-i965 10bit interpolated mode ldw */
-#define PALETTE_10BIT_RED_LDW_MASK REG_GENMASK(23, 16)
-#define PALETTE_10BIT_GREEN_LDW_MASK REG_GENMASK(15, 8)
-#define PALETTE_10BIT_BLUE_LDW_MASK REG_GENMASK(7, 0)
-/* pre-i965 10bit interpolated mode udw */
-#define PALETTE_10BIT_RED_EXP_MASK REG_GENMASK(23, 22)
-#define PALETTE_10BIT_RED_MANT_MASK REG_GENMASK(21, 18)
-#define PALETTE_10BIT_RED_UDW_MASK REG_GENMASK(17, 16)
-#define PALETTE_10BIT_GREEN_EXP_MASK REG_GENMASK(15, 14)
-#define PALETTE_10BIT_GREEN_MANT_MASK REG_GENMASK(13, 10)
-#define PALETTE_10BIT_GREEN_UDW_MASK REG_GENMASK(9, 8)
-#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6)
-#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2)
-#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0)
-#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
- _PICK_EVEN_2RANGES(pipe, 2, \
- _PALETTE_A, _PALETTE_B, \
- _CHV_PALETTE_C, _CHV_PALETTE_C) + \
- (i) * 4)
-
#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
-#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
-#define PVC_RP_STATE_CAP _MMIO(0x281014)
#define MTL_RP_STATE_CAP _MMIO(0x138000)
#define MTL_MEDIAP_STATE_CAP _MMIO(0x138020)
@@ -1911,18 +1137,18 @@
#define _PIPE_CRC_RES_4_B_IVB 0x61070
#define _PIPE_CRC_RES_5_B_IVB 0x61074
-#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_CTL_A)
-#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_1_A_IVB)
-#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_2_A_IVB)
-#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_3_A_IVB)
-#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_4_A_IVB)
-#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_5_A_IVB)
+#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_CTL_A)
+#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_1_A_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_2_A_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_3_A_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_4_A_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_5_A_IVB)
-#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RED_A)
-#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_GREEN_A)
-#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_BLUE_A)
-#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915)
-#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
+#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RED_A)
+#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_GREEN_A)
+#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_BLUE_A)
+#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RES1_A_I915)
+#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RES2_A_G4X)
/* Pipe/transcoder A timing regs */
#define _TRANS_HTOTAL_A 0x60000
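
The PIPE_CRC conversions above show the new _MMIO_TRANS2() form, which takes dev_priv explicitly so the transcoder offset can be looked up from per-device data instead of an implicit global. The general pattern, reduced to a hypothetical standalone helper (the struct and field names are assumptions, not the kernel's):

#include <stdint.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_D };

struct device_display_info {
	uint32_t mmio_base;		/* display MMIO base */
	uint32_t trans_offsets[4];	/* per-transcoder register bases */
};

/* Relocate a transcoder A register definition onto another transcoder:
 * drop transcoder A's base, add the requested transcoder's base. */
static uint32_t trans_reg(const struct device_display_info *info,
			  enum transcoder trans, uint32_t reg_a)
{
	return info->mmio_base + reg_a
	       - info->trans_offsets[TRANSCODER_A]
	       + info->trans_offsets[trans];
}
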
@@ -1991,23 +1217,23 @@
#define _TRANS_VSYNC_DSI1 0x6b814
#define _TRANS_VSYNCSHIFT_DSI1 0x6b828
-#define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A)
-#define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A)
-#define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A)
-#define TRANS_VTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_VTOTAL_A)
-#define TRANS_VBLANK(trans) _MMIO_TRANS2((trans), _TRANS_VBLANK_A)
-#define TRANS_VSYNC(trans) _MMIO_TRANS2((trans), _TRANS_VSYNC_A)
-#define BCLRPAT(trans) _MMIO_TRANS2((trans), _BCLRPAT_A)
-#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2((trans), _TRANS_VSYNCSHIFT_A)
-#define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC)
-#define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A)
+#define TRANS_HTOTAL(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
+#define TRANS_HBLANK(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
+#define TRANS_HSYNC(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
+#define TRANS_VTOTAL(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
+#define TRANS_VBLANK(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
+#define TRANS_VSYNC(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
+#define BCLRPAT(trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
+#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
+#define PIPESRC(pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
+#define TRANS_MULT(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
/* VRR registers */
#define _TRANS_VRR_CTL_A 0x60420
#define _TRANS_VRR_CTL_B 0x61420
#define _TRANS_VRR_CTL_C 0x62420
#define _TRANS_VRR_CTL_D 0x63420
-#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_CTL_A)
#define VRR_CTL_VRR_ENABLE REG_BIT(31)
#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
@@ -2021,21 +1247,21 @@
#define _TRANS_VRR_VMAX_B 0x61424
#define _TRANS_VRR_VMAX_C 0x62424
#define _TRANS_VRR_VMAX_D 0x63424
-#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMAX_A)
+#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMAX_A)
#define VRR_VMAX_MASK REG_GENMASK(19, 0)
#define _TRANS_VRR_VMIN_A 0x60434
#define _TRANS_VRR_VMIN_B 0x61434
#define _TRANS_VRR_VMIN_C 0x62434
#define _TRANS_VRR_VMIN_D 0x63434
-#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMIN_A)
+#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMIN_A)
#define VRR_VMIN_MASK REG_GENMASK(15, 0)
#define _TRANS_VRR_VMAXSHIFT_A 0x60428
#define _TRANS_VRR_VMAXSHIFT_B 0x61428
#define _TRANS_VRR_VMAXSHIFT_C 0x62428
#define _TRANS_VRR_VMAXSHIFT_D 0x63428
-#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(trans, \
+#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(dev_priv, trans, \
_TRANS_VRR_VMAXSHIFT_A)
#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
#define VRR_VMAXSHIFT_DEC REG_BIT(16)
@@ -2045,7 +1271,7 @@
#define _TRANS_VRR_STATUS_B 0x6142C
#define _TRANS_VRR_STATUS_C 0x6242C
#define _TRANS_VRR_STATUS_D 0x6342C
-#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS_A)
+#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS_A)
#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
@@ -2065,7 +1291,7 @@
#define _TRANS_VRR_VTOTAL_PREV_B 0x61480
#define _TRANS_VRR_VTOTAL_PREV_C 0x62480
#define _TRANS_VRR_VTOTAL_PREV_D 0x63480
-#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(trans, \
+#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(dev_priv, trans, \
_TRANS_VRR_VTOTAL_PREV_A)
#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
@@ -2076,7 +1302,7 @@
#define _TRANS_VRR_FLIPLINE_B 0x61438
#define _TRANS_VRR_FLIPLINE_C 0x62438
#define _TRANS_VRR_FLIPLINE_D 0x63438
-#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(trans, \
+#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(dev_priv, trans, \
_TRANS_VRR_FLIPLINE_A)
#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
@@ -2084,17 +1310,24 @@
#define _TRANS_VRR_STATUS2_B 0x6143C
#define _TRANS_VRR_STATUS2_C 0x6243C
#define _TRANS_VRR_STATUS2_D 0x6343C
-#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS2_A)
+#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS2_A)
#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
#define _TRANS_PUSH_A 0x60A70
#define _TRANS_PUSH_B 0x61A70
#define _TRANS_PUSH_C 0x62A70
#define _TRANS_PUSH_D 0x63A70
-#define TRANS_PUSH(trans) _MMIO_TRANS2(trans, _TRANS_PUSH_A)
+#define TRANS_PUSH(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_PUSH_A)
#define TRANS_PUSH_EN REG_BIT(31)
#define TRANS_PUSH_SEND REG_BIT(30)
+#define _TRANS_VRR_VSYNC_A 0x60078
+#define TRANS_VRR_VSYNC(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VSYNC_A)
+#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16)
+#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end))
+#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0)
+#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start))
+
/* VGA port control */
#define ADPA _MMIO(0x61100)
#define PCH_ADPA _MMIO(0xe1100)
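
TRANS_VRR_VSYNC above carries both vsync edges for the VRR timing generator; a value for it would be assembled from the two field helpers just defined, roughly as in this hedged fragment (the intel_de_write() usage and the vsync_start/vsync_end names are assumptions, not taken from this patch):

/* Illustrative fragment only: program the VRR vsync window on one
 * transcoder using the field helpers defined above. */
intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder),
	       VRR_VSYNC_START(vsync_start) | VRR_VSYNC_END(vsync_end));
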
@@ -2312,6 +1545,7 @@
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_ASYNC_DATA_SIZE 36
#define VIDEO_DIP_GMP_DATA_SIZE 36
#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_PPS_DATA_SIZE 132
@@ -2350,6 +1584,8 @@
#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+/* ADL and later: */
+#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
/* Panel fitting */
#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
@@ -2588,6 +1824,9 @@
#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
+#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0)
+#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
+
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
@@ -2639,18 +1878,18 @@
#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
-#define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF)
-#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
-#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH)
-#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
-#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT)
+#define TRANSCONF(trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
+#define PIPEDSL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
+#define PIPEFRAME(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
+#define PIPESTAT(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
-#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4) /* u1.16 */
+#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(dev_priv, pipe, _PIPEAGCMAX + (i) * 4) /* u1.16 */
#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
-#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(pipe, _PIPE_ARB_CTL_A)
+#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
#define _PIPE_MISC_A 0x70030
@@ -2694,7 +1933,7 @@
#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
#define _ICL_PIPE_A_STATUS 0x70058
-#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(pipe, _ICL_PIPE_A_STATUS)
+#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(dev_priv, pipe, _ICL_PIPE_A_STATUS)
#define PIPE_STATUS_UNDERRUN REG_BIT(31)
#define PIPE_STATUS_SOFT_UNDERRUN_XELPD REG_BIT(28)
#define PIPE_STATUS_HARD_UNDERRUN_XELPD REG_BIT(27)
@@ -2969,8 +2208,8 @@
#define _WM0_PIPEA_ILK 0x45100
#define _WM0_PIPEB_ILK 0x45104
#define _WM0_PIPEC_IVB 0x45200
-#define WM0_PIPE_ILK(pipe) _MMIO_PIPE3((pipe), _WM0_PIPEA_ILK, \
- _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
+#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \
+ _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
@@ -3024,8 +2263,8 @@
/* GM45+ just has to be different */
#define _PIPEA_FRMCOUNT_G4X 0x70040
#define _PIPEA_FLIPCOUNT_G4X 0x70044
-#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
-#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
+#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
+#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
/* Cursor A & B regs */
#define _CURACNTR 0x70080
@@ -3053,6 +2292,7 @@
#define MCURSOR_MODE_DISABLE 0x00
#define MCURSOR_MODE_128_32B_AX 0x02
#define MCURSOR_MODE_256_32B_AX 0x03
+#define MCURSOR_MODE_64_2B 0x04
#define MCURSOR_MODE_64_32B_AX 0x07
#define MCURSOR_MODE_128_ARGB_AX (0x20 | MCURSOR_MODE_128_32B_AX)
#define MCURSOR_MODE_256_ARGB_AX (0x20 | MCURSOR_MODE_256_32B_AX)
@@ -3085,14 +2325,14 @@
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
-#define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR)
-#define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE)
-#define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS)
-#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(pipe, _CURAPOS_ERLY_TPT)
-#define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE)
-#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A)
-#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(pipe, _CUR_CHICKEN_A)
-#define CURSURFLIVE(pipe) _MMIO_CURSOR2(pipe, _CURASURFLIVE)
+#define CURCNTR(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURACNTR)
+#define CURBASE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURABASE)
+#define CURPOS(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURAPOS)
+#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURAPOS_ERLY_TPT)
+#define CURSIZE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURASIZE)
+#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CUR_FBC_CTL_A)
+#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CUR_CHICKEN_A)
+#define CURSURFLIVE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURASURFLIVE)
/* Display A control */
#define _DSPAADDR_VLV 0x7017C /* vlv/chv */
@@ -3149,18 +2389,18 @@
#define _DSPASURFLIVE 0x701AC
#define _DSPAGAMC 0x701E0
-#define DSPADDR_VLV(plane) _MMIO_PIPE2(plane, _DSPAADDR_VLV)
-#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
-#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
-#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE)
-#define DSPPOS(plane) _MMIO_PIPE2(plane, _DSPAPOS)
-#define DSPSIZE(plane) _MMIO_PIPE2(plane, _DSPASIZE)
-#define DSPSURF(plane) _MMIO_PIPE2(plane, _DSPASURF)
-#define DSPTILEOFF(plane) _MMIO_PIPE2(plane, _DSPATILEOFF)
+#define DSPADDR_VLV(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAADDR_VLV)
+#define DSPCNTR(plane) _MMIO_PIPE2(dev_priv, plane, _DSPACNTR)
+#define DSPADDR(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAADDR)
+#define DSPSTRIDE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASTRIDE)
+#define DSPPOS(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAPOS)
+#define DSPSIZE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASIZE)
+#define DSPSURF(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASURF)
+#define DSPTILEOFF(plane) _MMIO_PIPE2(dev_priv, plane, _DSPATILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
-#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
-#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)
-#define DSPGAMC(plane, i) _MMIO_PIPE2(plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
+#define DSPOFFSET(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAOFFSET)
+#define DSPSURFLIVE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASURFLIVE)
+#define DSPGAMC(plane, i) _MMIO_PIPE2(dev_priv, plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
@@ -3187,11 +2427,11 @@
#define PRIM_CONST_ALPHA_MASK REG_GENMASK(7, 0)
#define PRIM_CONST_ALPHA(alpha) REG_FIELD_PREP(PRIM_CONST_ALPHA_MASK, (alpha))
-#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A)
-#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
-#define PRIMPOS(plane) _MMIO_TRANS2(plane, _PRIMPOS_A)
-#define PRIMSIZE(plane) _MMIO_TRANS2(plane, _PRIMSIZE_A)
-#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(plane, _PRIMCNSTALPHA_A)
+#define CHV_BLEND(pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
+#define CHV_CANVAS(pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
+#define PRIMPOS(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMPOS_A)
+#define PRIMSIZE(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMSIZE_A)
+#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMCNSTALPHA_A)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3241,346 +2481,6 @@
#define _PIPEDSI0CONF 0x7b008
#define _PIPEDSI1CONF 0x7b808
-/* Sprite A control */
-#define _DVSACNTR 0x72180
-#define DVS_ENABLE REG_BIT(31)
-#define DVS_PIPE_GAMMA_ENABLE REG_BIT(30)
-#define DVS_YUV_RANGE_CORRECTION_DISABLE REG_BIT(27)
-#define DVS_FORMAT_MASK REG_GENMASK(26, 25)
-#define DVS_FORMAT_YUV422 REG_FIELD_PREP(DVS_FORMAT_MASK, 0)
-#define DVS_FORMAT_RGBX101010 REG_FIELD_PREP(DVS_FORMAT_MASK, 1)
-#define DVS_FORMAT_RGBX888 REG_FIELD_PREP(DVS_FORMAT_MASK, 2)
-#define DVS_FORMAT_RGBX161616 REG_FIELD_PREP(DVS_FORMAT_MASK, 3)
-#define DVS_PIPE_CSC_ENABLE REG_BIT(24)
-#define DVS_SOURCE_KEY REG_BIT(22)
-#define DVS_RGB_ORDER_XBGR REG_BIT(20)
-#define DVS_YUV_FORMAT_BT709 REG_BIT(18)
-#define DVS_YUV_ORDER_MASK REG_GENMASK(17, 16)
-#define DVS_YUV_ORDER_YUYV REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 0)
-#define DVS_YUV_ORDER_UYVY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 1)
-#define DVS_YUV_ORDER_YVYU REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 2)
-#define DVS_YUV_ORDER_VYUY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 3)
-#define DVS_ROTATE_180 REG_BIT(15)
-#define DVS_TRICKLE_FEED_DISABLE REG_BIT(14)
-#define DVS_TILED REG_BIT(10)
-#define DVS_DEST_KEY REG_BIT(2)
-#define _DVSALINOFF 0x72184
-#define _DVSASTRIDE 0x72188
-#define _DVSAPOS 0x7218c
-#define DVS_POS_Y_MASK REG_GENMASK(31, 16)
-#define DVS_POS_Y(y) REG_FIELD_PREP(DVS_POS_Y_MASK, (y))
-#define DVS_POS_X_MASK REG_GENMASK(15, 0)
-#define DVS_POS_X(x) REG_FIELD_PREP(DVS_POS_X_MASK, (x))
-#define _DVSASIZE 0x72190
-#define DVS_HEIGHT_MASK REG_GENMASK(31, 16)
-#define DVS_HEIGHT(h) REG_FIELD_PREP(DVS_HEIGHT_MASK, (h))
-#define DVS_WIDTH_MASK REG_GENMASK(15, 0)
-#define DVS_WIDTH(w) REG_FIELD_PREP(DVS_WIDTH_MASK, (w))
-#define _DVSAKEYVAL 0x72194
-#define _DVSAKEYMSK 0x72198
-#define _DVSASURF 0x7219c
-#define DVS_ADDR_MASK REG_GENMASK(31, 12)
-#define _DVSAKEYMAXVAL 0x721a0
-#define _DVSATILEOFF 0x721a4
-#define DVS_OFFSET_Y_MASK REG_GENMASK(31, 16)
-#define DVS_OFFSET_Y(y) REG_FIELD_PREP(DVS_OFFSET_Y_MASK, (y))
-#define DVS_OFFSET_X_MASK REG_GENMASK(15, 0)
-#define DVS_OFFSET_X(x) REG_FIELD_PREP(DVS_OFFSET_X_MASK, (x))
-#define _DVSASURFLIVE 0x721ac
-#define _DVSAGAMC_G4X 0x721e0 /* g4x */
-#define _DVSASCALE 0x72204
-#define DVS_SCALE_ENABLE REG_BIT(31)
-#define DVS_FILTER_MASK REG_GENMASK(30, 29)
-#define DVS_FILTER_MEDIUM REG_FIELD_PREP(DVS_FILTER_MASK, 0)
-#define DVS_FILTER_ENHANCING REG_FIELD_PREP(DVS_FILTER_MASK, 1)
-#define DVS_FILTER_SOFTENING REG_FIELD_PREP(DVS_FILTER_MASK, 2)
-#define DVS_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
-#define DVS_VERTICAL_OFFSET_ENABLE REG_BIT(27)
-#define DVS_SRC_WIDTH_MASK REG_GENMASK(26, 16)
-#define DVS_SRC_WIDTH(w) REG_FIELD_PREP(DVS_SRC_WIDTH_MASK, (w))
-#define DVS_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
-#define DVS_SRC_HEIGHT(h) REG_FIELD_PREP(DVS_SRC_HEIGHT_MASK, (h))
-#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */
-#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */
-
-#define _DVSBCNTR 0x73180
-#define _DVSBLINOFF 0x73184
-#define _DVSBSTRIDE 0x73188
-#define _DVSBPOS 0x7318c
-#define _DVSBSIZE 0x73190
-#define _DVSBKEYVAL 0x73194
-#define _DVSBKEYMSK 0x73198
-#define _DVSBSURF 0x7319c
-#define _DVSBKEYMAXVAL 0x731a0
-#define _DVSBTILEOFF 0x731a4
-#define _DVSBSURFLIVE 0x731ac
-#define _DVSBGAMC_G4X 0x731e0 /* g4x */
-#define _DVSBSCALE 0x73204
-#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */
-#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */
-
-#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
-#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
-#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
-#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS)
-#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF)
-#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
-#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE)
-#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE)
-#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
-#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
-#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
-#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
-#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */
-#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */
-#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */
-
-#define _SPRA_CTL 0x70280
-#define SPRITE_ENABLE REG_BIT(31)
-#define SPRITE_PIPE_GAMMA_ENABLE REG_BIT(30)
-#define SPRITE_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28)
-#define SPRITE_FORMAT_MASK REG_GENMASK(27, 25)
-#define SPRITE_FORMAT_YUV422 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 0)
-#define SPRITE_FORMAT_RGBX101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 1)
-#define SPRITE_FORMAT_RGBX888 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 2)
-#define SPRITE_FORMAT_RGBX161616 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 3)
-#define SPRITE_FORMAT_YUV444 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 4)
-#define SPRITE_FORMAT_XR_BGR101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 5) /* Extended range */
-#define SPRITE_PIPE_CSC_ENABLE REG_BIT(24)
-#define SPRITE_SOURCE_KEY REG_BIT(22)
-#define SPRITE_RGB_ORDER_RGBX REG_BIT(20) /* only for 888 and 161616 */
-#define SPRITE_YUV_TO_RGB_CSC_DISABLE REG_BIT(19)
-#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) /* 0 is BT601 */
-#define SPRITE_YUV_ORDER_MASK REG_GENMASK(17, 16)
-#define SPRITE_YUV_ORDER_YUYV REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 0)
-#define SPRITE_YUV_ORDER_UYVY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 1)
-#define SPRITE_YUV_ORDER_YVYU REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 2)
-#define SPRITE_YUV_ORDER_VYUY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 3)
-#define SPRITE_ROTATE_180 REG_BIT(15)
-#define SPRITE_TRICKLE_FEED_DISABLE REG_BIT(14)
-#define SPRITE_PLANE_GAMMA_DISABLE REG_BIT(13)
-#define SPRITE_TILED REG_BIT(10)
-#define SPRITE_DEST_KEY REG_BIT(2)
-#define _SPRA_LINOFF 0x70284
-#define _SPRA_STRIDE 0x70288
-#define _SPRA_POS 0x7028c
-#define SPRITE_POS_Y_MASK REG_GENMASK(31, 16)
-#define SPRITE_POS_Y(y) REG_FIELD_PREP(SPRITE_POS_Y_MASK, (y))
-#define SPRITE_POS_X_MASK REG_GENMASK(15, 0)
-#define SPRITE_POS_X(x) REG_FIELD_PREP(SPRITE_POS_X_MASK, (x))
-#define _SPRA_SIZE 0x70290
-#define SPRITE_HEIGHT_MASK REG_GENMASK(31, 16)
-#define SPRITE_HEIGHT(h) REG_FIELD_PREP(SPRITE_HEIGHT_MASK, (h))
-#define SPRITE_WIDTH_MASK REG_GENMASK(15, 0)
-#define SPRITE_WIDTH(w) REG_FIELD_PREP(SPRITE_WIDTH_MASK, (w))
-#define _SPRA_KEYVAL 0x70294
-#define _SPRA_KEYMSK 0x70298
-#define _SPRA_SURF 0x7029c
-#define SPRITE_ADDR_MASK REG_GENMASK(31, 12)
-#define _SPRA_KEYMAX 0x702a0
-#define _SPRA_TILEOFF 0x702a4
-#define SPRITE_OFFSET_Y_MASK REG_GENMASK(31, 16)
-#define SPRITE_OFFSET_Y(y) REG_FIELD_PREP(SPRITE_OFFSET_Y_MASK, (y))
-#define SPRITE_OFFSET_X_MASK REG_GENMASK(15, 0)
-#define SPRITE_OFFSET_X(x) REG_FIELD_PREP(SPRITE_OFFSET_X_MASK, (x))
-#define _SPRA_OFFSET 0x702a4
-#define _SPRA_SURFLIVE 0x702ac
-#define _SPRA_SCALE 0x70304
-#define SPRITE_SCALE_ENABLE REG_BIT(31)
-#define SPRITE_FILTER_MASK REG_GENMASK(30, 29)
-#define SPRITE_FILTER_MEDIUM REG_FIELD_PREP(SPRITE_FILTER_MASK, 0)
-#define SPRITE_FILTER_ENHANCING REG_FIELD_PREP(SPRITE_FILTER_MASK, 1)
-#define SPRITE_FILTER_SOFTENING REG_FIELD_PREP(SPRITE_FILTER_MASK, 2)
-#define SPRITE_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */
-#define SPRITE_VERTICAL_OFFSET_ENABLE REG_BIT(27)
-#define SPRITE_SRC_WIDTH_MASK REG_GENMASK(26, 16)
-#define SPRITE_SRC_WIDTH(w) REG_FIELD_PREP(SPRITE_SRC_WIDTH_MASK, (w))
-#define SPRITE_SRC_HEIGHT_MASK REG_GENMASK(10, 0)
-#define SPRITE_SRC_HEIGHT(h) REG_FIELD_PREP(SPRITE_SRC_HEIGHT_MASK, (h))
-#define _SPRA_GAMC 0x70400
-#define _SPRA_GAMC16 0x70440
-#define _SPRA_GAMC17 0x7044c
-
-#define _SPRB_CTL 0x71280
-#define _SPRB_LINOFF 0x71284
-#define _SPRB_STRIDE 0x71288
-#define _SPRB_POS 0x7128c
-#define _SPRB_SIZE 0x71290
-#define _SPRB_KEYVAL 0x71294
-#define _SPRB_KEYMSK 0x71298
-#define _SPRB_SURF 0x7129c
-#define _SPRB_KEYMAX 0x712a0
-#define _SPRB_TILEOFF 0x712a4
-#define _SPRB_OFFSET 0x712a4
-#define _SPRB_SURFLIVE 0x712ac
-#define _SPRB_SCALE 0x71304
-#define _SPRB_GAMC 0x71400
-#define _SPRB_GAMC16 0x71440
-#define _SPRB_GAMC17 0x7144c
-
-#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
-#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
-#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
-#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS)
-#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
-#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
-#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
-#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
-#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
-#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
-#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
-#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
-#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */
-#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */
-#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */
-#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
-
-#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
-#define SP_ENABLE REG_BIT(31)
-#define SP_PIPE_GAMMA_ENABLE REG_BIT(30)
-#define SP_FORMAT_MASK REG_GENMASK(29, 26)
-#define SP_FORMAT_YUV422 REG_FIELD_PREP(SP_FORMAT_MASK, 0)
-#define SP_FORMAT_8BPP REG_FIELD_PREP(SP_FORMAT_MASK, 2)
-#define SP_FORMAT_BGR565 REG_FIELD_PREP(SP_FORMAT_MASK, 5)
-#define SP_FORMAT_BGRX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 6)
-#define SP_FORMAT_BGRA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 7)
-#define SP_FORMAT_RGBX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 8)
-#define SP_FORMAT_RGBA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 9)
-#define SP_FORMAT_BGRX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 10) /* CHV pipe B */
-#define SP_FORMAT_BGRA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 11) /* CHV pipe B */
-#define SP_FORMAT_RGBX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 14)
-#define SP_FORMAT_RGBA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 15)
-#define SP_ALPHA_PREMULTIPLY REG_BIT(23) /* CHV pipe B */
-#define SP_SOURCE_KEY REG_BIT(22)
-#define SP_YUV_FORMAT_BT709 REG_BIT(18)
-#define SP_YUV_ORDER_MASK REG_GENMASK(17, 16)
-#define SP_YUV_ORDER_YUYV REG_FIELD_PREP(SP_YUV_ORDER_MASK, 0)
-#define SP_YUV_ORDER_UYVY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 1)
-#define SP_YUV_ORDER_YVYU REG_FIELD_PREP(SP_YUV_ORDER_MASK, 2)
-#define SP_YUV_ORDER_VYUY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 3)
-#define SP_ROTATE_180 REG_BIT(15)
-#define SP_TILED REG_BIT(10)
-#define SP_MIRROR REG_BIT(8) /* CHV pipe B */
-#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
-#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
-#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
-#define SP_POS_Y_MASK REG_GENMASK(31, 16)
-#define SP_POS_Y(y) REG_FIELD_PREP(SP_POS_Y_MASK, (y))
-#define SP_POS_X_MASK REG_GENMASK(15, 0)
-#define SP_POS_X(x) REG_FIELD_PREP(SP_POS_X_MASK, (x))
-#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
-#define SP_HEIGHT_MASK REG_GENMASK(31, 16)
-#define SP_HEIGHT(h) REG_FIELD_PREP(SP_HEIGHT_MASK, (h))
-#define SP_WIDTH_MASK REG_GENMASK(15, 0)
-#define SP_WIDTH(w) REG_FIELD_PREP(SP_WIDTH_MASK, (w))
-#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
-#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
-#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
-#define SP_ADDR_MASK REG_GENMASK(31, 12)
-#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
-#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
-#define SP_OFFSET_Y_MASK REG_GENMASK(31, 16)
-#define SP_OFFSET_Y(y) REG_FIELD_PREP(SP_OFFSET_Y_MASK, (y))
-#define SP_OFFSET_X_MASK REG_GENMASK(15, 0)
-#define SP_OFFSET_X(x) REG_FIELD_PREP(SP_OFFSET_X_MASK, (x))
-#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
-#define SP_CONST_ALPHA_ENABLE REG_BIT(31)
-#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0)
-#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha))
-#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac)
-#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
-#define SP_CONTRAST_MASK REG_GENMASK(26, 18)
-#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */
-#define SP_BRIGHTNESS_MASK REG_GENMASK(7, 0)
-#define SP_BRIGHTNESS(x) REG_FIELD_PREP(SP_BRIGHTNESS_MASK, (x)) /* s8 */
-#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
-#define SP_SH_SIN_MASK REG_GENMASK(26, 16)
-#define SP_SH_SIN(x) REG_FIELD_PREP(SP_SH_SIN_MASK, (x)) /* s4.7 */
-#define SP_SH_COS_MASK REG_GENMASK(9, 0)
-#define SP_SH_COS(x) REG_FIELD_PREP(SP_SH_COS_MASK, (x)) /* u3.7 */
-#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0)
-
-#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
-#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
-#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
-#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
-#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
-#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
-#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
-#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
-#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
-#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
-#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
-#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac)
-#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
-#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
-#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
-
-#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \
- _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
-#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
- _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b)))
-
-#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
-#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
-#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE)
-#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS)
-#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE)
-#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL)
-#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK)
-#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF)
-#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
-#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
-#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
-#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE)
-#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
-#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
-#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
-
-/*
- * CHV pipe B sprite CSC
- *
- * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff|
- * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
- * |cb| |c6 c7 c8| |cb + cb_ioff| |cb_ooff|
- */
-#define _MMIO_CHV_SPCSC(plane_id, reg) \
- _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg))
-
-#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900)
-#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904)
-#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908)
-#define SPCSC_OOFF_MASK REG_GENMASK(26, 16)
-#define SPCSC_OOFF(x) REG_FIELD_PREP(SPCSC_OOFF_MASK, (x) & 0x7ff) /* s11 */
-#define SPCSC_IOFF_MASK REG_GENMASK(10, 0)
-#define SPCSC_IOFF(x) REG_FIELD_PREP(SPCSC_IOFF_MASK, (x) & 0x7ff) /* s11 */
-
-#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c)
-#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910)
-#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914)
-#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918)
-#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c)
-#define SPCSC_C1_MASK REG_GENMASK(30, 16)
-#define SPCSC_C1(x) REG_FIELD_PREP(SPCSC_C1_MASK, (x) & 0x7fff) /* s3.12 */
-#define SPCSC_C0_MASK REG_GENMASK(14, 0)
-#define SPCSC_C0(x) REG_FIELD_PREP(SPCSC_C0_MASK, (x) & 0x7fff) /* s3.12 */
-
-#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920)
-#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924)
-#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928)
-#define SPCSC_IMAX_MASK REG_GENMASK(26, 16)
-#define SPCSC_IMAX(x) REG_FIELD_PREP(SPCSC_IMAX_MASK, (x) & 0x7ff) /* s11 */
-#define SPCSC_IMIN_MASK REG_GENMASK(10, 0)
-#define SPCSC_IMIN(x) REG_FIELD_PREP(SPCSC_IMIN_MASK, (x) & 0x7ff) /* s11 */
-
-#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c)
-#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930)
-#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934)
-#define SPCSC_OMAX_MASK REG_GENMASK(25, 16)
-#define SPCSC_OMAX(x) REG_FIELD_PREP(SPCSC_OMAX_MASK, (x)) /* u10 */
-#define SPCSC_OMIN_MASK REG_GENMASK(9, 0)
-#define SPCSC_OMIN(x) REG_FIELD_PREP(SPCSC_OMIN_MASK, (x)) /* u10 */
-
/* Skylake plane registers */
#define _PLANE_CTL_1_A 0x70180
@@ -3990,14 +2890,14 @@
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M1)
-#define PIPE_DATA_N1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N1)
-#define PIPE_DATA_M2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M2)
-#define PIPE_DATA_N2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N2)
-#define PIPE_LINK_M1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M1)
-#define PIPE_LINK_N1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N1)
-#define PIPE_LINK_M2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M2)
-#define PIPE_LINK_N2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N2)
+#define PIPE_DATA_M1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
+#define PIPE_DATA_N1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
+#define PIPE_DATA_M2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
+#define PIPE_DATA_N2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
+#define PIPE_LINK_M1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
+#define PIPE_LINK_N1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
+#define PIPE_LINK_M2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
+#define PIPE_LINK_N2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
@@ -4555,6 +3455,11 @@
#define GLK_CL1_PWR_DOWN REG_BIT(11)
#define GLK_CL0_PWR_DOWN REG_BIT(10)
+#define CHICKEN_MISC_3 _MMIO(0x42088)
+#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A)
+#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A)
+#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A)
+
#define CHICKEN_MISC_4 _MMIO(0x4208c)
#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
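
The CHICKEN_MISC_3 workaround bits above are indexed by transcoder, one bit per transcoder starting at A. A hedged fragment showing how one of them might be set (intel_de_rmw() usage assumed from the surrounding i915 convention):

/* Illustrative only: enable the DP MST FEC BS jitter workaround for
 * the transcoder driving this stream. */
intel_de_rmw(dev_priv, CHICKEN_MISC_3,
	     0, DP_MST_FEC_BS_JITTER_WA(cpu_transcoder));
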
@@ -4599,7 +3504,7 @@
#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
_MTL_CHICKEN_TRANS_A, \
_MTL_CHICKEN_TRANS_B)
-#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* ADL/DG2 */
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
@@ -4611,7 +3516,9 @@
#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
+#define DP_FEC_BS_JITTER_WA REG_BIT(15)
#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
+#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4)
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
@@ -5010,27 +3917,29 @@
#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
/* Per-transcoder DIP controls (VLV) */
-#define _VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
-#define _VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
-
-#define _VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
-#define _VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
-#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
-
-#define _CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
-#define _CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
-#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
-
-#define VLV_TVIDEO_DIP_CTL(pipe) \
- _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_CTL_A, \
- _VLV_VIDEO_DIP_CTL_B, _CHV_VIDEO_DIP_CTL_C)
-#define VLV_TVIDEO_DIP_DATA(pipe) \
- _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_DATA_A, \
- _VLV_VIDEO_DIP_DATA_B, _CHV_VIDEO_DIP_DATA_C)
-#define VLV_TVIDEO_DIP_GCP(pipe) \
- _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
- _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
+#define _VLV_VIDEO_DIP_CTL_A 0x60200
+#define _VLV_VIDEO_DIP_CTL_B 0x61170
+#define _CHV_VIDEO_DIP_CTL_C 0x611f0
+#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_CTL_A, \
+ _VLV_VIDEO_DIP_CTL_B, \
+ _CHV_VIDEO_DIP_CTL_C)
+
+#define _VLV_VIDEO_DIP_DATA_A 0x60208
+#define _VLV_VIDEO_DIP_DATA_B 0x61174
+#define _CHV_VIDEO_DIP_DATA_C 0x611f4
+#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_DATA_A, \
+ _VLV_VIDEO_DIP_DATA_B, \
+ _CHV_VIDEO_DIP_DATA_C)
+
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8
+#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \
+ _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
/* Haswell DIP controls */
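
For the VLV/CHV per-pipe DIP registers regrouped just above, selection works the same way as for DPLL(): _MMIO_BASE_PIPE3() adds VLV_DISPLAY_BASE to the per-pipe offset. A hedged usage fragment (the crtc naming is an assumption; clearing the control register should disable all DIP transmission on that pipe):

/* Illustrative only: shut off every DIP on a VLV/CHV pipe. */
intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), 0);
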
@@ -5040,6 +3949,7 @@
#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
@@ -5054,6 +3964,7 @@
#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
@@ -5073,22 +3984,25 @@
#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
-#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
-#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
-#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
-#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
-#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
-#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
+#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
+#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
+/* ADLP and later: */
+#define ADL_TVIDEO_DIP_AS_SDP_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, \
+ _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
#define _HSW_STEREO_3D_CTL_A 0x70020
#define S3D_ENABLE (1 << 31)
#define _HSW_STEREO_3D_CTL_B 0x71020
-#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
+#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
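
ADL_TVIDEO_DIP_AS_SDP_DATA() above follows the HSW-style DIP data layout, one dword register per payload dword. A hedged sketch of the usual fill pattern (the data pointer, loop variable and transcoder name are assumptions):

/* Illustrative only: push an Adaptive-Sync SDP payload, dword by
 * dword, into the per-transcoder DIP data registers. */
for (i = 0; i < VIDEO_DIP_ASYNC_DATA_SIZE; i += 4)
	intel_de_write(dev_priv,
		       ADL_TVIDEO_DIP_AS_SDP_DATA(cpu_transcoder, i >> 2),
		       *data++);
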
@@ -5401,7 +4315,7 @@
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
-#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* xehpsdv, pvc */
+#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
@@ -5566,15 +4480,6 @@ enum skl_power_gate {
((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1)
#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
-#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
-#define _ICL_AUX_ANAOVRD1_A 0x162398
-#define _ICL_AUX_ANAOVRD1_B 0x6C398
-#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
- _ICL_AUX_ANAOVRD1_A, \
- _ICL_AUX_ANAOVRD1_B))
-#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
-#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
-
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -5583,7 +4488,7 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
-#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
+#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A)
#define TRANS_DDI_FUNC_ENABLE (1 << 31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
@@ -5638,7 +4543,7 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
-#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL2_A)
+#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
@@ -5651,7 +4556,7 @@ enum skl_power_gate {
#define _DP_TP_CTL_B 0x64140
#define _TGL_DP_TP_CTL_A 0x60540
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_CTL_A)
+#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
#define DP_TP_CTL_ENABLE (1 << 31)
#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
@@ -5677,7 +4582,7 @@ enum skl_power_gate {
#define _DP_TP_STATUS_B 0x64144
#define _TGL_DP_TP_STATUS_A 0x60544
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_STATUS_A)
+#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
@@ -5858,14 +4763,14 @@ enum skl_power_gate {
#define _TRANSB_MSA_MISC 0x61410
#define _TRANSC_MSA_MISC 0x62410
#define _TRANS_EDP_MSA_MISC 0x6f410
-#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
+#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC)
/* See DP_MSA_MISC_* for the bit definitions */
#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
-#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(tran, _TRANS_A_SET_CONTEXT_LATENCY)
+#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY)
#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
@@ -5900,7 +4805,9 @@ enum skl_power_gate {
#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
-#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_BIT(25)
+#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25)
+#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0)
+#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1)
#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
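
MDCLK_SOURCE_SEL is now a one-bit field with named values rather than a bare REG_BIT(), so callers can use the usual mask/prep helpers. A standalone sketch of the REG_GENMASK()/REG_FIELD_PREP() arithmetic involved, using simplified re-implementations rather than the kernel macros:

#include <stdint.h>
#include <stdio.h>

/* Simplified models of the kernel's REG_GENMASK()/REG_FIELD_PREP(). */
#define GENMASK32(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP32(m, v) (((v) << __builtin_ctz(m)) & (m))

int main(void)
{
	uint32_t mask = GENMASK32(25, 25);	/* MDCLK_SOURCE_SEL_MASK */

	printf("CD2XCLK   -> %#010x\n", FIELD_PREP32(mask, 0u));
	printf("CDCLK PLL -> %#010x\n", FIELD_PREP32(mask, 1u));
	return 0;
}
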
@@ -6317,7 +5224,7 @@ enum skl_power_gate {
#define _VLV_PIPE_MSA_MISC_A 0x70048
#define VLV_PIPE_MSA_MISC(pipe) \
- _MMIO_PIPE2(pipe, _VLV_PIPE_MSA_MISC_A)
+ _MMIO_PIPE2(dev_priv, pipe, _VLV_PIPE_MSA_MISC_A)
#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
@@ -6390,7 +5297,7 @@ enum skl_power_gate {
#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
-#define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(trans, _MTL_CLKGATE_DIS_TRANS_A)
+#define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 0d735d5c2b35..942345548bc3 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -126,7 +126,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
return 0;
err_free_blocks:
- drm_buddy_free_list(mm, &bman_res->blocks);
+ drm_buddy_free_list(mm, &bman_res->blocks, 0);
mutex_unlock(&bman->lock);
err_free_res:
ttm_resource_fini(man, &bman_res->base);
@@ -141,7 +141,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
mutex_lock(&bman->lock);
- drm_buddy_free_list(&bman->mm, &bman_res->blocks);
+ drm_buddy_free_list(&bman->mm, &bman_res->blocks, 0);
bman->visible_avail += bman_res->used_visible_size;
mutex_unlock(&bman->lock);
@@ -345,7 +345,7 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
ttm_set_driver_manager(bdev, type, NULL);
mutex_lock(&bman->lock);
- drm_buddy_free_list(mm, &bman->reserved);
+ drm_buddy_free_list(mm, &bman->reserved, 0);
drm_buddy_fini(mm);
bman->visible_avail += bman->visible_reserved;
WARN_ON_ONCE(bman->visible_avail != bman->visible_size);
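
drm_buddy_free_list() gains a flags argument in this series; passing 0 at the call sites above keeps plain free semantics. The updated prototype, as assumed from the drm_buddy side of the change:

/* Assumed prototype; the new flags word lets callers mark freed blocks
 * (e.g. as cleared), and 0 means no special handling. */
void drm_buddy_free_list(struct drm_buddy *mm,
			 struct list_head *objects,
			 unsigned int flags);
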
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index b45ef0560611..06ec6ceb61d5 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -73,20 +73,6 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
-#if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows_t(T, A, B) \
- __builtin_add_overflow_p((A), (B), (T)0)
-#else
-#define add_overflows_t(T, A, B) ({ \
- typeof(A) a = (A); \
- typeof(B) b = (B); \
- (T)(a + b) < a; \
-})
-#endif
-
-#define add_overflows(A, B) \
- add_overflows_t(typeof((A) + (B)), (A), (B))
-
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index d09aad34ba37..d2f064d2525c 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -34,6 +34,7 @@
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
@@ -103,12 +104,42 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
- return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_tryget(vma))
+ return -ENOENT;
+
+ /*
+ * Exclude global GTT VMA from holding a GT wakeref
+ * while active, otherwise GPU never goes idle.
+ */
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we and our _retire() counterpart can be
+ * called asynchronously, storing a wakeref tracking
+ * handle inside struct i915_vma is not safe, and
+ * there is no other good place for that. Hence,
+ * use untracked variants of intel_gt_pm_get/put().
+ */
+ intel_gt_pm_get_untracked(vma->vm->gt);
+ }
+
+ return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
- i915_vma_put(active_to_vma(ref));
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we can be called from atomic contexts,
+ * use an async variant of intel_gt_pm_put().
+ */
+ intel_gt_pm_put_async_untracked(vma->vm->gt);
+ }
+
+ i915_vma_put(vma);
}
static struct i915_vma *
@@ -1404,7 +1435,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref;
unsigned int bound;
int err;
@@ -1424,8 +1455,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
- if (flags & PIN_GLOBAL)
- wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+ /*
+ * In case of a global GTT, we must hold a runtime-pm wakeref
+ * while global PTEs are updated. In other cases, we hold
+ * the rpm reference while the VMA is active. Since runtime
+ * resume may require allocations, which are forbidden inside
+ * vm->mutex, get the first rpm wakeref outside of the mutex.
+ */
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
/* lock VM */
@@ -1561,8 +1598,7 @@ err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
- if (wakeref)
- intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);
@@ -1740,8 +1776,6 @@ static void release_references(struct i915_vma *vma, struct intel_gt *gt,
if (vm_ddestroy)
i915_vm_resv_put(vma->vm);
- /* Wait for async active retire */
- i915_active_wait(&vma->active);
i915_active_fini(&vma->active);
GEM_WARN_ON(vma->resource);
i915_vma_free(vma);
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index 9c21ce69bd98..1dc5281b2ade 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -28,6 +28,7 @@
#include "display/intel_de.h"
#include "display/intel_display.h"
#include "display/intel_display_trace.h"
+#include "display/intel_fbc_regs.h"
#include "display/skl_watermark.h"
#include "gt/intel_engine_regs.h"
@@ -105,12 +106,6 @@ static void bxt_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: bxt
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcHighMemBwCorruptionAvoidance:bxt
- * Display WA #0883: bxt
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0);
}
static void glk_init_clock_gating(struct drm_i915_private *i915)
@@ -349,13 +344,6 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *i915,
intel_uncore_write(&i915->uncore, GEN7_MISCCPCTL, misccpctl);
}
-static void xehpsdv_init_clock_gating(struct drm_i915_private *i915)
-{
- /* Wa_22010146351:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
-}
-
static void dg2_init_clock_gating(struct drm_i915_private *i915)
{
/* Wa_22010954014:dg2 */
@@ -363,17 +351,6 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915)
SGSI_SIDECLK_DIS);
}
-static void pvc_init_clock_gating(struct drm_i915_private *i915)
-{
- /* Wa_14012385139:pvc */
- if (IS_PVC_BD_STEP(i915, STEP_A0, STEP_B0))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
-
- /* Wa_22010954014:pvc */
- if (IS_PVC_BD_STEP(i915, STEP_A0, STEP_B0))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
-}
-
static void cnp_init_clock_gating(struct drm_i915_private *i915)
{
if (!HAS_PCH_CNP(i915))
@@ -396,13 +373,6 @@ static void cfl_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: cfl
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:cfl
- * Display WA #0873: cfl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void kbl_init_clock_gating(struct drm_i915_private *i915)
@@ -427,13 +397,6 @@ static void kbl_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: kbl
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:kbl
- * Display WA #0873: kbl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skl_init_clock_gating(struct drm_i915_private *i915)
@@ -452,19 +415,6 @@ static void skl_init_clock_gating(struct drm_i915_private *i915)
* Display WA #0562: skl
*/
intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS);
-
- /*
- * WaFbcNukeOnHostModify:skl
- * Display WA #0873: skl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
- 0, DPFC_NUKE_ON_ANY_MODIFICATION);
-
- /*
- * WaFbcHighMemBwCorruptionAvoidance:skl
- * Display WA #0883: skl
- */
- intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0);
}
static void bdw_init_clock_gating(struct drm_i915_private *i915)
@@ -762,9 +712,7 @@ static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs =
.init_clock_gating = platform##_init_clock_gating, \
}
-CG_FUNCS(pvc);
CG_FUNCS(dg2);
-CG_FUNCS(xehpsdv);
CG_FUNCS(cfl);
CG_FUNCS(skl);
CG_FUNCS(kbl);
@@ -797,12 +745,8 @@ CG_FUNCS(nop);
*/
void intel_clock_gating_hooks_init(struct drm_i915_private *i915)
{
- if (IS_PONTEVECCHIO(i915))
- i915->clock_gating_funcs = &pvc_clock_gating_funcs;
- else if (IS_DG2(i915))
+ if (IS_DG2(i915))
i915->clock_gating_funcs = &dg2_clock_gating_funcs;
- else if (IS_XEHPSDV(i915))
- i915->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
i915->clock_gating_funcs = &cfl_clock_gating_funcs;
else if (IS_SKYLAKE(i915))
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 59bea1398c91..a0a43ea07f11 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -70,9 +70,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(DG1),
PLATFORM_NAME(ALDERLAKE_S),
PLATFORM_NAME(ALDERLAKE_P),
- PLATFORM_NAME(XEHPSDV),
PLATFORM_NAME(DG2),
- PLATFORM_NAME(PONTEVECCHIO),
PLATFORM_NAME(METEORLAKE),
};
#undef PLATFORM_NAME
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index eba2f0b919c8..d1a2abc7e513 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -87,9 +87,7 @@ enum intel_platform {
INTEL_DG1,
INTEL_ALDERLAKE_S,
INTEL_ALDERLAKE_P,
- INTEL_XEHPSDV,
INTEL_DG2,
- INTEL_PONTEVECCHIO,
INTEL_METEORLAKE,
INTEL_MAX_PLATFORMS
};
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 9b6d87c8b583..5a01d60e5186 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -28,6 +28,7 @@
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "gt/shmem_utils.h"
+#include <linux/vmalloc.h>
/**
* DOC: Intel GVT-g host support
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 87ecc5104fd9..e1a35f70b544 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -3,6 +3,7 @@
* Copyright © 2020 Intel Corporation
*/
+#include "display/bxt_dpio_phy_regs.h"
#include "display/intel_audio_regs.h"
#include "display/intel_backlight_regs.h"
#include "display/intel_color_regs.h"
@@ -10,9 +11,11 @@
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_fbc_regs.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_lvds_regs.h"
#include "display/intel_psr_regs.h"
+#include "display/intel_sprite_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_engine_regs.h"
@@ -1155,11 +1158,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1));
@@ -1180,11 +1183,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1));
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1));
- MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1));
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1));
@@ -1205,11 +1208,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0));
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1));
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index d4e844128826..2d0647aca964 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -272,15 +272,11 @@ intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @rpm: the intel_runtime_pm structure
*
- * This function grabs a device-level runtime pm reference (mostly used for GEM
- * code to ensure the GTT or GT is on).
+ * This function grabs a device-level runtime pm reference.
*
- * It will _not_ power up the device but instead only check that it's powered
- * on. Therefore it is only valid to call this functions from contexts where
- * the device is known to be powered up and where trying to power it up would
- * result in hilarity and deadlocks. That pretty much means only the system
- * suspend/resume code where this is used to grab runtime pm references for
- * delayed setup down in work items.
+ * It will _not_ resume the device but instead only get an extra wakeref.
+ * Therefore it is only valid to call this function from contexts where
+ * the device is known to be active and another wakeref is already held.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
@@ -289,7 +285,7 @@ intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
*/
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
- assert_rpm_wakelock_held(rpm);
+ assert_rpm_raw_wakeref_held(rpm);
pm_runtime_get_noresume(rpm->kdev);
intel_runtime_pm_acquire(rpm, true);
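A hedged usage sketch of the contract described by the reworded kernel-doc; the wrapper function below is invented for illustration, while the intel_runtime_pm_*() helpers are the ones this hunk documents:

	/* Illustration only: "outer" keeps the device resumed, so the noresume
	 * variant may legally add a reference on top of it without waking
	 * anything itself. */
	static void noresume_usage_sketch(struct intel_runtime_pm *rpm)
	{
		intel_wakeref_t outer, extra;

		outer = intel_runtime_pm_get(rpm);		/* resumes the device */
		extra = intel_runtime_pm_get_noresume(rpm);	/* piggybacks on outer */

		/* ... hand "extra" to deferred work that completes before outer drops ... */

		intel_runtime_pm_put(rpm, extra);
		intel_runtime_pm_put(rpm, outer);
	}
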
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index b4162f1be765..a5adfb5d8fd2 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -102,13 +102,6 @@ static const struct intel_step_info adlp_revids[] = {
[0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
};
-static const struct intel_step_info xehpsdv_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0) },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1) },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0) },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0) },
-};
-
static const struct intel_step_info dg2_g10_revid_step_tbl[] = {
[0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
[0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 },
@@ -153,8 +146,6 @@ static u8 gmd_to_intel_step(struct drm_i915_private *i915,
return step;
}
-static void pvc_step_init(struct drm_i915_private *i915, int pci_revid);
-
void intel_step_init(struct drm_i915_private *i915)
{
const struct intel_step_info *revids = NULL;
@@ -178,10 +169,7 @@ void intel_step_init(struct drm_i915_private *i915)
return;
}
- if (IS_PONTEVECCHIO(i915)) {
- pvc_step_init(i915, revid);
- return;
- } else if (IS_DG2_G10(i915)) {
+ if (IS_DG2_G10(i915)) {
revids = dg2_g10_revid_step_tbl;
size = ARRAY_SIZE(dg2_g10_revid_step_tbl);
} else if (IS_DG2_G11(i915)) {
@@ -190,9 +178,6 @@ void intel_step_init(struct drm_i915_private *i915)
} else if (IS_DG2_G12(i915)) {
revids = dg2_g12_revid_step_tbl;
size = ARRAY_SIZE(dg2_g12_revid_step_tbl);
- } else if (IS_XEHPSDV(i915)) {
- revids = xehpsdv_revids;
- size = ARRAY_SIZE(xehpsdv_revids);
} else if (IS_ALDERLAKE_P_N(i915)) {
revids = adlp_n_revids;
size = ARRAY_SIZE(adlp_n_revids);
@@ -277,69 +262,6 @@ void intel_step_init(struct drm_i915_private *i915)
RUNTIME_INFO(i915)->step = step;
}
-#define PVC_BD_REVID GENMASK(5, 3)
-#define PVC_CT_REVID GENMASK(2, 0)
-
-static const int pvc_bd_subids[] = {
- [0x0] = STEP_A0,
- [0x3] = STEP_B0,
- [0x4] = STEP_B1,
- [0x5] = STEP_B3,
-};
-
-static const int pvc_ct_subids[] = {
- [0x3] = STEP_A0,
- [0x5] = STEP_B0,
- [0x6] = STEP_B1,
- [0x7] = STEP_C0,
-};
-
-static int
-pvc_step_lookup(struct drm_i915_private *i915, const char *type,
- const int *table, int size, int subid)
-{
- if (subid < size && table[subid] != STEP_NONE)
- return table[subid];
-
- drm_warn(&i915->drm, "Unknown %s id 0x%02x\n", type, subid);
-
- /*
- * As on other platforms, try to use the next higher ID if we land on a
- * gap in the table.
- */
- while (subid < size && table[subid] == STEP_NONE)
- subid++;
-
- if (subid < size) {
- drm_dbg(&i915->drm, "Using steppings for %s id 0x%02x\n",
- type, subid);
- return table[subid];
- }
-
- drm_dbg(&i915->drm, "Using future steppings\n");
- return STEP_FUTURE;
-}
-
-/*
- * PVC needs special handling since we don't lookup the
- * revid in a table, but rather specific bitfields within
- * the revid for various components.
- */
-static void pvc_step_init(struct drm_i915_private *i915, int pci_revid)
-{
- int ct_subid, bd_subid;
-
- bd_subid = FIELD_GET(PVC_BD_REVID, pci_revid);
- ct_subid = FIELD_GET(PVC_CT_REVID, pci_revid);
-
- RUNTIME_INFO(i915)->step.basedie_step =
- pvc_step_lookup(i915, "Base Die", pvc_bd_subids,
- ARRAY_SIZE(pvc_bd_subids), bd_subid);
- RUNTIME_INFO(i915)->step.graphics_step =
- pvc_step_lookup(i915, "Compute Tile", pvc_ct_subids,
- ARRAY_SIZE(pvc_ct_subids), ct_subid);
-}
-
#define STEP_NAME_CASE(name) \
case STEP_##name: \
return #name;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 76400e9c40f0..729409a4bada 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1106,45 +1106,6 @@ static const struct i915_range dg2_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
-static const struct i915_range pvc_shadowed_regs[] = {
- { .start = 0x2030, .end = 0x2030 },
- { .start = 0x2510, .end = 0x2550 },
- { .start = 0xA008, .end = 0xA00C },
- { .start = 0xA188, .end = 0xA188 },
- { .start = 0xA278, .end = 0xA278 },
- { .start = 0xA540, .end = 0xA56C },
- { .start = 0xC4C8, .end = 0xC4C8 },
- { .start = 0xC4E0, .end = 0xC4E0 },
- { .start = 0xC600, .end = 0xC600 },
- { .start = 0xC658, .end = 0xC658 },
- { .start = 0x22030, .end = 0x22030 },
- { .start = 0x22510, .end = 0x22550 },
- { .start = 0x1C0030, .end = 0x1C0030 },
- { .start = 0x1C0510, .end = 0x1C0550 },
- { .start = 0x1C4030, .end = 0x1C4030 },
- { .start = 0x1C4510, .end = 0x1C4550 },
- { .start = 0x1C8030, .end = 0x1C8030 },
- { .start = 0x1C8510, .end = 0x1C8550 },
- { .start = 0x1D0030, .end = 0x1D0030 },
- { .start = 0x1D0510, .end = 0x1D0550 },
- { .start = 0x1D4030, .end = 0x1D4030 },
- { .start = 0x1D4510, .end = 0x1D4550 },
- { .start = 0x1D8030, .end = 0x1D8030 },
- { .start = 0x1D8510, .end = 0x1D8550 },
- { .start = 0x1E0030, .end = 0x1E0030 },
- { .start = 0x1E0510, .end = 0x1E0550 },
- { .start = 0x1E4030, .end = 0x1E4030 },
- { .start = 0x1E4510, .end = 0x1E4550 },
- { .start = 0x1E8030, .end = 0x1E8030 },
- { .start = 0x1E8510, .end = 0x1E8550 },
- { .start = 0x1F0030, .end = 0x1F0030 },
- { .start = 0x1F0510, .end = 0x1F0550 },
- { .start = 0x1F4030, .end = 0x1F4030 },
- { .start = 0x1F4510, .end = 0x1F4550 },
- { .start = 0x1F8030, .end = 0x1F8030 },
- { .start = 0x1F8510, .end = 0x1F8550 },
-};
-
static const struct i915_range mtl_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
@@ -1471,195 +1432,31 @@ static const struct intel_forcewake_range __gen12_fw_ranges[] = {
0x1d3f00 - 0x1d3fff: VD2 */
};
-/*
- * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
- * switching it from the GT domain to the render domain.
- */
-#define XEHP_FWRANGES(FW_RANGE_D800) \
- GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
- 0x0 - 0xaff: reserved \
- 0xb00 - 0x1fff: always on */ \
- GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
- 0x4b00 - 0x4fff: reserved \
- 0x5000 - 0x51ff: always on */ \
- GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
- 0x8160 - 0x817f: reserved \
- 0x8180 - 0x81ff: always on */ \
- GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
- 0x8500 - 0x87ff: gt \
- 0x8800 - 0x8c7f: reserved \
- 0x8c80 - 0x8cff: gt (DG2 only) */ \
- GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
- 0x8d00 - 0x8dff: render (DG2 only) \
- 0x8e00 - 0x8fff: reserved */ \
- GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
- 0x9000 - 0x947f: gt \
- 0x9480 - 0x94cf: reserved */ \
- GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
- 0x9560 - 0x95ff: always on \
- 0x9600 - 0x967f: reserved */ \
- GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
- 0x9680 - 0x96ff: render (DG2 only) \
- 0x9700 - 0x97ff: reserved */ \
- GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
- 0x9800 - 0xb4ff: gt \
- 0xb500 - 0xbfff: reserved \
- 0xc000 - 0xcfff: gt */ \
- GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
- GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
- GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
- 0xdd00 - 0xddff: gt \
- 0xde00 - 0xde7f: reserved */ \
- GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
- 0xde80 - 0xdfff: render \
- 0xe000 - 0xe0ff: reserved \
- 0xe100 - 0xe8ff: render */ \
- GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
- 0xe900 - 0xe9ff: gt \
- 0xea00 - 0xefff: reserved \
- 0xf000 - 0xffff: gt */ \
- GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
- 0x10000 - 0x11fff: reserved \
- 0x12000 - 0x127ff: always on \
- 0x12800 - 0x12fff: reserved */ \
- GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
- GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
- 0x13200 - 0x133ff: VD2 (DG2 only) \
- 0x13400 - 0x13fff: reserved */ \
- GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
- GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
- 0x15000 - 0x15fff: gt (DG2 only) \
- 0x16000 - 0x16dff: reserved */ \
- GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
- GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
- 0x20000 - 0x20fff: VD0 (XEHPSDV only) \
- 0x21000 - 0x21fff: reserved */ \
- GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
- 0x24000 - 0x2407f: always on \
- 0x24080 - 0x2417f: reserved */ \
- GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
- 0x24180 - 0x241ff: gt \
- 0x24200 - 0x249ff: reserved */ \
- GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
- 0x24a00 - 0x24a7f: render \
- 0x24a80 - 0x251ff: reserved */ \
- GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
- 0x25200 - 0x252ff: gt \
- 0x25300 - 0x25fff: reserved */ \
- GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
- 0x26000 - 0x27fff: render \
- 0x28000 - 0x29fff: reserved \
- 0x2a000 - 0x2ffff: undocumented */ \
- GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
- GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
- GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
- 0x1c0000 - 0x1c2bff: VD0 \
- 0x1c2c00 - 0x1c2cff: reserved \
- 0x1c2d00 - 0x1c2dff: VD0 \
- 0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
- 0x1c3f00 - 0x1c3fff: VD0 */ \
- GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
- 0x1c4000 - 0x1c6bff: VD1 \
- 0x1c6c00 - 0x1c6cff: reserved \
- 0x1c6d00 - 0x1c6dff: VD1 \
- 0x1c6e00 - 0x1c7fff: reserved */ \
- GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
- 0x1c8000 - 0x1ca0ff: VE0 \
- 0x1ca100 - 0x1cbfff: reserved */ \
- GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
- GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
- GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
- GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
- GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
- 0x1d0000 - 0x1d2bff: VD2 \
- 0x1d2c00 - 0x1d2cff: reserved \
- 0x1d2d00 - 0x1d2dff: VD2 \
- 0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
- 0x1d3e00 - 0x1d3eff: reserved \
- 0x1d3f00 - 0x1d3fff: VD2 */ \
- GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
- 0x1d4000 - 0x1d6bff: VD3 \
- 0x1d6c00 - 0x1d6cff: reserved \
- 0x1d6d00 - 0x1d6dff: VD3 \
- 0x1d6e00 - 0x1d7fff: reserved */ \
- GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
- 0x1d8000 - 0x1da0ff: VE1 \
- 0x1da100 - 0x1dffff: reserved */ \
- GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
- 0x1e0000 - 0x1e2bff: VD4 \
- 0x1e2c00 - 0x1e2cff: reserved \
- 0x1e2d00 - 0x1e2dff: VD4 \
- 0x1e2e00 - 0x1e3eff: reserved \
- 0x1e3f00 - 0x1e3fff: VD4 */ \
- GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
- 0x1e4000 - 0x1e6bff: VD5 \
- 0x1e6c00 - 0x1e6cff: reserved \
- 0x1e6d00 - 0x1e6dff: VD5 \
- 0x1e6e00 - 0x1e7fff: reserved */ \
- GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
- 0x1e8000 - 0x1ea0ff: VE2 \
- 0x1ea100 - 0x1effff: reserved */ \
- GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
- 0x1f0000 - 0x1f2bff: VD6 \
- 0x1f2c00 - 0x1f2cff: reserved \
- 0x1f2d00 - 0x1f2dff: VD6 \
- 0x1f2e00 - 0x1f3eff: reserved \
- 0x1f3f00 - 0x1f3fff: VD6 */ \
- GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
- 0x1f4000 - 0x1f6bff: VD7 \
- 0x1f6c00 - 0x1f6cff: reserved \
- 0x1f6d00 - 0x1f6dff: VD7 \
- 0x1f6e00 - 0x1f7fff: reserved */ \
- GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
-
-static const struct intel_forcewake_range __xehp_fw_ranges[] = {
- XEHP_FWRANGES(FORCEWAKE_GT)
-};
-
static const struct intel_forcewake_range __dg2_fw_ranges[] = {
- XEHP_FWRANGES(FORCEWAKE_RENDER)
-};
-
-static const struct intel_forcewake_range __pvc_fw_ranges[] = {
- GEN_FW_RANGE(0x0, 0xaff, 0),
- GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
- GEN_FW_RANGE(0xc00, 0xfff, 0),
- GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x0, 0x1fff, 0), /*
+ 0x0 - 0xaff: reserved
+ 0xb00 - 0x1fff: always on */
GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
- GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
- 0x4000 - 0x4aff: gt
+ GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*
0x4b00 - 0x4fff: reserved
- 0x5000 - 0x51ff: gt
- 0x5200 - 0x52ff: reserved
- 0x5300 - 0x53ff: gt
- 0x5400 - 0x7fff: reserved
- 0x8000 - 0x813f: gt */
- GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x8180, 0x81ff, 0),
- GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
- 0x8200 - 0x82ff: gt
- 0x8300 - 0x84ff: reserved
- 0x8500 - 0x887f: gt
- 0x8880 - 0x8a7f: reserved
- 0x8a80 - 0x8aff: gt
- 0x8b00 - 0x8fff: reserved
+ 0x5000 - 0x51ff: always on */
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
+ 0x8160 - 0x817f: reserved
+ 0x8180 - 0x81ff: always on */
+ GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*
+ 0x8500 - 0x87ff: gt
+ 0x8800 - 0x8c7f: reserved
+ 0x8c80 - 0x8cff: gt (DG2 only) */
+ GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*
+ 0x8d00 - 0x8dff: render (DG2 only)
+ 0x8e00 - 0x8fff: reserved */
+ GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*
0x9000 - 0x947f: gt
0x9480 - 0x94cf: reserved */
GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
@@ -1673,65 +1470,114 @@ static const struct intel_forcewake_range __pvc_fw_ranges[] = {
0x9800 - 0xb4ff: gt
0xb500 - 0xbfff: reserved
0xc000 - 0xcfff: gt */
- GEN_FW_RANGE(0xd000, 0xd3ff, 0),
- GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0xd000, 0xd7ff, 0),
+ GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
0xdd00 - 0xddff: gt
0xde00 - 0xde7f: reserved */
GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
- 0xde80 - 0xdeff: render
- 0xdf00 - 0xe1ff: reserved
- 0xe200 - 0xe7ff: render
- 0xe800 - 0xe8ff: reserved */
- GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
- 0xe900 - 0xe9ff: gt
- 0xea00 - 0xebff: reserved
- 0xec00 - 0xffff: gt
- 0x10000 - 0x11fff: reserved */
- GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
+ 0xde80 - 0xdfff: render
+ 0xe000 - 0xe0ff: reserved
+ 0xe100 - 0xe8ff: render */
+ GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*
+ 0xe900 - 0xe9ff: gt
+ 0xea00 - 0xefff: reserved
+ 0xf000 - 0xffff: gt */
+ GEN_FW_RANGE(0x10000, 0x12fff, 0), /*
+ 0x10000 - 0x11fff: reserved
0x12000 - 0x127ff: always on
0x12800 - 0x12fff: reserved */
- GEN_FW_RANGE(0x13000, 0x19fff, FORCEWAKE_GT), /*
- 0x13000 - 0x135ff: gt
- 0x13600 - 0x147ff: reserved
- 0x14800 - 0x153ff: gt
- 0x15400 - 0x19fff: reserved */
- GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
- 0x1a000 - 0x1ffff: render
+ GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /*
+ 0x13200 - 0x133ff: VD2 (DG2 only)
+ 0x13400 - 0x147ff: reserved */
+ GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*
+ 0x15000 - 0x15fff: gt (DG2 only)
+ 0x16000 - 0x16dff: reserved */
+ GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /*
+ 0x16e00 - 0x1ffff: render
0x20000 - 0x21fff: reserved */
GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
- 24000 - 0x2407f: always on
- 24080 - 0x2417f: reserved */
- GEN_FW_RANGE(0x24180, 0x25fff, FORCEWAKE_GT), /*
+ 0x24000 - 0x2407f: always on
+ 0x24080 - 0x2417f: reserved */
+ GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
0x24180 - 0x241ff: gt
- 0x24200 - 0x251ff: reserved
+ 0x24200 - 0x249ff: reserved */
+ GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
+ 0x24a00 - 0x24a7f: render
+ 0x24a80 - 0x251ff: reserved */
+ GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*
0x25200 - 0x252ff: gt
0x25300 - 0x25fff: reserved */
GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
0x26000 - 0x27fff: render
- 0x28000 - 0x2ffff: reserved */
+ 0x28000 - 0x29fff: reserved
+ 0x2a000 - 0x2ffff: undocumented */
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
GEN_FW_RANGE(0x40000, 0x1bffff, 0),
GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
0x1c0000 - 0x1c2bff: VD0
0x1c2c00 - 0x1c2cff: reserved
0x1c2d00 - 0x1c2dff: VD0
- 0x1c2e00 - 0x1c3eff: reserved
+ 0x1c2e00 - 0x1c3eff: VD0
0x1c3f00 - 0x1c3fff: VD0 */
- GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
- 0x1c4000 - 0x1c6aff: VD1
- 0x1c6b00 - 0x1c7eff: reserved
- 0x1c7f00 - 0x1c7fff: VD1
- 0x1c8000 - 0x1cffff: reserved */
- GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
- 0x1d0000 - 0x1d2aff: VD2
- 0x1d2b00 - 0x1d3eff: reserved
- 0x1d3f00 - 0x1d3fff: VD2
- 0x1d4000 - 0x23ffff: reserved */
- GEN_FW_RANGE(0x240000, 0x3dffff, 0),
- GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*
+ 0x1c4000 - 0x1c6bff: VD1
+ 0x1c6c00 - 0x1c6cff: reserved
+ 0x1c6d00 - 0x1c6dff: VD1
+ 0x1c6e00 - 0x1c7fff: reserved */
+ GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
+ 0x1c8000 - 0x1ca0ff: VE0
+ 0x1ca100 - 0x1cbfff: reserved */
+ GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),
+ GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),
+ GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),
+ GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
+ 0x1d0000 - 0x1d2bff: VD2
+ 0x1d2c00 - 0x1d2cff: reserved
+ 0x1d2d00 - 0x1d2dff: VD2
+ 0x1d2e00 - 0x1d3dff: VD2
+ 0x1d3e00 - 0x1d3eff: reserved
+ 0x1d3f00 - 0x1d3fff: VD2 */
+ GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*
+ 0x1d4000 - 0x1d6bff: VD3
+ 0x1d6c00 - 0x1d6cff: reserved
+ 0x1d6d00 - 0x1d6dff: VD3
+ 0x1d6e00 - 0x1d7fff: reserved */
+ GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*
+ 0x1d8000 - 0x1da0ff: VE1
+ 0x1da100 - 0x1dffff: reserved */
+ GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*
+ 0x1e0000 - 0x1e2bff: VD4
+ 0x1e2c00 - 0x1e2cff: reserved
+ 0x1e2d00 - 0x1e2dff: VD4
+ 0x1e2e00 - 0x1e3eff: reserved
+ 0x1e3f00 - 0x1e3fff: VD4 */
+ GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*
+ 0x1e4000 - 0x1e6bff: VD5
+ 0x1e6c00 - 0x1e6cff: reserved
+ 0x1e6d00 - 0x1e6dff: VD5
+ 0x1e6e00 - 0x1e7fff: reserved */
+ GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*
+ 0x1e8000 - 0x1ea0ff: VE2
+ 0x1ea100 - 0x1effff: reserved */
+ GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*
+ 0x1f0000 - 0x1f2bff: VD6
+ 0x1f2c00 - 0x1f2cff: reserved
+ 0x1f2d00 - 0x1f2dff: VD6
+ 0x1f2e00 - 0x1f3eff: reserved
+ 0x1f3f00 - 0x1f3fff: VD6 */
+ GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*
+ 0x1f4000 - 0x1f6bff: VD7
+ 0x1f6c00 - 0x1f6cff: reserved
+ 0x1f6d00 - 0x1f6dff: VD7
+ 0x1f6e00 - 0x1f7fff: reserved */
+ GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
};
static const struct intel_forcewake_range __mtl_fw_ranges[] = {
@@ -2576,18 +2422,10 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
- ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
- ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
- ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER(i915) >= 12) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
@@ -2734,7 +2572,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
* the forcewake domain if any of the other engines
* in the same media slice are present.
*/
- if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
+ if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
continue;
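For context, a hedged restatement of the pruning rule behind the bumped version check (the helper below is invented, and it assumes the usual gt->i915 back-pointer): on graphics IP 12.55+ an even-numbered VCS instance owns the forcewake domain it shares with its odd sibling, so that domain may only be pruned when both engines of the pair are absent.

	/* Illustration only, not part of the patch. */
	static bool vcs_fw_domain_prunable(struct intel_gt *gt, int i)
	{
		if (HAS_ENGINE(gt, _VCS(i)))
			return false;

		/* Even instance shares its domain with the next odd one on 12.55+. */
		if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55) && i % 2 == 0 &&
		    i + 1 < I915_MAX_VCS && HAS_ENGINE(gt, _VCS(i + 1)))
			return false;

		return true;
	}
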
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index ee79e0809a6d..fee76c1d2f45 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -154,6 +154,30 @@ __wait_gsc_proxy_completed(struct drm_i915_private *i915)
pr_warn(DRIVER_NAME "Timed out waiting for gsc_proxy_completion!\n");
}
+static void
+__wait_gsc_huc_load_completed(struct drm_i915_private *i915)
+{
+ /* this only applies to DG2, so we only care about GT0 */
+ struct intel_huc *huc = &to_gt(i915)->uc.huc;
+ bool need_to_wait = (IS_ENABLED(CONFIG_INTEL_MEI_PXP) &&
+ intel_huc_wait_required(huc));
+ /*
+ * The GSC and PXP mei bringup depends on the kernel boot ordering, so
+ * to account for the worst-case scenario the HuC code waits for up to
+ * 10s for the GSC driver to load and then another 5s for the PXP
+ * component to bind before giving up, even though those steps normally
+ * complete within a second of i915 load. We match that
+ * timeout here, but we expect to bail early due to the fence being
+ * signalled even in a failure case, as it is extremely unlikely that
+ * both components will use their full timeout.
+ */
+ unsigned long timeout_ms = 15000;
+
+ if (need_to_wait &&
+ wait_for(i915_sw_fence_done(&huc->delayed_load.fence), timeout_ms))
+ pr_warn(DRIVER_NAME "Timed out waiting for huc load via GSC!\n");
+}
+
static int __run_selftests(const char *name,
struct selftest *st,
unsigned int count,
@@ -228,14 +252,16 @@ int i915_mock_selftests(void)
int i915_live_selftests(struct pci_dev *pdev)
{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
int err;
if (!i915_selftest.live)
return 0;
- __wait_gsc_proxy_completed(pdev_to_i915(pdev));
+ __wait_gsc_proxy_completed(i915);
+ __wait_gsc_huc_load_completed(i915);
- err = run_selftests(live, pdev_to_i915(pdev));
+ err = run_selftests(live, i915);
if (err) {
i915_selftest.live = err;
return err;
@@ -251,14 +277,16 @@ int i915_live_selftests(struct pci_dev *pdev)
int i915_perf_selftests(struct pci_dev *pdev)
{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
int err;
if (!i915_selftest.perf)
return 0;
- __wait_gsc_proxy_completed(pdev_to_i915(pdev));
+ __wait_gsc_proxy_completed(i915);
+ __wait_gsc_huc_load_completed(i915);
- err = run_selftests(perf, pdev_to_i915(pdev));
+ err = run_selftests(perf, i915);
if (err) {
i915_selftest.perf = err;
return err;
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 4f98aa8a861e..41eaa9b7f67d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -71,7 +71,6 @@ static int intel_shadow_table_check(void)
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
{ dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
- { pvc_shadowed_regs, ARRAY_SIZE(pvc_shadowed_regs) },
{ mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) },
{ xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) },
};
@@ -119,8 +118,6 @@ int intel_uncore_mock_selftests(void)
{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
- { __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
- { __pvc_fw_ranges, ARRAY_SIZE(__pvc_fw_ranges), true },
{ __mtl_fw_ranges, ARRAY_SIZE(__mtl_fw_ranges), true },
{ __xelpmp_fw_ranges, ARRAY_SIZE(__xelpmp_fw_ranges), true },
};
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 15492b69f698..e3287f1de774 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -681,6 +681,8 @@ void intel_dram_detect(struct drm_i915_private *i915)
if (ret)
return;
+ drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points);
+
drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index ffa195560d0d..68291412f4cb 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -9,7 +9,6 @@
#include "vlv_sideband.h"
#include "display/intel_dpio_phy.h"
-#include "display/intel_display_types.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and