Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 55
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c | 4047
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.h | 21
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 319
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 85
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 375
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 193
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 301
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 43
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 65
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 48
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy_regs.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 46
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 177
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_de.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 762
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 119
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 239
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_limits.h | 124
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 80
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 134
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_reg_defs.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_rps.c | 81
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_rps.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_trace.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 463
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.h | 46
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_regs.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 182
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 84
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 104
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 60
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 169
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 269
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo_dev.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 159
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 30
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 115
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 369
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds_regs.h | 65
-rw-r--r--  drivers/gpu/drm/i915/display/intel_mg_phy_regs.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c | 62
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.c | 72
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_refclk.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 250
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 45
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 64
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.c | 441
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.h | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc_regs.h | 461
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vga.c | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_wm.c | 408
-rw-r--r--  drivers/gpu/drm/i915/display/intel_wm.h | 37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_wm_types.h (renamed from drivers/gpu/drm/i915/intel_pm_types.h) | 8
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 152
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 160
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 42
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 38
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 60
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 88
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_llc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c | 46
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_config.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_config.h | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs_params.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 188
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 137
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_hwmon.c | 51
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 550
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 86
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 902
-rw-r--r--  drivers/gpu/drm/i915/i915_reg_defs.h | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_scatterlist.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_switcheroo.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 72
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 4113
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 6
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.c | 65
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.h | 2
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h | 15
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h | 3
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_pm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_pm.h | 6
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 8
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.h | 5
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_tee.c | 46
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_dram.c | 152
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_gmch.c | 171
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_gmch.h | 18
-rw-r--r--  drivers/gpu/drm/i915/vlv_sideband.c | 1
189 files changed, 10234 insertions, 9466 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 9c0990c0ec87..98f4e44976e0 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -118,18 +118,16 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
-config DRM_I915_GVT
- bool
-
config DRM_I915_GVT_KVMGT
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
depends on X86
depends on 64BIT
depends on KVM
- depends on VFIO_MDEV
+ depends on VFIO
select DRM_I915_GVT
select KVM_EXTERNAL_WRITE_TRACKING
+ select VFIO_MDEV
help
Choose this option if you want to enable Intel GVT-g graphics
@@ -171,3 +169,6 @@ menu "drm/i915 Unstable Evolution"
depends on DRM_I915
source "drivers/gpu/drm/i915/Kconfig.unstable"
endmenu
+
+config DRM_I915_GVT
+ bool
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f47f00b162a4..8e46f57e4569 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -63,6 +63,7 @@ i915-y += i915_driver.o \
# core peripheral code
i915-y += \
soc/intel_dram.o \
+ soc/intel_gmch.o \
soc/intel_pch.o
# core library code
@@ -238,6 +239,7 @@ i915-y += \
display/intel_display_power.o \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
+ display/intel_display_rps.o \
display/intel_dmc.o \
display/intel_dpio_phy.o \
display/intel_dpll.o \
@@ -266,8 +268,11 @@ i915-y += \
display/intel_quirks.o \
display/intel_sprite.o \
display/intel_tc.o \
+ display/intel_vblank.o \
display/intel_vga.o \
+ display/intel_wm.o \
display/i9xx_plane.o \
+ display/i9xx_wm.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
display/skl_watermark.o
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 24ef36ec2d3d..920d570f7594 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -17,6 +17,7 @@
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
@@ -136,16 +137,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp;
-
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- trans_dp |= TRANS_DP_ENH_FRAMING;
- else
- trans_dp &= ~TRANS_DP_ENH_FRAMING;
- intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
+ intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe),
+ TRANS_DP_ENH_FRAMING,
+ drm_dp_enhanced_frame_cap(intel_dp->dpcd) ?
+ TRANS_DP_ENH_FRAMING : 0);
} else {
if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
@@ -398,6 +395,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
if (intel_dp_is_edp(intel_dp))
intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
+
+ intel_audio_codec_get_config(encoder, pipe_config);
}
static void
@@ -1198,29 +1197,6 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
-static bool gm45_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
- break;
- case HPD_PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
- break;
- case HPD_PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
-}
-
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1277,11 +1253,19 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
bool g4x_dp_init(struct drm_i915_private *dev_priv,
i915_reg_t output_reg, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
+ devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+
+ /* FIXME bail? */
+ if (!devdata)
+ drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n",
+ port_name(port));
+
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
return false;
@@ -1293,6 +1277,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder = &dig_port->base;
encoder = &intel_encoder->base;
+ intel_encoder->devdata = devdata;
+
mutex_init(&dig_port->hdcp_mutex);
if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -1375,10 +1361,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (HAS_GMCH(dev_priv)) {
- if (IS_GM45(dev_priv))
- dig_port->connected = gm45_digital_port_connected;
- else
- dig_port->connected = g4x_digital_port_connected;
+ dig_port->connected = g4x_digital_port_connected;
} else {
if (port == PORT_A)
dig_port->connected = ilk_digital_port_connected;
@@ -1389,7 +1372,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
if (port != PORT_A)
intel_infoframe_init(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
if (!intel_dp_init_connector(dig_port, intel_connector))
goto err_init_connector;
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index c3580d96765c..448ea26786e0 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
@@ -155,6 +156,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_VENDOR,
&pipe_config->infoframes.hdmi);
+
+ intel_audio_codec_get_config(encoder, pipe_config);
}
static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
@@ -271,8 +274,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
*/
if (pipe_config->pipe_bpp > 24) {
- intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
- intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ 0, TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= SDVO_COLOR_FORMAT_8bpc;
@@ -288,8 +291,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
- intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0);
}
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
@@ -546,10 +549,18 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
void g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
+ devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+
+ /* FIXME bail? */
+ if (!devdata)
+ drm_dbg_kms(&dev_priv->drm, "No VBT child device for HDMI-%c\n",
+ port_name(port));
+
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
return;
@@ -562,6 +573,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder = &dig_port->base;
+ intel_encoder->devdata = devdata;
+
mutex_init(&dig_port->hdcp_mutex);
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -627,6 +640,6 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
intel_hdmi_init_connector(dig_port, intel_connector);
}
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
new file mode 100644
index 000000000000..caef72d38798
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -0,0 +1,4047 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i9xx_wm.h"
+#include "intel_atomic.h"
+#include "intel_display.h"
+#include "intel_display_trace.h"
+#include "intel_mchbar_regs.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
+#include "vlv_sideband.h"
+
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+};
+
+struct cxsr_latency {
+ bool is_desktop : 1;
+ bool is_ddr3 : 1;
+ u16 fsb_freq;
+ u16 mem_freq;
+ u16 display_sr;
+ u16 display_hpll_disable;
+ u16 cursor_sr;
+ u16 cursor_hpll_disable;
+};
+
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
+
+static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
+ bool is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
+
+static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ vlv_punit_get(dev_priv);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if (enable)
+ val &= ~FORCE_DDR_HIGH_FREQ;
+ else
+ val |= FORCE_DDR_HIGH_FREQ;
+ val &= ~FORCE_DDR_LOW_FREQ;
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+ drm_err(&dev_priv->drm,
+ "timed out waiting for Punit DDR DVFS request\n");
+
+ vlv_punit_put(dev_priv);
+}
+
+static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ vlv_punit_get(dev_priv);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ if (enable)
+ val |= DSP_MAXFIFO_PM5_ENABLE;
+ else
+ val &= ~DSP_MAXFIFO_PM5_ENABLE;
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+
+ vlv_punit_put(dev_priv);
+}
+
+#define FW_WM(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
+
+static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+{
+ bool was_enabled;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
+ } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
+ } else if (IS_PINEVIEW(dev_priv)) {
+ val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
+ if (enable)
+ val |= PINEVIEW_SELF_REFRESH_EN;
+ else
+ val &= ~PINEVIEW_SELF_REFRESH_EN;
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+ _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
+ } else if (IS_I915GM(dev_priv)) {
+ /*
+ * FIXME can't find a bit like this for 915G, and
+ * yet it does have the related watermark in
+ * FW_BLC_SELF. What's going on?
+ */
+ was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
+ val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+ _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+ intel_uncore_write(&dev_priv->uncore, INSTPM, val);
+ intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
+ } else {
+ return false;
+ }
+
+ trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
+
+ drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ str_enabled_disabled(enable),
+ str_enabled_disabled(was_enabled));
+
+ return was_enabled;
+}
+
+/**
+ * intel_set_memory_cxsr - Configure CxSR state
+ * @dev_priv: i915 device
+ * @enable: Allow vs. disallow CxSR
+ *
+ * Allow or disallow the system to enter a special CxSR
+ * (C-state self refresh) state. What typically happens in CxSR mode
+ * is that several display FIFOs may get combined into a single larger
+ * FIFO for a particular plane (so called max FIFO mode) to allow the
+ * system to defer memory fetches longer, and the memory will enter
+ * self refresh.
+ *
+ * Note that enabling CxSR does not guarantee that the system enters
+ * this special mode, nor does it guarantee that the system stays
+ * in that mode once entered. So this just allows/disallows the system
+ * to autonomously utilize the CxSR mode. Other factors such as core
+ * C-states will affect when/if the system actually enters/exits the
+ * CxSR mode.
+ *
+ * Note that on VLV/CHV this actually only controls the max FIFO mode,
+ * and the system is free to enter/exit memory self refresh at any time
+ * even when the use of CxSR has been disallowed.
+ *
+ * While the system is actually in the CxSR/max FIFO mode, some plane
+ * control registers will not get latched on vblank. Thus in order to
+ * guarantee the system will respond to changes in the plane registers
+ * we must always disallow CxSR prior to making changes to those registers.
+ * Unfortunately the system will re-evaluate the CxSR conditions at
+ * frame start which happens after vblank start (which is when the plane
+ * registers would get latched), so we can't proceed with the plane update
+ * during the same frame where we disallowed CxSR.
+ *
+ * Certain platforms also have a deeper HPLL SR mode. Fortunately the
+ * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
+ * the hardware w.r.t. HPLL SR when writing to plane registers.
+ * Disallowing just CxSR is sufficient.
+ */
+bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+{
+ bool ret;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(dev_priv, enable);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.wm.vlv.cxsr = enable;
+ else if (IS_G4X(dev_priv))
+ dev_priv->display.wm.g4x.cxsr = enable;
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+
+ return ret;
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int pessimal_latency_ns = 5000;
+
+#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
+ ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
+
+static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
+ enum pipe pipe = crtc->pipe;
+ int sprite0_start, sprite1_start;
+ u32 dsparb, dsparb2, dsparb3;
+
+ switch (pipe) {
+ case PIPE_A:
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
+ break;
+ case PIPE_B:
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
+ break;
+ case PIPE_C:
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
+ sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
+ sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
+ break;
+ default:
+ MISSING_CASE(pipe);
+ return;
+ }
+
+ fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
+ fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
+ fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
+ fifo_state->plane[PLANE_CURSOR] = 63;
+}
+
+static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (i9xx_plane == PLANE_B)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (i9xx_plane == PLANE_B)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
+
+ return size;
+}
+
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
+
+ return size;
+}
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pnv_display_wm = {
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_display_hplloff_wm = {
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_cursor_wm = {
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i945_wm_info = {
+ .fifo_size = I945_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i915_wm_info = {
+ .fifo_size = I915_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i830_a_wm_info = {
+ .fifo_size = I855GM_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i830_bc_wm_info = {
+ .fifo_size = I855GM_FIFO_SIZE,
+ .max_wm = I915_MAX_WM / 2,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i845_wm_info = {
+ .fifo_size = I830_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+/**
+ * intel_wm_method1 - Method 1 / "small buffer" watermark formula
+ * @pixel_rate: Pipe pixel rate in kHz
+ * @cpp: Plane bytes per pixel
+ * @latency: Memory wakeup latency in 0.1us units
+ *
+ * Compute the watermark using the method 1 or "small buffer"
+ * formula. The caller may additionally add extra cachelines
+ * to account for TLB misses and clock crossings.
+ *
+ * This method is concerned with the short term drain rate
+ * of the FIFO, ie. it does not account for blanking periods
+ * which would effectively reduce the average drain rate across
+ * a longer period. The name "small" refers to the fact the
+ * FIFO is relatively small compared to the amount of data
+ * fetched.
+ *
+ * The FIFO level vs. time graph might look something like:
+ *
+ * |\ |\
+ * | \ | \
+ * __---__---__ (- plane active, _ blanking)
+ * -> time
+ *
+ * or perhaps like this:
+ *
+ * |\|\ |\|\
+ * __----__----__ (- plane active, _ blanking)
+ * -> time
+ *
+ * Returns:
+ * The watermark in bytes
+ */
+static unsigned int intel_wm_method1(unsigned int pixel_rate,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ u64 ret;
+
+ ret = mul_u32_u32(pixel_rate, cpp * latency);
+ ret = DIV_ROUND_UP_ULL(ret, 10000);
+
+ return ret;
+}
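+
+/*
+ * Editor's illustrative example (not part of the original patch): assuming
+ * a 148500 kHz pixel rate, cpp = 4 and latency = 50 (i.e. 5 usec in 0.1us
+ * units), method 1 yields 148500 * 4 * 50 / 10000 = 2970 bytes.
+ */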
+
+/**
+ * intel_wm_method2 - Method 2 / "large buffer" watermark formula
+ * @pixel_rate: Pipe pixel rate in kHz
+ * @htotal: Pipe horizontal total
+ * @width: Plane width in pixels
+ * @cpp: Plane bytes per pixel
+ * @latency: Memory wakeup latency in 0.1us units
+ *
+ * Compute the watermark using the method 2 or "large buffer"
+ * formula. The caller may additionally add extra cachelines
+ * to account for TLB misses and clock crossings.
+ *
+ * This method is concerned with the long term drain rate
+ * of the FIFO, ie. it does account for blanking periods
+ * which effectively reduce the average drain rate across
+ * a longer period. The name "large" refers to the fact the
+ * FIFO is relatively large compared to the amount of data
+ * fetched.
+ *
+ * The FIFO level vs. time graph might look something like:
+ *
+ * |\___ |\___
+ * | \___ | \___
+ * | \ | \
+ * __ --__--__--__--__--__--__ (- plane active, _ blanking)
+ * -> time
+ *
+ * Returns:
+ * The watermark in bytes
+ */
+static unsigned int intel_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ /*
+ * FIXME remove once all users are computing
+ * watermarks in the correct place.
+ */
+ if (WARN_ON_ONCE(htotal == 0))
+ htotal = 1;
+
+ ret = (latency * pixel_rate) / (htotal * 10000);
+ ret = (ret + 1) * width * cpp;
+
+ return ret;
+}
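+
+/*
+ * Editor's illustrative example (not part of the original patch): assuming
+ * a 148500 kHz pixel rate, htotal = 2200, width = 1920, cpp = 4 and
+ * latency = 50, the latency spans (50 * 148500) / (2200 * 10000) = 0 whole
+ * lines (integer division), so method 2 yields (0 + 1) * 1920 * 4 = 7680
+ * bytes.
+ */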
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @pixel_rate: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the FIFO buffer
+ * @cpp: bytes per pixel
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO-line-sized chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned int intel_calculate_wm(int pixel_rate,
+ const struct intel_watermark_params *wm,
+ int fifo_size, int cpp,
+ unsigned int latency_ns)
+{
+ int entries, wm_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * clocks go from a few thousand to several hundred thousand.
+ * latency is usually a few thousand
+ */
+ entries = intel_wm_method1(pixel_rate, cpp,
+ latency_ns / 100);
+ entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
+ wm->guard_size;
+ DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
+
+ wm_size = fifo_size - entries;
+ DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+
+ /*
+ * Bspec seems to indicate that the value shouldn't be lower than
+ * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
+ * Let's go for 8, which is the burst size, since certain platforms
+ * already use a hardcoded 8 (which is what the spec says should be
+ * done).
+ */
+ if (wm_size <= 8)
+ wm_size = 8;
+
+ return wm_size;
+}
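+
+/*
+ * Editor's illustrative example (not part of the original patch): with an
+ * assumed 148500 kHz pixel rate, cpp = 4, latency_ns = 5000, a 64 byte
+ * cacheline, guard_size = 2 and an assumed 96 cacheline FIFO, method 1
+ * gives 2970 bytes, so entries = DIV_ROUND_UP(2970, 64) + 2 = 49 and
+ * wm_size = 96 - 49 = 47.
+ */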
+
+static bool is_disabling(int old, int new, int threshold)
+{
+ return old >= threshold && new < threshold;
+}
+
+static bool is_enabling(int old, int new, int threshold)
+{
+ return old < threshold && new >= threshold;
+}
+
+static bool intel_crtc_active(struct intel_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ *
+ * We can ditch the adjusted_mode.crtc_clock check as soon
+ * as Haswell has gained clock readout/fastboot support.
+ *
+ * We can ditch the crtc->primary->state->fb check as soon as we can
+ * properly reconstruct framebuffers.
+ *
+ * FIXME: The intel_crtc->active here should be switched to
+ * crtc->state->active once we have proper CRTC states wired up
+ * for atomic.
+ */
+ return crtc && crtc->active && crtc->base.primary->state->fb &&
+ crtc->config->hw.adjusted_mode.crtc_clock;
+}
+
+static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc, *enabled = NULL;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ if (intel_crtc_active(crtc)) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pnv_update_wm(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned int wm;
+
+ latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq);
+ if (!latency) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown FSB/MEM found, disable CxSR\n");
+ intel_set_memory_cxsr(dev_priv, false);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev_priv);
+ if (crtc) {
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int pixel_rate = crtc->config->pixel_rate;
+ int cpp = fb->format->cpp[0];
+
+ /* Display SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
+ pnv_display_wm.fifo_size,
+ cpp, latency->display_sr);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= FW_WM(wm, SR);
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
+ pnv_display_wm.fifo_size,
+ 4, latency->cursor_sr);
+ intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
+ FW_WM(wm, CURSOR_SR));
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
+ cpp, latency->display_hpll_disable);
+ intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
+ 4, latency->cursor_hpll_disable);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= FW_WM(wm, HPLL_CURSOR);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
+
+ intel_set_memory_cxsr(dev_priv, true);
+ } else {
+ intel_set_memory_cxsr(dev_priv, false);
+ }
+}
+
+/*
+ * Documentation says:
+ * "If the line size is small, the TLB fetches can get in the way of the
+ * data fetches, causing some lag in the pixel data return which is not
+ * accounted for in the above formulas. The following adjustment only
+ * needs to be applied if eight whole lines fit in the buffer at once.
+ * The WM is adjusted upwards by the difference between the FIFO size
+ * and the size of 8 whole lines. This adjustment is always performed
+ * in the actual pixel depth regardless of whether FBC is enabled or not."
+ */
+static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
+{
+ int tlb_miss = fifo_size * 64 - width * cpp * 8;
+
+ return max(0, tlb_miss);
+}
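+
+/*
+ * Editor's illustrative example (not part of the original patch): for an
+ * assumed 127 cacheline FIFO (8128 bytes), a 1920 pixel wide 32bpp plane
+ * needs 1920 * 4 * 8 = 61440 bytes for eight lines, so no adjustment is
+ * applied (max(0, 8128 - 61440) = 0); a 64 pixel wide plane would get an
+ * 8128 - 64 * 4 * 8 = 6080 byte adjustment.
+ */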
+
+static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+ const struct g4x_wm_values *wm)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
+
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
+
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
+}
+
+#define FW_WM_VLV(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
+
+static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
+ const struct vlv_wm_values *wm)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
+
+ intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ }
+
+ /*
+ * Zero the (unused) WM1 watermarks, and also clear all the
+ * high order bits so that there are no out of bounds values
+ * present in the registers during the reprogramming.
+ */
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
+
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
+ intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ } else {
+ intel_uncore_write(&dev_priv->uncore, DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ }
+
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
+}
+
+#undef FW_WM_VLV
+
+static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+{
+ /* all latencies in usec */
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+
+ dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
+}
+
+static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
+{
+ /*
+ * DSPCNTR[13] supposedly controls whether the
+ * primary plane can use the FIFO space otherwise
+ * reserved for the sprite plane. It's not 100% clear
+ * what the actual FIFO size is, but it looks like we
+ * can happily set both primary and sprite watermarks
+ * up to 127 cachelines. So that would seem to mean
+ * that either DSPCNTR[13] doesn't do anything, or that
+ * the total FIFO is >= 256 cachelines in size. Either
+ * way, we don't seem to have to worry about this
+ * repartitioning as the maximum watermark value the
+ * register can hold for each plane is lower than the
+ * minimum FIFO size.
+ */
+ switch (plane_id) {
+ case PLANE_CURSOR:
+ return 63;
+ case PLANE_PRIMARY:
+ return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
+ case PLANE_SPRITE0:
+ return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
+ default:
+ MISSING_CASE(plane_id);
+ return 0;
+ }
+}
+
+static int g4x_fbc_fifo_size(int level)
+{
+ switch (level) {
+ case G4X_WM_LEVEL_SR:
+ return 7;
+ case G4X_WM_LEVEL_HPLL:
+ return 15;
+ default:
+ MISSING_CASE(level);
+ return 0;
+ }
+}
+
+static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_display_mode *pipe_mode =
+ &crtc_state->hw.pipe_mode;
+ unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
+ unsigned int pixel_rate, htotal, cpp, width, wm;
+
+ if (latency == 0)
+ return USHRT_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ /*
+ * WaUse32BppForSRWM:ctg,elk
+ *
+ * The spec fails to list this restriction for the
+ * HPLL watermark, which seems a little strange.
+ * Let's use 32bpp for the HPLL watermark as well.
+ */
+ if (plane->id == PLANE_PRIMARY &&
+ level != G4X_WM_LEVEL_NORMAL)
+ cpp = max(cpp, 4u);
+
+ pixel_rate = crtc_state->pixel_rate;
+ htotal = pipe_mode->crtc_htotal;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ if (plane->id == PLANE_CURSOR) {
+ wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
+ } else if (plane->id == PLANE_PRIMARY &&
+ level == G4X_WM_LEVEL_NORMAL) {
+ wm = intel_wm_method1(pixel_rate, cpp, latency);
+ } else {
+ unsigned int small, large;
+
+ small = intel_wm_method1(pixel_rate, cpp, latency);
+ large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
+
+ wm = min(small, large);
+ }
+
+ wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
+ width, cpp);
+
+ wm = DIV_ROUND_UP(wm, 64) + 2;
+
+ return min_t(unsigned int, wm, USHRT_MAX);
+}
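+
+/*
+ * Editor's illustrative example (not part of the original patch): for the
+ * primary plane at G4X_WM_LEVEL_NORMAL with an assumed 148500 kHz pixel
+ * rate, cpp = 4, width = 1920 and the 5 usec normal latency (50 in 0.1us
+ * units), method 1 gives 2970 bytes, the TLB miss workaround adds nothing
+ * (see the example above), and the final watermark is
+ * DIV_ROUND_UP(2970, 64) + 2 = 49 cachelines.
+ */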
+
+static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
+ int level, enum plane_id plane_id, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ bool dirty = false;
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ dirty |= raw->plane[plane_id] != value;
+ raw->plane[plane_id] = value;
+ }
+
+ return dirty;
+}
+
+static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
+ int level, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ bool dirty = false;
+
+ /* NORMAL level doesn't have an FBC watermark */
+ level = max(level, G4X_WM_LEVEL_SR);
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ dirty |= raw->fbc != value;
+ raw->fbc = value;
+ }
+
+ return dirty;
+}
+
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 pri_val);
+
+static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum plane_id plane_id = plane->id;
+ bool dirty = false;
+ int level;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state)) {
+ dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
+ if (plane_id == PLANE_PRIMARY)
+ dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
+ goto out;
+ }
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+ int wm, max_wm;
+
+ wm = g4x_compute_wm(crtc_state, plane_state, level);
+ max_wm = g4x_plane_fifo_size(plane_id, level);
+
+ if (wm > max_wm)
+ break;
+
+ dirty |= raw->plane[plane_id] != wm;
+ raw->plane[plane_id] = wm;
+
+ if (plane_id != PLANE_PRIMARY ||
+ level == G4X_WM_LEVEL_NORMAL)
+ continue;
+
+ wm = ilk_compute_fbc_wm(crtc_state, plane_state,
+ raw->plane[plane_id]);
+ max_wm = g4x_fbc_fifo_size(level);
+
+ /*
+ * FBC wm is not mandatory as we
+ * can always just disable its use.
+ */
+ if (wm > max_wm)
+ wm = USHRT_MAX;
+
+ dirty |= raw->fbc != wm;
+ raw->fbc = wm;
+ }
+
+ /* mark watermarks as invalid */
+ dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
+
+ if (plane_id == PLANE_PRIMARY)
+ dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
+
+ out:
+ if (dirty) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
+ plane->base.name,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
+
+ if (plane_id == PLANE_PRIMARY)
+ drm_dbg_kms(&dev_priv->drm,
+ "FBC watermarks: SR=%d, HPLL=%d\n",
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
+ }
+
+ return dirty;
+}
+
+static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id, int level)
+{
+ const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
+}
+
+static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ int level)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (level >= dev_priv->display.wm.num_levels)
+ return false;
+
+ return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
+ g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
+ g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
+}
+
+/* mark all levels starting from 'level' as invalid */
+static void g4x_invalidate_wms(struct intel_crtc *crtc,
+ struct g4x_wm_state *wm_state, int level)
+{
+ if (level <= G4X_WM_LEVEL_NORMAL) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm.plane[plane_id] = USHRT_MAX;
+ }
+
+ if (level <= G4X_WM_LEVEL_SR) {
+ wm_state->cxsr = false;
+ wm_state->sr.cursor = USHRT_MAX;
+ wm_state->sr.plane = USHRT_MAX;
+ wm_state->sr.fbc = USHRT_MAX;
+ }
+
+ if (level <= G4X_WM_LEVEL_HPLL) {
+ wm_state->hpll_en = false;
+ wm_state->hpll.cursor = USHRT_MAX;
+ wm_state->hpll.plane = USHRT_MAX;
+ wm_state->hpll.fbc = USHRT_MAX;
+ }
+}
+
+static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
+ int level)
+{
+ if (level < G4X_WM_LEVEL_SR)
+ return false;
+
+ if (level >= G4X_WM_LEVEL_SR &&
+ wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
+ return false;
+
+ if (level >= G4X_WM_LEVEL_HPLL &&
+ wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
+ return false;
+
+ return true;
+}
+
+static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct g4x_pipe_wm *raw;
+ enum plane_id plane_id;
+ int level;
+
+ level = G4X_WM_LEVEL_NORMAL;
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm.plane[plane_id] = raw->plane[plane_id];
+
+ level = G4X_WM_LEVEL_SR;
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
+ wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
+ wm_state->sr.fbc = raw->fbc;
+
+ wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
+
+ level = G4X_WM_LEVEL_HPLL;
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
+ wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
+ wm_state->hpll.fbc = raw->fbc;
+
+ wm_state->hpll_en = wm_state->cxsr;
+
+ level++;
+
+ out:
+ if (level == G4X_WM_LEVEL_NORMAL)
+ return -EINVAL;
+
+ /* invalidate the higher levels */
+ g4x_invalidate_wms(crtc, wm_state, level);
+
+ /*
+ * Determine if the FBC watermark(s) can be used. If
+ * this isn't the case we prefer to disable the FBC
+ * watermark(s) rather than disable the SR/HPLL
+ * level(s) entirely. 'level-1' is the highest valid
+ * level here.
+ */
+ wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
+
+ return 0;
+}
+
+static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ unsigned int dirty = 0;
+ int i;
+
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
+ continue;
+
+ if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
+ dirty |= BIT(plane->id);
+ }
+
+ if (!dirty)
+ return 0;
+
+ return _g4x_compute_pipe_wm(crtc_state);
+}
+
+static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
+ const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
+ const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
+ enum plane_id plane_id;
+
+ if (!new_crtc_state->hw.active ||
+ intel_crtc_needs_modeset(new_crtc_state)) {
+ *intermediate = *optimal;
+
+ intermediate->cxsr = false;
+ intermediate->hpll_en = false;
+ goto out;
+ }
+
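+ /*
+ * The intermediate watermarks must be safe for both the old and
+ * the new plane configuration, hence take the max of the two
+ * for each plane and only keep cxsr/hpll/fbc when both agree.
+ */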
+ intermediate->cxsr = optimal->cxsr && active->cxsr &&
+ !new_crtc_state->disable_cxsr;
+ intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
+ !new_crtc_state->disable_cxsr;
+ intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ intermediate->wm.plane[plane_id] =
+ max(optimal->wm.plane[plane_id],
+ active->wm.plane[plane_id]);
+
+ drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
+ }
+
+ intermediate->sr.plane = max(optimal->sr.plane,
+ active->sr.plane);
+ intermediate->sr.cursor = max(optimal->sr.cursor,
+ active->sr.cursor);
+ intermediate->sr.fbc = max(optimal->sr.fbc,
+ active->sr.fbc);
+
+ intermediate->hpll.plane = max(optimal->hpll.plane,
+ active->hpll.plane);
+ intermediate->hpll.cursor = max(optimal->hpll.cursor,
+ active->hpll.cursor);
+ intermediate->hpll.fbc = max(optimal->hpll.fbc,
+ active->hpll.fbc);
+
+ drm_WARN_ON(&dev_priv->drm,
+ (intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
+ intermediate->cxsr);
+ drm_WARN_ON(&dev_priv->drm,
+ (intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
+ intermediate->hpll_en);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
+ intermediate->fbc_en && intermediate->cxsr);
+ drm_WARN_ON(&dev_priv->drm,
+ intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
+ intermediate->fbc_en && intermediate->hpll_en);
+
+out:
+ /*
+ * If our intermediate WMs are identical to the final WMs, then we can
+ * omit the post-vblank programming; only update if they differ.
+ */
+ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
+ new_crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
+static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+ struct g4x_wm_values *wm)
+{
+ struct intel_crtc *crtc;
+ int num_active_pipes = 0;
+
+ wm->cxsr = true;
+ wm->hpll_en = true;
+ wm->fbc_en = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
+
+ if (!crtc->active)
+ continue;
+
+ if (!wm_state->cxsr)
+ wm->cxsr = false;
+ if (!wm_state->hpll_en)
+ wm->hpll_en = false;
+ if (!wm_state->fbc_en)
+ wm->fbc_en = false;
+
+ num_active_pipes++;
+ }
+
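+ /* cxsr/hpll/fbc can only be used when a single pipe is active */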
+ if (num_active_pipes != 1) {
+ wm->cxsr = false;
+ wm->hpll_en = false;
+ wm->fbc_en = false;
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
+ enum pipe pipe = crtc->pipe;
+
+ wm->pipe[pipe] = wm_state->wm;
+ if (crtc->active && wm->cxsr)
+ wm->sr = wm_state->sr;
+ if (crtc->active && wm->hpll_en)
+ wm->hpll = wm_state->hpll;
+ }
+}
+
+static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values new_wm = {};
+
+ g4x_merge_wm(dev_priv, &new_wm);
+
+ if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
+ return;
+
+ if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, false);
+
+ g4x_write_wm_values(dev_priv, &new_wm);
+
+ if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, true);
+
+ *old_wm = new_wm;
+}
+
+static void g4x_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
+ g4x_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void g4x_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ g4x_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+/* latency must be in 0.1us units. */
+static unsigned int vlv_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ ret = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, latency);
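+ /* the FIFO is measured in 64 byte cachelines, so convert from bytes */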
+ ret = DIV_ROUND_UP(ret, 64);
+
+ return ret;
+}
+
+static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
+{
+ /* all latencies in usec */
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
+ }
+}
+
+static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_display_mode *pipe_mode =
+ &crtc_state->hw.pipe_mode;
+ unsigned int pixel_rate, htotal, cpp, width, wm;
+
+ if (dev_priv->display.wm.pri_latency[level] == 0)
+ return USHRT_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+ pixel_rate = crtc_state->pixel_rate;
+ htotal = pipe_mode->crtc_htotal;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ if (plane->id == PLANE_CURSOR) {
+ /*
+ * FIXME the formula gives values that are
+ * too big for the cursor FIFO, and hence we
+ * would never be able to use cursors. For
+ * now just hardcode the watermark.
+ */
+ wm = 63;
+ } else {
+ wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
+ dev_priv->display.wm.pri_latency[level] * 10);
+ }
+
+ return min_t(unsigned int, wm, USHRT_MAX);
+}
+
+static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
+{
+ return (active_planes & (BIT(PLANE_SPRITE0) |
+ BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
+}
+
+static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
+ struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ int num_active_planes = hweight8(active_planes);
+ const int fifo_size = 511;
+ int fifo_extra, fifo_left = fifo_size;
+ int sprite0_fifo_extra = 0;
+ unsigned int total_rate;
+ enum plane_id plane_id;
+
+ /*
+ * When enabling sprite0 after sprite1 has already been enabled
+ * we tend to get an underrun unless sprite0 already has some
+ * FIFO space allocated. Hence we always allocate at least one
+ * cacheline for sprite0 whenever sprite1 is enabled.
+ *
+ * All other plane enable sequences appear immune to this problem.
+ */
+ if (vlv_need_sprite0_fifo_workaround(active_planes))
+ sprite0_fifo_extra = 1;
+
+ total_rate = raw->plane[PLANE_PRIMARY] +
+ raw->plane[PLANE_SPRITE0] +
+ raw->plane[PLANE_SPRITE1] +
+ sprite0_fifo_extra;
+
+ if (total_rate > fifo_size)
+ return -EINVAL;
+
+ if (total_rate == 0)
+ total_rate = 1;
+
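+ /* split the FIFO among the active planes in proportion to their raw PM2 watermarks */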
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ unsigned int rate;
+
+ if ((active_planes & BIT(plane_id)) == 0) {
+ fifo_state->plane[plane_id] = 0;
+ continue;
+ }
+
+ rate = raw->plane[plane_id];
+ fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
+ fifo_left -= fifo_state->plane[plane_id];
+ }
+
+ fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
+ fifo_left -= sprite0_fifo_extra;
+
+ fifo_state->plane[PLANE_CURSOR] = 63;
+
+ fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
+
+ /* spread the remainder evenly */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ int plane_extra;
+
+ if (fifo_left == 0)
+ break;
+
+ if ((active_planes & BIT(plane_id)) == 0)
+ continue;
+
+ plane_extra = min(fifo_extra, fifo_left);
+ fifo_state->plane[plane_id] += plane_extra;
+ fifo_left -= plane_extra;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
+
+ /* give it all to the primary plane if none are active */
+ if (active_planes == 0) {
+ drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
+ fifo_state->plane[PLANE_PRIMARY] = fifo_left;
+ }
+
+ return 0;
+}
+
+/* mark all levels starting from 'level' as invalid */
+static void vlv_invalidate_wms(struct intel_crtc *crtc,
+ struct vlv_wm_state *wm_state, int level)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm[level].plane[plane_id] = USHRT_MAX;
+
+ wm_state->sr[level].cursor = USHRT_MAX;
+ wm_state->sr[level].plane = USHRT_MAX;
+ }
+}
+
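+/*
+ * The watermark values are stored inverted ('fifo_size - wm');
+ * anything that doesn't fit the FIFO is flagged invalid with USHRT_MAX.
+ */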
+static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
+{
+ if (wm > fifo_size)
+ return USHRT_MAX;
+ else
+ return fifo_size - wm;
+}
+
+/*
+ * Starting from 'level' set all higher
+ * levels to 'value' in the "raw" watermarks.
+ */
+static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
+ int level, enum plane_id plane_id, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ bool dirty = false;
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+
+ dirty |= raw->plane[plane_id] != value;
+ raw->plane[plane_id] = value;
+ }
+
+ return dirty;
+}
+
+static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum plane_id plane_id = plane->id;
+ int level;
+ bool dirty = false;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state)) {
+ dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
+ goto out;
+ }
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+ int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
+ int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
+
+ if (wm > max_wm)
+ break;
+
+ dirty |= raw->plane[plane_id] != wm;
+ raw->plane[plane_id] = wm;
+ }
+
+ /* mark all higher levels as invalid */
+ dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
+
+out:
+ if (dirty)
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
+ plane->base.name,
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
+
+ return dirty;
+}
+
+static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id, int level)
+{
+ const struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+
+ return raw->plane[plane_id] <= fifo_state->plane[plane_id];
+}
+
+static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
+{
+ return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
+}
+
+static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ int num_active_planes = hweight8(active_planes);
+ enum plane_id plane_id;
+ int level;
+
+ /* initially allow all levels */
+ wm_state->num_levels = dev_priv->display.wm.num_levels;
+ /*
+ * Note that enabling cxsr with no primary/sprite planes
+ * enabled can wedge the pipe. Hence we only allow cxsr
+ * with exactly one enabled primary/sprite plane.
+ */
+ wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
+
+ for (level = 0; level < wm_state->num_levels; level++) {
+ const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
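+ /* in self refresh the FIFOs of all pipes are available (512 cachelines each) */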
+ const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
+
+ if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
+ break;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ wm_state->wm[level].plane[plane_id] =
+ vlv_invert_wm_value(raw->plane[plane_id],
+ fifo_state->plane[plane_id]);
+ }
+
+ wm_state->sr[level].plane =
+ vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
+ raw->plane[PLANE_SPRITE0],
+ raw->plane[PLANE_SPRITE1]),
+ sr_fifo_size);
+
+ wm_state->sr[level].cursor =
+ vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
+ 63);
+ }
+
+ if (level == 0)
+ return -EINVAL;
+
+ /* limit to only levels we can actually handle */
+ wm_state->num_levels = level;
+
+ /* invalidate the higher levels */
+ vlv_invalidate_wms(crtc, wm_state, level);
+
+ return 0;
+}
+
+static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ unsigned int dirty = 0;
+ int i;
+
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
+ continue;
+
+ if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
+ dirty |= BIT(plane->id);
+ }
+
+ /*
+ * DSPARB registers may have been reset due to the
+ * power well being turned off. Make sure we restore
+ * them to a consistent state even if no primary/sprite
+ * planes are initially active. We also force a FIFO
+ * recomputation so that we are sure to sanitize the
+ * FIFO setting we took over from the BIOS even if there
+ * are no active planes on the crtc.
+ */
+ if (intel_crtc_needs_modeset(crtc_state))
+ dirty = ~0;
+
+ if (!dirty)
+ return 0;
+
+ /* cursor changes don't warrant a FIFO recompute */
+ if (dirty & ~BIT(PLANE_CURSOR)) {
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct vlv_fifo_state *old_fifo_state =
+ &old_crtc_state->wm.vlv.fifo_state;
+ const struct vlv_fifo_state *new_fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ int ret;
+
+ ret = vlv_compute_fifo(crtc_state);
+ if (ret)
+ return ret;
+
+ if (intel_crtc_needs_modeset(crtc_state) ||
+ memcmp(old_fifo_state, new_fifo_state,
+ sizeof(*new_fifo_state)) != 0)
+ crtc_state->fifo_changed = true;
+ }
+
+ return _vlv_compute_pipe_wm(crtc_state);
+}
+
+#define VLV_FIFO(plane, value) \
+ (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
+
+static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ int sprite0_start, sprite1_start, fifo_size;
+ u32 dsparb, dsparb2, dsparb3;
+
+ if (!crtc_state->fifo_changed)
+ return;
+
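+ /*
+ * DSPARB takes the cumulative FIFO split points (where sprite0
+ * and sprite1 start) rather than the per-plane sizes.
+ */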
+ sprite0_start = fifo_state->plane[PLANE_PRIMARY];
+ sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
+ fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
+
+ drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
+
+ trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
+
+ /*
+ * uncore.lock serves a double purpose here. It allows us to
+ * use the less expensive intel_uncore_*_fw() accessors, and
+ * it protects the DSPARB registers from getting clobbered by
+ * parallel updates from multiple pipes.
+ *
+ * intel_pipe_update_start() has already disabled interrupts
+ * for us, so a plain spin_lock() is sufficient here.
+ */
+ spin_lock(&uncore->lock);
+
+ switch (crtc->pipe) {
+ case PIPE_A:
+ dsparb = intel_uncore_read_fw(uncore, DSPARB);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+
+ dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
+ VLV_FIFO(SPRITEB, 0xff));
+ dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
+ VLV_FIFO(SPRITEB, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
+ VLV_FIFO(SPRITEB_HI, 0x1));
+ dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
+
+ intel_uncore_write_fw(uncore, DSPARB, dsparb);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ break;
+ case PIPE_B:
+ dsparb = intel_uncore_read_fw(uncore, DSPARB);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+
+ dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
+ VLV_FIFO(SPRITED, 0xff));
+ dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
+ VLV_FIFO(SPRITED, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
+ VLV_FIFO(SPRITED_HI, 0xff));
+ dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
+
+ intel_uncore_write_fw(uncore, DSPARB, dsparb);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ break;
+ case PIPE_C:
+ dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+
+ dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
+ VLV_FIFO(SPRITEF, 0xff));
+ dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
+ VLV_FIFO(SPRITEF, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
+ VLV_FIFO(SPRITEF_HI, 0xff));
+ dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
+
+ intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ break;
+ default:
+ break;
+ }
+
+ intel_uncore_posting_read_fw(uncore, DSPARB);
+
+ spin_unlock(&uncore->lock);
+}
+
+#undef VLV_FIFO
+
+static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
+ const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
+ const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
+ int level;
+
+ if (!new_crtc_state->hw.active ||
+ intel_crtc_needs_modeset(new_crtc_state)) {
+ *intermediate = *optimal;
+
+ intermediate->cxsr = false;
+ goto out;
+ }
+
+ intermediate->num_levels = min(optimal->num_levels, active->num_levels);
+ intermediate->cxsr = optimal->cxsr && active->cxsr &&
+ !new_crtc_state->disable_cxsr;
+
+ for (level = 0; level < intermediate->num_levels; level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ intermediate->wm[level].plane[plane_id] =
+ min(optimal->wm[level].plane[plane_id],
+ active->wm[level].plane[plane_id]);
+ }
+
+ intermediate->sr[level].plane = min(optimal->sr[level].plane,
+ active->sr[level].plane);
+ intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
+ active->sr[level].cursor);
+ }
+
+ vlv_invalidate_wms(crtc, intermediate, level);
+
+out:
+ /*
+ * If our intermediate WMs are identical to the final WMs, then we can
+ * omit the post-vblank programming; only update if they differ.
+ */
+ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
+ new_crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
+static void vlv_merge_wm(struct drm_i915_private *dev_priv,
+ struct vlv_wm_values *wm)
+{
+ struct intel_crtc *crtc;
+ int num_active_pipes = 0;
+
+ wm->level = dev_priv->display.wm.num_levels - 1;
+ wm->cxsr = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
+
+ if (!crtc->active)
+ continue;
+
+ if (!wm_state->cxsr)
+ wm->cxsr = false;
+
+ num_active_pipes++;
+ wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
+ }
+
+ if (num_active_pipes != 1)
+ wm->cxsr = false;
+
+ if (num_active_pipes > 1)
+ wm->level = VLV_WM_LEVEL_PM2;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
+ enum pipe pipe = crtc->pipe;
+
+ wm->pipe[pipe] = wm_state->wm[wm->level];
+ if (crtc->active && wm->cxsr)
+ wm->sr = wm_state->sr[wm->level];
+
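+		/* program a fixed DDL value of 2 with high precision for all planes */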
+ wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
+ }
+}
+
+static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values new_wm = {};
+
+ vlv_merge_wm(dev_priv, &new_wm);
+
+ if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
+ return;
+
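+ /*
+ * Disable any power saving states that are going away before
+ * writing the new watermarks, and enable new ones only afterwards.
+ */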
+ if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
+ chv_set_memory_dvfs(dev_priv, false);
+
+ if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
+ chv_set_memory_pm5(dev_priv, false);
+
+ if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(dev_priv, &new_wm);
+
+ if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, true);
+
+ if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
+ chv_set_memory_pm5(dev_priv, true);
+
+ if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
+ chv_set_memory_dvfs(dev_priv, true);
+
+ *old_wm = new_wm;
+}
+
+static void vlv_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
+ vlv_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void vlv_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ vlv_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void i965_update_wm(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+ bool cxsr_enabled;
+
+ /* Calc sr entries for one plane configs */
+ crtc = single_enabled_crtc(dev_priv);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ const struct drm_display_mode *pipe_mode =
+ &crtc->config->hw.pipe_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int pixel_rate = crtc->config->pixel_rate;
+ int htotal = pipe_mode->crtc_htotal;
+ int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
+ int cpp = fb->format->cpp[0];
+ int entries;
+
+ entries = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, sr_latency_ns / 100);
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = intel_wm_method2(pixel_rate, htotal,
+ crtc->base.cursor->state->crtc_w, 4,
+ sr_latency_ns / 100);
+ entries = DIV_ROUND_UP(entries,
+ i965_cursor_wm_info.cacheline_size) +
+ i965_cursor_wm_info.guard_size;
+
+ cursor_sr = i965_cursor_wm_info.fifo_size - entries;
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ cxsr_enabled = true;
+ } else {
+ cxsr_enabled = false;
+ /* Turn off self refresh unless exactly one pipe is enabled */
+ intel_set_memory_cxsr(dev_priv, false);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
+ /* update cursor SR watermark */
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
+}
+
+#undef FW_WM
+
+static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
+ enum i9xx_plane_id i9xx_plane)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&i915->drm, plane) {
+ if (plane->id == PLANE_PRIMARY &&
+ plane->i9xx_plane == i9xx_plane)
+ return intel_crtc_for_pipe(i915, plane->pipe);
+ }
+
+ return NULL;
+}
+
+static void i9xx_update_wm(struct drm_i915_private *dev_priv)
+{
+ const struct intel_watermark_params *wm_info;
+ u32 fwater_lo;
+ u32 fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct intel_crtc *crtc;
+
+ if (IS_I945GM(dev_priv))
+ wm_info = &i945_wm_info;
+ else if (DISPLAY_VER(dev_priv) != 2)
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i830_a_wm_info;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ else
+ fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
+ crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
+ if (intel_crtc_active(crtc)) {
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ cpp = 4;
+ else
+ cpp = fb->format->cpp[0];
+
+ planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ wm_info, fifo_size, cpp,
+ pessimal_latency_ns);
+ } else {
+ planea_wm = fifo_size - wm_info->guard_size;
+ if (planea_wm > (long)wm_info->max_wm)
+ planea_wm = wm_info->max_wm;
+ }
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ wm_info = &i830_bc_wm_info;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ else
+ fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
+ crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
+ if (intel_crtc_active(crtc)) {
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ cpp = 4;
+ else
+ cpp = fb->format->cpp[0];
+
+ planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ wm_info, fifo_size, cpp,
+ pessimal_latency_ns);
+ } else {
+ planeb_wm = fifo_size - wm_info->guard_size;
+ if (planeb_wm > (long)wm_info->max_wm)
+ planeb_wm = wm_info->max_wm;
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ crtc = single_enabled_crtc(dev_priv);
+ if (IS_I915GM(dev_priv) && crtc) {
+ struct drm_i915_gem_object *obj;
+
+ obj = intel_fb_obj(crtc->base.primary->state->fb);
+
+ /* self-refresh seems busted with untiled */
+ if (!i915_gem_object_is_tiled(obj))
+ crtc = NULL;
+ }
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ intel_set_memory_cxsr(dev_priv, false);
+
+ /* Calc sr entries for one plane configs */
+ if (HAS_FW_BLC(dev_priv) && crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ const struct drm_display_mode *pipe_mode =
+ &crtc->config->hw.pipe_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int pixel_rate = crtc->config->pixel_rate;
+ int htotal = pipe_mode->crtc_htotal;
+ int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
+ int cpp;
+ int entries;
+
+ if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ cpp = 4;
+ else
+ cpp = fb->format->cpp[0];
+
+ entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
+ sr_latency_ns / 100);
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
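+ /* pack the watermarks into the FW_BLC and FW_BLC2 register values */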
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
+
+ if (crtc)
+ intel_set_memory_cxsr(dev_priv, true);
+}
+
+static void i845_update_wm(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+ u32 fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev_priv);
+ if (crtc == NULL)
+ return;
+
+ planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ &i845_wm_info,
+ i845_get_fifo_size(dev_priv, PLANE_A),
+ 4, pessimal_latency_ns);
+ fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+}
+
+/* latency must be in 0.1us units. */
+static unsigned int ilk_wm_method1(unsigned int pixel_rate,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ ret = intel_wm_method1(pixel_rate, cpp, latency);
+ ret = DIV_ROUND_UP(ret, 64) + 2;
+
+ return ret;
+}
+
+/* latency must be in 0.1us units. */
+static unsigned int ilk_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ ret = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, latency);
+ ret = DIV_ROUND_UP(ret, 64) + 2;
+
+ return ret;
+}
+
+static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
+{
+ /*
+ * Neither of these should be possible since this function shouldn't be
+ * called if the CRTC is off or the plane is invisible. But let's be
+ * extra paranoid to avoid a potential divide-by-zero if we screw up
+ * elsewhere in the driver.
+ */
+ if (WARN_ON(!cpp))
+ return 0;
+ if (WARN_ON(!horiz_pixels))
+ return 0;
+
+ return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
+}
+
+struct ilk_wm_maximums {
+ u16 pri;
+ u16 spr;
+ u16 cur;
+ u16 fbc;
+};
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 mem_value, bool is_lp)
+{
+ u32 method1, method2;
+ int cpp;
+
+ if (mem_value == 0)
+ return U32_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
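+ /*
+ * The pipe (non-LP) watermark only uses method 1, the LP
+ * watermarks use the smaller of method 1 and method 2.
+ */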
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
+
+ if (!is_lp)
+ return method1;
+
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp, mem_value);
+
+ return min(method1, method2);
+}
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 mem_value)
+{
+ u32 method1, method2;
+ int cpp;
+
+ if (mem_value == 0)
+ return U32_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp, mem_value);
+ return min(method1, method2);
+}
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 mem_value)
+{
+ int cpp;
+
+ if (mem_value == 0)
+ return U32_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ return ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp, mem_value);
+}
+
+/* Only for WM_LP. */
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 pri_val)
+{
+ int cpp;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp);
+}
+
+static unsigned int
+ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return 3072;
+ else if (DISPLAY_VER(dev_priv) >= 7)
+ return 768;
+ else
+ return 512;
+}
+
+static unsigned int
+ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ int level, bool is_sprite)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ /* BDW primary/sprite plane watermarks */
+ return level == 0 ? 255 : 2047;
+ else if (DISPLAY_VER(dev_priv) >= 7)
+ /* IVB/HSW primary/sprite plane watermarks */
+ return level == 0 ? 127 : 1023;
+ else if (!is_sprite)
+ /* ILK/SNB primary plane watermarks */
+ return level == 0 ? 127 : 511;
+ else
+ /* ILK/SNB sprite plane watermarks */
+ return level == 0 ? 63 : 255;
+}
+
+static unsigned int
+ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
+{
+ if (DISPLAY_VER(dev_priv) >= 7)
+ return level == 0 ? 63 : 255;
+ else
+ return level == 0 ? 31 : 63;
+}
+
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return 31;
+ else
+ return 15;
+}
+
+/* Calculate the maximum primary/sprite plane watermark */
+static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ bool is_sprite)
+{
+ unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
+
+ /* if sprites aren't enabled, sprites get nothing */
+ if (is_sprite && !config->sprites_enabled)
+ return 0;
+
+ /* HSW allows LP1+ watermarks even with multiple pipes */
+ if (level == 0 || config->num_pipes_active > 1) {
+ fifo_size /= INTEL_NUM_PIPES(dev_priv);
+
+ /*
+ * For some reason the non self refresh
+ * FIFO size is only half of the self
+ * refresh FIFO size on ILK/SNB.
+ */
+ if (DISPLAY_VER(dev_priv) <= 6)
+ fifo_size /= 2;
+ }
+
+ if (config->sprites_enabled) {
+ /* level 0 is always calculated with 1:1 split */
+ if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
+ if (is_sprite)
+ fifo_size *= 5;
+ fifo_size /= 6;
+ } else {
+ fifo_size /= 2;
+ }
+ }
+
+ /* clamp to max that the registers can hold */
+ return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
+}
+
+/* Calculate the maximum cursor plane watermark */
+static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
+ int level,
+ const struct intel_wm_config *config)
+{
+ /* HSW LP1+ watermarks w/ multiple pipes */
+ if (level > 0 && config->num_pipes_active > 1)
+ return 64;
+
+ /* otherwise just report max that registers can hold */
+ return ilk_cursor_wm_reg_max(dev_priv, level);
+}
+
+static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ struct ilk_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(dev_priv, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+}
+
+static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
+ int level,
+ struct ilk_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
+ max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
+ max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+}
+
+static bool ilk_validate_wm_level(int level,
+ const struct ilk_wm_maximums *max,
+ struct intel_wm_level *result)
+{
+ bool ret;
+
+ /* already determined to be invalid? */
+ if (!result->enable)
+ return false;
+
+ result->enable = result->pri_val <= max->pri &&
+ result->spr_val <= max->spr &&
+ result->cur_val <= max->cur;
+
+ ret = result->enable;
+
+ /*
+ * HACK until we can pre-compute everything,
+ * and thus fail gracefully if LP0 watermarks
+ * are exceeded...
+ */
+ if (level == 0 && !result->enable) {
+ if (result->pri_val > max->pri)
+ DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
+ level, result->pri_val, max->pri);
+ if (result->spr_val > max->spr)
+ DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
+ level, result->spr_val, max->spr);
+ if (result->cur_val > max->cur)
+ DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
+ level, result->cur_val, max->cur);
+
+ result->pri_val = min_t(u32, result->pri_val, max->pri);
+ result->spr_val = min_t(u32, result->spr_val, max->spr);
+ result->cur_val = min_t(u32, result->cur_val, max->cur);
+ result->enable = true;
+ }
+
+ return ret;
+}
+
+static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+ const struct intel_crtc *crtc,
+ int level,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *pristate,
+ const struct intel_plane_state *sprstate,
+ const struct intel_plane_state *curstate,
+ struct intel_wm_level *result)
+{
+ u16 pri_latency = dev_priv->display.wm.pri_latency[level];
+ u16 spr_latency = dev_priv->display.wm.spr_latency[level];
+ u16 cur_latency = dev_priv->display.wm.cur_latency[level];
+
+ /* WM1+ latency values stored in 0.5us units */
+ if (level > 0) {
+ pri_latency *= 5;
+ spr_latency *= 5;
+ cur_latency *= 5;
+ }
+
+ if (pristate) {
+ result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
+ pri_latency, level);
+ result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
+ }
+
+ if (sprstate)
+ result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
+
+ if (curstate)
+ result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
+
+ result->enable = true;
+}
+
+static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u64 sskpd;
+
+ i915->display.wm.num_levels = 5;
+
+ sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
+
+ wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
+ if (wm[0] == 0)
+ wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
+ wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
+ wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
+ wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
+ wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
+}
+
+static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 sskpd;
+
+ i915->display.wm.num_levels = 4;
+
+ sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
+
+ wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
+ wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
+ wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
+ wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
+}
+
+static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 mltr;
+
+ i915->display.wm.num_levels = 3;
+
+ mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
+
+ /* ILK primary LP0 latency is 700 ns */
+ wm[0] = 7;
+ wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
+ wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
+}
+
+static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
+ u16 wm[5])
+{
+ /* ILK sprite LP0 latency is 1300 ns */
+ if (DISPLAY_VER(dev_priv) == 5)
+ wm[0] = 13;
+}
+
+static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
+ u16 wm[5])
+{
+ /* ILK cursor LP0 latency is 1300 ns */
+ if (DISPLAY_VER(dev_priv) == 5)
+ wm[0] = 13;
+}
+
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+ u16 wm[5], u16 min)
+{
+ int level;
+
+ if (wm[0] >= min)
+ return false;
+
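+ /*
+ * WM0 latency is in 0.1us units whereas WM1+ use 0.5us units,
+ * hence the minimum is divided by 5 for the higher levels.
+ */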
+ wm[0] = max(wm[0], min);
+ for (level = 1; level < dev_priv->display.wm.num_levels; level++)
+ wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
+
+ return true;
+}
+
+static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
+{
+ bool changed;
+
+ /*
+ * The BIOS provided WM memory latency values are often
+ * inadequate for high resolution displays. Adjust them.
+ */
+ changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
+
+ if (!changed)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "WM latency values increased to avoid potential underruns\n");
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+}
+
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+{
+ /*
+ * On some SNB machines (Thinkpad X220 Tablet at least)
+ * LP3 usage can cause vblank interrupts to be lost.
+ * The DEIIR bit will go high but it looks like the CPU
+ * never gets interrupted.
+ *
+ * It's not clear whether other interrupt sources could
+ * be affected or if this is somehow limited to vblank
+ * interrupts only. To play it safe we disable LP3
+ * watermarks entirely.
+ */
+ if (dev_priv->display.wm.pri_latency[3] == 0 &&
+ dev_priv->display.wm.spr_latency[3] == 0 &&
+ dev_priv->display.wm.cur_latency[3] == 0)
+ return;
+
+ dev_priv->display.wm.pri_latency[3] = 0;
+ dev_priv->display.wm.spr_latency[3] = 0;
+ dev_priv->display.wm.cur_latency[3] = 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "LP3 watermarks disabled due to potential for lost interrupts\n");
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+}
+
+static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
+{
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else if (DISPLAY_VER(dev_priv) >= 6)
+ snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else
+ ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+
+ memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
+
+ intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
+
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+
+ if (DISPLAY_VER(dev_priv) == 6) {
+ snb_wm_latency_quirk(dev_priv);
+ snb_wm_lp3_irq_quirk(dev_priv);
+ }
+}
+
+static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
+ struct intel_pipe_wm *pipe_wm)
+{
+ /* LP0 watermark maximums depend on this pipe alone */
+ const struct intel_wm_config config = {
+ .num_pipes_active = 1,
+ .sprites_enabled = pipe_wm->sprites_enabled,
+ .sprites_scaled = pipe_wm->sprites_scaled,
+ };
+ struct ilk_wm_maximums max;
+
+ /* LP0 watermarks always use 1/2 DDB partitioning */
+ ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+
+ /* At least LP0 must be valid */
+ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
+ drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Compute new watermarks for the pipe */
+static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_pipe_wm *pipe_wm;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
+ const struct intel_plane_state *pristate = NULL;
+ const struct intel_plane_state *sprstate = NULL;
+ const struct intel_plane_state *curstate = NULL;
+ struct ilk_wm_maximums max;
+ int level, usable_level;
+
+ pipe_wm = &crtc_state->wm.ilk.optimal;
+
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ pristate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ sprstate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ curstate = plane_state;
+ }
+
+ pipe_wm->pipe_enabled = crtc_state->hw.active;
+ pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
+ pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
+
+ usable_level = dev_priv->display.wm.num_levels - 1;
+
+ /* ILK/SNB: LP2+ watermarks only w/o sprites */
+ if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
+ usable_level = 1;
+
+ /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+ if (pipe_wm->sprites_scaled)
+ usable_level = 0;
+
+ memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+ ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
+ pristate, sprstate, curstate, &pipe_wm->wm[0]);
+
+ if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+ return -EINVAL;
+
+ ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+
+ for (level = 1; level <= usable_level; level++) {
+ struct intel_wm_level *wm = &pipe_wm->wm[level];
+
+ ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
+ pristate, sprstate, curstate, wm);
+
+ /*
+ * Disable any watermark level that exceeds the
+ * register maximums since such watermarks are
+ * always invalid.
+ */
+ if (!ilk_validate_wm_level(level, &max, wm)) {
+ memset(wm, 0, sizeof(*wm));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Build a set of 'intermediate' watermark values that satisfy both the old
+ * state and the new state. These can be programmed to the hardware
+ * immediately.
+ */
+static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
+ const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
+ int level;
+
+ /*
+ * Start with the final, target watermarks, then combine with the
+ * currently active watermarks to get values that are safe both before
+ * and after the vblank.
+ */
+ *a = new_crtc_state->wm.ilk.optimal;
+ if (!new_crtc_state->hw.active ||
+ intel_crtc_needs_modeset(new_crtc_state) ||
+ state->skip_intermediate_wm)
+ return 0;
+
+ a->pipe_enabled |= b->pipe_enabled;
+ a->sprites_enabled |= b->sprites_enabled;
+ a->sprites_scaled |= b->sprites_scaled;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct intel_wm_level *a_wm = &a->wm[level];
+ const struct intel_wm_level *b_wm = &b->wm[level];
+
+ a_wm->enable &= b_wm->enable;
+ a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
+ a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
+ a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
+ a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
+ }
+
+ /*
+ * We need to make sure that these merged watermark values are
+ * actually a valid configuration themselves. If they're not,
+ * there's no safe way to transition from the old state to
+ * the new state, so we need to fail the atomic transaction.
+ */
+ if (!ilk_validate_pipe_wm(dev_priv, a))
+ return -EINVAL;
+
+ /*
+ * If our intermediate WMs are identical to the final WMs, then we can
+ * omit the post-vblank programming; only update if they differ.
+ */
+ if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
+ new_crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+ int level,
+ struct intel_wm_level *ret_wm)
+{
+ const struct intel_crtc *crtc;
+
+ ret_wm->enable = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
+ const struct intel_wm_level *wm = &active->wm[level];
+
+ if (!active->pipe_enabled)
+ continue;
+
+ /*
+ * The watermark values may have been used in the past,
+ * so we must maintain them in the registers for some
+ * time even if the level is now disabled.
+ */
+ if (!wm->enable)
+ ret_wm->enable = false;
+
+ ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+ ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+ ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+ ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
+ }
+}
+
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+ const struct intel_wm_config *config,
+ const struct ilk_wm_maximums *max,
+ struct intel_pipe_wm *merged)
+{
+ int level, num_levels = dev_priv->display.wm.num_levels;
+ int last_enabled_level = num_levels - 1;
+
+ /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
+ if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
+ config->num_pipes_active > 1)
+ last_enabled_level = 0;
+
+ /* ILK: FBC WM must be disabled always */
+ merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
+
+ /* merge each WM1+ level */
+ for (level = 1; level < num_levels; level++) {
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ ilk_merge_wm_level(dev_priv, level, wm);
+
+ if (level > last_enabled_level)
+ wm->enable = false;
+ else if (!ilk_validate_wm_level(level, max, wm))
+ /* make sure all following levels get disabled */
+ last_enabled_level = level - 1;
+
+ /*
+ * The spec says it is preferred to disable
+ * FBC WMs instead of disabling a WM level.
+ */
+ if (wm->fbc_val > max->fbc) {
+ if (wm->enable)
+ merged->fbc_wm_enabled = false;
+ wm->fbc_val = 0;
+ }
+ }
+
+ /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
+ if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
+ dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
+ for (level = 2; level < num_levels; level++) {
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ wm->enable = false;
+ }
+ }
+}
+
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+ /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
+ return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
+}
+
+/* The value we need to program into the WM_LPx latency field */
+static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+ int level)
+{
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ return 2 * level;
+ else
+ return dev_priv->display.wm.pri_latency[level];
+}
+
+static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+ const struct intel_pipe_wm *merged,
+ enum intel_ddb_partitioning partitioning,
+ struct ilk_wm_values *results)
+{
+ struct intel_crtc *crtc;
+ int level, wm_lp;
+
+ results->enable_fbc_wm = merged->fbc_wm_enabled;
+ results->partitioning = partitioning;
+
+ /* LP1+ register values */
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ const struct intel_wm_level *r;
+
+ level = ilk_wm_lp_to_level(wm_lp, merged);
+
+ r = &merged->wm[level];
+
+ /*
+ * Maintain the watermark values even if the level is
+ * disabled. Doing otherwise could cause underruns.
+ */
+ results->wm_lp[wm_lp - 1] =
+ WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_PRIMARY(r->pri_val) |
+ WM_LP_CURSOR(r->cur_val);
+
+ if (r->enable)
+ results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
+
+ if (DISPLAY_VER(dev_priv) >= 8)
+ results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
+ else
+ results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
+
+ results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
+
+ /*
+ * Always set WM_LP_SPRITE_ENABLE when spr_val != 0, even if the
+ * level is disabled. Doing otherwise could cause underruns.
+ */
+ if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
+ drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
+ results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
+ }
+ }
+
+ /* LP0 register values */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+ const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
+ const struct intel_wm_level *r = &pipe_wm->wm[0];
+
+ if (drm_WARN_ON(&dev_priv->drm, !r->enable))
+ continue;
+
+ results->wm_pipe[pipe] =
+ WM0_PIPE_PRIMARY(r->pri_val) |
+ WM0_PIPE_SPRITE(r->spr_val) |
+ WM0_PIPE_CURSOR(r->cur_val);
+ }
+}
+
+/*
+ * Find the result with the highest level enabled. Check fbc_wm_enabled in
+ * case both are at the same level. Prefer r1 in case they're the same.
+ */
+static struct intel_pipe_wm *
+ilk_find_best_result(struct drm_i915_private *dev_priv,
+ struct intel_pipe_wm *r1,
+ struct intel_pipe_wm *r2)
+{
+ int level, level1 = 0, level2 = 0;
+
+ for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
+ if (r1->wm[level].enable)
+ level1 = level;
+ if (r2->wm[level].enable)
+ level2 = level;
+ }
+
+ if (level1 == level2) {
+ if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
+ return r2;
+ else
+ return r1;
+ } else if (level1 > level2) {
+ return r1;
+ } else {
+ return r2;
+ }
+}
+
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
+
+static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+ const struct ilk_wm_values *old,
+ const struct ilk_wm_values *new)
+{
+ unsigned int dirty = 0;
+ enum pipe pipe;
+ int wm_lp;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+ dirty |= WM_DIRTY_PIPE(pipe);
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+ }
+
+ if (old->enable_fbc_wm != new->enable_fbc_wm) {
+ dirty |= WM_DIRTY_FBC;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ if (old->partitioning != new->partitioning) {
+ dirty |= WM_DIRTY_DDB;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ /* LP1+ watermarks already deemed dirty, no need to continue */
+ if (dirty & WM_DIRTY_LP_ALL)
+ return dirty;
+
+ /* Find the lowest numbered LP1+ watermark in need of an update... */
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+ old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+ break;
+ }
+
+ /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+ for (; wm_lp <= 3; wm_lp++)
+ dirty |= WM_DIRTY_LP(wm_lp);
+
+ return dirty;
+}
+
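+/*
+ * Disable, in hardware, any LP1+ watermarks that are both flagged dirty and
+ * currently enabled. They get re-enabled, if appropriate, the next time new
+ * values are programmed.
+ */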
+static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+ unsigned int dirty)
+{
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ bool changed = false;
+
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
+ previous->wm_lp[2] &= ~WM_LP_ENABLE;
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
+ previous->wm_lp[1] &= ~WM_LP_ENABLE;
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
+ previous->wm_lp[0] &= ~WM_LP_ENABLE;
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
+ changed = true;
+ }
+
+ /*
+ * Don't touch WM_LP_SPRITE_ENABLE here.
+ * Doing so could cause underruns.
+ */
+
+ return changed;
+}
+
+/*
+ * The spec says we shouldn't write when we don't need to, because every write
+ * causes WMs to be re-evaluated, expending some power.
+ */
+static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+ struct ilk_wm_values *results)
+{
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ unsigned int dirty;
+
+ dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+ if (!dirty)
+ return;
+
+ _ilk_disable_lp_wm(dev_priv, dirty);
+
+ if (dirty & WM_DIRTY_PIPE(PIPE_A))
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ if (dirty & WM_DIRTY_PIPE(PIPE_B))
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ if (dirty & WM_DIRTY_PIPE(PIPE_C))
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+
+ if (dirty & WM_DIRTY_DDB) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ WM_MISC_DATA_PARTITION_5_6);
+ else
+ intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ DISP_DATA_PARTITION_5_6);
+ }
+
+ if (dirty & WM_DIRTY_FBC)
+ intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+ results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+
+ if (dirty & WM_DIRTY_LP(1) &&
+ previous->wm_lp_spr[0] != results->wm_lp_spr[0])
+ intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
+
+ if (DISPLAY_VER(dev_priv) >= 7) {
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
+ intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
+ intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
+ }
+
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
+
+ dev_priv->display.wm.hw = *results;
+}
+
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
+{
+ return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+}
+
+static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+ struct intel_wm_config *config)
+{
+ struct intel_crtc *crtc;
+
+ /* Compute the currently _active_ config */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
+
+ if (!wm->pipe_enabled)
+ continue;
+
+ config->sprites_enabled |= wm->sprites_enabled;
+ config->sprites_scaled |= wm->sprites_scaled;
+ config->num_pipes_active++;
+ }
+}
+
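+/*
+ * Merge the active per-pipe watermarks, pick between the 1/2 and 5/6 DDB
+ * partitioning results, and write whatever changed to the hardware.
+ */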
+static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+ struct ilk_wm_maximums max;
+ struct intel_wm_config config = {};
+ struct ilk_wm_values results = {};
+ enum intel_ddb_partitioning partitioning;
+
+ ilk_compute_wm_config(dev_priv, &config);
+
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+
+ /* 5/6 split only in single pipe config on IVB+ */
+ if (DISPLAY_VER(dev_priv) >= 7 &&
+ config.num_pipes_active == 1 && config.sprites_enabled) {
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+
+ best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+ } else {
+ best_lp_wm = &lp_wm_1_2;
+ }
+
+ partitioning = (best_lp_wm == &lp_wm_1_2) ?
+ INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
+
+ ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+
+ ilk_write_wm_values(dev_priv, &results);
+}
+
+static void ilk_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
+ ilk_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void ilk_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
+ ilk_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
+ enum pipe pipe = crtc->pipe;
+
+ hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
+
+ memset(active, 0, sizeof(*active));
+
+ active->pipe_enabled = crtc->active;
+
+ if (active->pipe_enabled) {
+ u32 tmp = hw->wm_pipe[pipe];
+
+ /*
+ * For active pipes the LP0 watermark is marked as
+ * enabled, and LP1+ watermarks as disabled, since
+ * we can't really reverse-compute them when
+ * multiple pipes are active.
+ */
+ active->wm[0].enable = true;
+ active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
+ active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
+ active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
+ } else {
+ int level;
+
+ /*
+ * For inactive pipes, all watermark levels
+ * should be marked as enabled but zeroed,
+ * which is what we'd compute for them anyway.
+ */
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ active->wm[level].enable = true;
+ }
+
+ crtc->wm.active.ilk = *active;
+}
+
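+/*
+ * Pull every crtc and plane into the atomic state so the subsequent atomic
+ * check recomputes watermarks for the whole display.
+ */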
+static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(state->dev, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (crtc_state->hw.active) {
+ /*
+ * Preserve the inherited flag to avoid
+ * taking the full modeset path.
+ */
+ crtc_state->inherited = true;
+ }
+ }
+
+ drm_for_each_plane(plane, state->dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate what we think the watermarks should be for the state we've read
+ * out of the hardware and then immediately program those watermarks so that
+ * we ensure the hardware settings match our internal state.
+ *
+ * We can calculate what we think WMs should be by creating a duplicate of the
+ * current state (which was constructed during hardware readout) and running it
+ * through the atomic check code to calculate new watermark values in the
+ * state object.
+ */
+void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct drm_atomic_state *state;
+ struct intel_atomic_state *intel_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+ int i;
+
+ /* Only supported on platforms that use atomic watermark design */
+ if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+ return;
+
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (drm_WARN_ON(&dev_priv->drm, !state))
+ return;
+
+ intel_state = to_intel_atomic_state(state);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ state->acquire_ctx = &ctx;
+
+ /*
+ * Hardware readout is the only time we don't want to calculate
+ * intermediate watermarks (since we don't trust the current
+ * watermarks).
+ */
+ if (!HAS_GMCH(dev_priv))
+ intel_state->skip_intermediate_wm = true;
+
+ ret = ilk_sanitize_watermarks_add_affected(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_atomic_check(&dev_priv->drm, state);
+ if (ret)
+ goto fail;
+
+ /* Write calculated watermark values back */
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ crtc_state->wm.need_postvbl_update = true;
+ intel_optimize_watermarks(intel_state, crtc);
+
+ to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
+ }
+
+fail:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ /*
+ * If we fail here, it means that the hardware appears to be
+ * programmed in a way that shouldn't be possible, given our
+ * understanding of watermark requirements. This might mean a
+ * mistake in the hardware readout code or a mistake in the
+ * watermark calculations for a given platform. Raise a WARN
+ * so that this is noticeable.
+ *
+ * If this actually happens, we'll have to just leave the
+ * BIOS-programmed watermarks untouched and hope for the best.
+ */
+ drm_WARN(&dev_priv->drm, ret,
+ "Could not determine valid watermarks for inherited state\n");
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
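+/* Extract a single plane's watermark field from a DSPFW register value. */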
+#define _FW_WM(value, plane) \
+ (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
+#define _FW_WM_VLV(value, plane) \
+ (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
+
+static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+ struct g4x_wm_values *wm)
+{
+ u32 tmp;
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
+ wm->sr.plane = _FW_WM(tmp, SR);
+ wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
+ wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
+ wm->sr.fbc = _FW_WM(tmp, FBC_SR);
+ wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
+ wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
+ wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+ wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
+ wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
+}
+
+static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+ struct vlv_wm_values *wm)
+{
+ enum pipe pipe;
+ u32 tmp;
+
+ for_each_pipe(dev_priv, pipe) {
+ tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
+
+ wm->ddl[pipe].plane[PLANE_PRIMARY] =
+ (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].plane[PLANE_CURSOR] =
+ (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].plane[PLANE_SPRITE0] =
+ (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].plane[PLANE_SPRITE1] =
+ (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ }
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
+ wm->sr.plane = _FW_WM(tmp, SR);
+ wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
+ wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
+ wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
+ wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
+ } else {
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
+ }
+}
+
+#undef _FW_WM
+#undef _FW_WM_VLV
+
+static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
+ struct intel_crtc *crtc;
+
+ g4x_read_wm_values(dev_priv, wm);
+
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct g4x_wm_state *active = &crtc->wm.active.g4x;
+ struct g4x_pipe_wm *raw;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+ int level, max_level;
+
+ active->cxsr = wm->cxsr;
+ active->hpll_en = wm->hpll_en;
+ active->fbc_en = wm->fbc_en;
+
+ active->sr = wm->sr;
+ active->hpll = wm->hpll;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ active->wm.plane[plane_id] =
+ wm->pipe[pipe].plane[plane_id];
+ }
+
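+ /*
+ * The highest usable watermark level follows from which
+ * self-refresh modes the hardware has left enabled.
+ */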
+ if (wm->cxsr && wm->hpll_en)
+ max_level = G4X_WM_LEVEL_HPLL;
+ else if (wm->cxsr)
+ max_level = G4X_WM_LEVEL_SR;
+ else
+ max_level = G4X_WM_LEVEL_NORMAL;
+
+ level = G4X_WM_LEVEL_NORMAL;
+ raw = &crtc_state->wm.g4x.raw[level];
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ raw->plane[plane_id] = active->wm.plane[plane_id];
+
+ level = G4X_WM_LEVEL_SR;
+ if (level > max_level)
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ raw->plane[PLANE_PRIMARY] = active->sr.plane;
+ raw->plane[PLANE_CURSOR] = active->sr.cursor;
+ raw->plane[PLANE_SPRITE0] = 0;
+ raw->fbc = active->sr.fbc;
+
+ level = G4X_WM_LEVEL_HPLL;
+ if (level > max_level)
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ raw->plane[PLANE_PRIMARY] = active->hpll.plane;
+ raw->plane[PLANE_CURSOR] = active->hpll.cursor;
+ raw->plane[PLANE_SPRITE0] = 0;
+ raw->fbc = active->hpll.fbc;
+
+ level++;
+ out:
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ g4x_raw_plane_wm_set(crtc_state, level,
+ plane_id, USHRT_MAX);
+ g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
+
+ g4x_invalidate_wms(crtc, active, level);
+
+ crtc_state->wm.g4x.optimal = *active;
+ crtc_state->wm.g4x.intermediate = *active;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0]);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
+ wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
+ drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
+ str_yes_no(wm->fbc_en));
+}
+
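+/*
+ * Zero out the raw watermarks of planes that aren't visible and recompute
+ * the pipe watermarks, so the software state matches what's actually on
+ * screen.
+ */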
+static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv, plane->pipe);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ enum plane_id plane_id = plane->id;
+ int level;
+
+ if (plane_state->uapi.visible)
+ continue;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.g4x.raw[level];
+
+ raw->plane[plane_id] = 0;
+
+ if (plane_id == PLANE_PRIMARY)
+ raw->fbc = 0;
+ }
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int ret;
+
+ ret = _g4x_compute_pipe_wm(crtc_state);
+ drm_WARN_ON(&dev_priv->drm, ret);
+
+ crtc_state->wm.g4x.intermediate =
+ crtc_state->wm.g4x.optimal;
+ crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ }
+
+ g4x_program_watermarks(dev_priv);
+
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ g4x_wm_get_hw_state(i915);
+ g4x_wm_sanitize(i915);
+}
+
+static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
+ struct intel_crtc *crtc;
+ u32 val;
+
+ vlv_read_wm_values(dev_priv, wm);
+
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->level = VLV_WM_LEVEL_PM2;
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ vlv_punit_get(dev_priv);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ if (val & DSP_MAXFIFO_PM5_ENABLE)
+ wm->level = VLV_WM_LEVEL_PM5;
+
+ /*
+ * If DDR DVFS is disabled in the BIOS, Punit
+ * will never ack the request. So if that happens,
+ * assume we don't have to enable/disable DDR DVFS
+ * dynamically. To test that, just set the REQ_ACK
+ * bit to poke the Punit, but don't change the
+ * HIGH/LOW bits so that we don't actually change
+ * the current state.
+ */
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Punit not acking DDR DVFS request, "
+ "assuming DDR DVFS is disabled\n");
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
+ } else {
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+ wm->level = VLV_WM_LEVEL_DDR_DVFS;
+ }
+
+ vlv_punit_put(dev_priv);
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct vlv_wm_state *active = &crtc->wm.active.vlv;
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+ int level;
+
+ vlv_get_fifo_size(crtc_state);
+
+ active->num_levels = wm->level + 1;
+ active->cxsr = wm->cxsr;
+
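+ /*
+ * Recover the raw per-plane values by inverting the hw
+ * watermarks against each plane's FIFO allocation.
+ */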
+ for (level = 0; level < active->num_levels; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+
+ active->sr[level].plane = wm->sr.plane;
+ active->sr[level].cursor = wm->sr.cursor;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ active->wm[level].plane[plane_id] =
+ wm->pipe[pipe].plane[plane_id];
+
+ raw->plane[plane_id] =
+ vlv_invert_wm_value(active->wm[level].plane[plane_id],
+ fifo_state->plane[plane_id]);
+ }
+ }
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ vlv_raw_plane_wm_set(crtc_state, level,
+ plane_id, USHRT_MAX);
+ vlv_invalidate_wms(crtc, active, level);
+
+ crtc_state->wm.vlv.optimal = *active;
+ crtc_state->wm.vlv.intermediate = *active;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0],
+ wm->pipe[pipe].plane[PLANE_SPRITE1]);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+}
+
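+/* Same idea as g4x_wm_sanitize(): drop watermarks for invisible planes and reprogram. */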
+static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv, plane->pipe);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ enum plane_id plane_id = plane->id;
+ int level;
+
+ if (plane_state->uapi.visible)
+ continue;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+
+ raw->plane[plane_id] = 0;
+ }
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int ret;
+
+ ret = _vlv_compute_pipe_wm(crtc_state);
+ drm_WARN_ON(&dev_priv->drm, ret);
+
+ crtc_state->wm.vlv.intermediate =
+ crtc_state->wm.vlv.optimal;
+ crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ }
+
+ vlv_program_watermarks(dev_priv);
+
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ vlv_wm_get_hw_state(i915);
+ vlv_wm_sanitize(i915);
+}
+
+/*
+ * FIXME should probably kill this and improve
+ * the real watermark readout/sanitization instead
+ */
+static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+{
+ intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
+ intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
+ intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
+
+ /*
+ * Don't touch WM_LP_SPRITE_ENABLE here.
+ * Doing so could cause underruns.
+ */
+}
+
+static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_crtc *crtc;
+
+ ilk_init_lp_watermarks(dev_priv);
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ ilk_pipe_wm_get_hw_state(crtc);
+
+ hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
+
+ hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
+ if (DISPLAY_VER(dev_priv) >= 7) {
+ hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+ WM_MISC_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+ else if (IS_IVYBRIDGE(dev_priv))
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+ DISP_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+ hw->enable_fbc_wm =
+ !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
+static const struct intel_wm_funcs ilk_wm_funcs = {
+ .compute_pipe_wm = ilk_compute_pipe_wm,
+ .compute_intermediate_wm = ilk_compute_intermediate_wm,
+ .initial_watermarks = ilk_initial_watermarks,
+ .optimize_watermarks = ilk_optimize_watermarks,
+ .get_hw_state = ilk_wm_get_hw_state,
+};
+
+static const struct intel_wm_funcs vlv_wm_funcs = {
+ .compute_pipe_wm = vlv_compute_pipe_wm,
+ .compute_intermediate_wm = vlv_compute_intermediate_wm,
+ .initial_watermarks = vlv_initial_watermarks,
+ .optimize_watermarks = vlv_optimize_watermarks,
+ .atomic_update_watermarks = vlv_atomic_update_fifo,
+ .get_hw_state = vlv_wm_get_hw_state_and_sanitize,
+};
+
+static const struct intel_wm_funcs g4x_wm_funcs = {
+ .compute_pipe_wm = g4x_compute_pipe_wm,
+ .compute_intermediate_wm = g4x_compute_intermediate_wm,
+ .initial_watermarks = g4x_initial_watermarks,
+ .optimize_watermarks = g4x_optimize_watermarks,
+ .get_hw_state = g4x_wm_get_hw_state_and_sanitize,
+};
+
+static const struct intel_wm_funcs pnv_wm_funcs = {
+ .update_wm = pnv_update_wm,
+};
+
+static const struct intel_wm_funcs i965_wm_funcs = {
+ .update_wm = i965_update_wm,
+};
+
+static const struct intel_wm_funcs i9xx_wm_funcs = {
+ .update_wm = i9xx_update_wm,
+};
+
+static const struct intel_wm_funcs i845_wm_funcs = {
+ .update_wm = i845_update_wm,
+};
+
+static const struct intel_wm_funcs nop_funcs = {
+};
+
+void i9xx_wm_init(struct drm_i915_private *dev_priv)
+{
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ ilk_setup_wm_latency(dev_priv);
+ dev_priv->display.funcs.wm = &ilk_wm_funcs;
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ vlv_setup_wm_latency(dev_priv);
+ dev_priv->display.funcs.wm = &vlv_wm_funcs;
+ } else if (IS_G4X(dev_priv)) {
+ g4x_setup_wm_latency(dev_priv);
+ dev_priv->display.funcs.wm = &g4x_wm_funcs;
+ } else if (IS_PINEVIEW(dev_priv)) {
+ if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ drm_info(&dev_priv->drm,
+ "failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ intel_set_memory_cxsr(dev_priv, false);
+ dev_priv->display.funcs.wm = &nop_funcs;
+ } else {
+ dev_priv->display.funcs.wm = &pnv_wm_funcs;
+ }
+ } else if (DISPLAY_VER(dev_priv) == 4) {
+ dev_priv->display.funcs.wm = &i965_wm_funcs;
+ } else if (DISPLAY_VER(dev_priv) == 3) {
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ } else if (DISPLAY_VER(dev_priv) == 2) {
+ if (INTEL_NUM_PIPES(dev_priv) == 1)
+ dev_priv->display.funcs.wm = &i845_wm_funcs;
+ else
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ } else {
+ drm_err(&dev_priv->drm,
+ "unexpected fall-through in %s\n", __func__);
+ dev_priv->display.funcs.wm = &nop_funcs;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
new file mode 100644
index 000000000000..a7875cbcd05a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I9XX_WM_H__
+#define __I9XX_WM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+int ilk_wm_max_level(const struct drm_i915_private *i915);
+bool ilk_disable_lp_wm(struct drm_i915_private *i915);
+void ilk_wm_sanitize(struct drm_i915_private *i915);
+bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
+void i9xx_wm_init(struct drm_i915_private *i915);
+
+#endif /* __I9XX_WM_H__ */
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index ae14c794c4bc..50dcaa895854 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -45,6 +45,7 @@
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -207,7 +208,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 tmp, mode_flags;
+ u32 mode_flags;
enum port port;
mode_flags = crtc_state->mode_flags;
@@ -224,9 +225,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
else
return;
- tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
- tmp |= DSI_FRAME_UPDATE_REQUEST;
- intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
+ intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST);
}
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
@@ -234,7 +233,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
- u32 tmp;
+ u32 tmp, mask, val;
int lane;
for_each_dsi_phy(phy, intel_dsi->phys) {
@@ -242,56 +241,35 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
+ mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK;
+ val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE |
+ RTERM_SELECT(0x6);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
- tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
- tmp |= SCALING_MODE_SEL(0x2);
- tmp |= TAP2_DISABLE | TAP3_DISABLE;
- tmp |= RTERM_SELECT(0x6);
+ tmp &= ~mask;
+ tmp |= val;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
- tmp |= SCALING_MODE_SEL(0x2);
- tmp |= TAP2_DISABLE | TAP3_DISABLE;
- tmp |= RTERM_SELECT(0x6);
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
-
+ mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK;
+ val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) |
+ RCOMP_SCALAR(0x98);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
- tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
- RCOMP_SCALAR_MASK);
- tmp |= SWING_SEL_UPPER(0x2);
- tmp |= SWING_SEL_LOWER(0x2);
- tmp |= RCOMP_SCALAR(0x98);
+ tmp &= ~mask;
+ tmp |= val;
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
- tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
- RCOMP_SCALAR_MASK);
- tmp |= SWING_SEL_UPPER(0x2);
- tmp |= SWING_SEL_LOWER(0x2);
- tmp |= RCOMP_SCALAR(0x98);
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
-
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
- tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
- CURSOR_COEFF_MASK);
- tmp |= POST_CURSOR_1(0x0);
- tmp |= POST_CURSOR_2(0x0);
- tmp |= CURSOR_COEFF(0x3f);
- intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
-
- for (lane = 0; lane <= 3; lane++) {
- /* Bspec: must not use GRP register for write */
- tmp = intel_de_read(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy));
- tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
- CURSOR_COEFF_MASK);
- tmp |= POST_CURSOR_1(0x0);
- tmp |= POST_CURSOR_2(0x0);
- tmp |= CURSOR_COEFF(0x3f);
- intel_de_write(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy), tmp);
- }
+ mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK;
+ val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) |
+ CURSOR_COEFF(0x3f);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val);
+
+ /* Bspec: must not use GRP register for write */
+ for (lane = 0; lane <= 3; lane++)
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
+ mask, val);
}
}
@@ -300,9 +278,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1;
- dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+ /* FIXME: Move all DSS handling to intel_vdsc.c */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
+ } else {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ }
+
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -310,7 +300,6 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- u32 dss_ctl2;
u16 hactive = adjusted_mode->crtc_hdisplay;
u16 dl_buffer_depth;
@@ -323,16 +312,14 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
- dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
- dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
+ intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK,
+ RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth));
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
- intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
}
/* aka DSI 8X clock */
@@ -412,13 +399,10 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
- tmp |= COMBO_PHY_MODE_DSI;
- intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
+ 0, COMBO_PHY_MODE_DSI);
get_dsi_io_power_domains(dev_priv, intel_dsi);
}
@@ -444,26 +428,16 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* Step 4b(i) set loadgen select for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
- tmp &= ~LOADGEN_SELECT;
- intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
- for (lane = 0; lane <= 3; lane++) {
- tmp = intel_de_read(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy));
- tmp &= ~LOADGEN_SELECT;
- if (lane != 2)
- tmp |= LOADGEN_SELECT;
- intel_de_write(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy), tmp);
- }
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0);
+ for (lane = 0; lane <= 3; lane++)
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
+ LOADGEN_SELECT, lane != 2 ? LOADGEN_SELECT : 0);
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
- tmp &= ~FRC_LATENCY_OPTIM_MASK;
- tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy),
+ FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5));
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
@@ -471,12 +445,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) {
- tmp = intel_de_read(dev_priv,
- ICL_PORT_PCS_DW1_AUX(phy));
- tmp &= ~LATENCY_OPTIM_MASK;
- tmp |= LATENCY_OPTIM_VAL(0);
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
- tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
+ LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
tmp = intel_de_read(dev_priv,
ICL_PORT_PCS_DW1_LN(0, phy));
@@ -501,9 +471,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~COMMON_KEEPER_EN;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
- tmp &= ~COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0);
}
/*
@@ -511,20 +479,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* Note: loadgen select program is done
* as part of lane phy sequence configuration
*/
- for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
- tmp |= SUS_CLOCK_CONFIG;
- intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp);
- }
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG);
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0);
}
/* Program swing and de-emphasis */
@@ -535,9 +498,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp |= TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN);
}
}
@@ -545,13 +506,10 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- tmp |= DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
@@ -567,17 +525,13 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
enum phy phy;
/* Program T-INIT master registers */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
- tmp &= ~DSI_T_INIT_MASTER_MASK;
- tmp |= intel_dsi->init_count;
- intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port),
+ DSI_T_INIT_MASTER_MASK, intel_dsi->init_count);
/* Program DPHY clock lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -608,31 +562,22 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) == 11) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv,
- DPHY_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- intel_de_write(dev_priv,
- DPHY_TA_TIMING_PARAM(port),
- tmp);
+ intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port),
+ TA_SURE_MASK,
+ TA_SURE_OVERRIDE | TA_SURE(0));
/* shadow register inside display core */
- tmp = intel_de_read(dev_priv,
- DSI_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- intel_de_write(dev_priv,
- DSI_TA_TIMING_PARAM(port), tmp);
+ intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port),
+ TA_SURE_MASK,
+ TA_SURE_OVERRIDE | TA_SURE(0));
}
}
}
if (IS_JSL_EHL(dev_priv)) {
- for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy));
- tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
- intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp);
- }
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy),
+ 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
}
}
@@ -824,11 +769,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans));
- tmp |= PORT_SYNC_MODE_ENABLE;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans),
+ 0, PORT_SYNC_MODE_ENABLE);
}
/* configure stream splitting */
@@ -958,8 +900,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
/* program TRANS_HTOTAL register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, HTOTAL(dsi_trans),
- (hactive - 1) | ((htotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_HTOTAL(dsi_trans),
+ HACTIVE(hactive - 1) | HTOTAL(htotal - 1));
}
/* TRANS_HSYNC register to be programmed only for video mode */
@@ -981,8 +923,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, HSYNC(dsi_trans),
- (hsync_start - 1) | ((hsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_HSYNC(dsi_trans),
+ HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1));
}
}
@@ -995,8 +937,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
*/
- intel_de_write(dev_priv, VTOTAL(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VTOTAL(dsi_trans),
+ VACTIVE(vactive - 1) | VTOTAL(vtotal - 1));
}
if (vsync_end < vsync_start || vsync_end > vtotal)
@@ -1009,8 +951,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNC(dsi_trans),
- (vsync_start - 1) | ((vsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VSYNC(dsi_trans),
+ VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1));
}
}
@@ -1023,17 +965,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans),
+ intel_de_write(dev_priv, TRANS_VSYNCSHIFT(dsi_trans),
vsync_shift);
}
}
- /* program TRANS_VBLANK register, should be same as vtotal programmed */
+ /*
+ * program TRANS_VBLANK register, should be same as vtotal programmed
+ *
+ * FIXME get rid of these local hacks and do it right,
+ * this will not handle eg. delayed vblank correctly.
+ */
if (DISPLAY_VER(dev_priv) >= 12) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VBLANK(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VBLANK(dsi_trans),
+ VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1));
}
}
}
@@ -1044,17 +991,14 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- tmp |= PIPECONF_ENABLE;
- intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), 0, TRANSCONF_ENABLE);
/* wait for transcoder to be enabled */
- if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
- PIPECONF_STATE_ENABLE, 10))
+ if (intel_de_wait_for_set(dev_priv, TRANSCONF(dsi_trans),
+ TRANSCONF_STATE_ENABLE, 10))
drm_err(&dev_priv->drm,
"DSI transcoder not enabled\n");
}
@@ -1067,7 +1011,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
+ u32 hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
/*
* escape clock count calculation:
@@ -1087,26 +1031,23 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* program hst_tx_timeout */
- tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans));
- tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
- tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
- intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans),
+ HSTX_TIMEOUT_VALUE_MASK,
+ HSTX_TIMEOUT_VALUE(hs_tx_timeout));
/* FIXME: DSI_CALIB_TO */
/* program lp_rx_host timeout */
- tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans));
- tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
- tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
- intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans),
+ LPRX_TIMEOUT_VALUE_MASK,
+ LPRX_TIMEOUT_VALUE(lp_rx_timeout));
/* FIXME: DSI_PWAIT_TO */
/* program turn around timeout */
- tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans));
- tmp &= ~TA_TIMEOUT_VALUE_MASK;
- tmp |= TA_TIMEOUT_VALUE(ta_timeout);
- intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans),
+ TA_TIMEOUT_VALUE_MASK,
+ TA_TIMEOUT_VALUE(ta_timeout));
}
}
@@ -1310,19 +1251,16 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/* disable transcoder */
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- tmp &= ~PIPECONF_ENABLE;
- intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), TRANSCONF_ENABLE, 0);
/* wait for transcoder to be disabled */
- if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
- PIPECONF_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dsi_trans),
+ TRANSCONF_STATE_ENABLE, 50))
drm_err(&dev_priv->drm,
"DSI trancoder not disabled\n");
}
@@ -1350,11 +1288,9 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* disable periodic update mode */
if (is_cmd_mode(intel_dsi)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
- tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE;
- intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port),
+ DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0);
}
/* put dsi link in ULPS */
@@ -1374,20 +1310,16 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* disable ddi function */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
- tmp &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans),
+ TRANS_DDI_FUNC_ENABLE, 0);
}
/* disable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans));
- tmp &= ~PORT_SYNC_MODE_ENABLE;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans),
+ PORT_SYNC_MODE_ENABLE, 0);
}
}
}
@@ -1396,14 +1328,11 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- tmp &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
@@ -1420,7 +1349,6 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
intel_wakeref_t wakeref;
@@ -1434,11 +1362,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
}
/* set mode to DDI */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
- tmp &= ~COMBO_PHY_MODE_DSI;
- intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
+ COMBO_PHY_MODE_DSI, 0);
}
static void gen11_dsi_disable(struct intel_atomic_state *state,
@@ -1754,8 +1680,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
goto out;
}
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- ret = tmp & PIPECONF_ENABLE;
+ tmp = intel_de_read(dev_priv, TRANSCONF(dsi_trans));
+ ret = tmp & TRANSCONF_ENABLE;
}
out:
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -2043,7 +1969,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL);
+ encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
mutex_lock(&dev_priv->drm.mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
@@ -2054,7 +1981,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, NULL);
intel_backlight_setup(intel_connector, INVALID_PIPE);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 10e1fc9d0698..719a60e278f3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -34,10 +34,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include "gt/intel_rps.h"
-
+#include "i915_config.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
+#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -362,6 +362,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
crtc_state->scaled_planes &= ~BIT(plane->id);
crtc_state->nv12_planes &= ~BIT(plane->id);
crtc_state->c8_planes &= ~BIT(plane->id);
+ crtc_state->async_flip_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
crtc_state->data_rate_y[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
@@ -581,8 +582,10 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
intel_plane_is_scaled(new_plane_state))))
new_crtc_state->disable_lp_wm = true;
- if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
+ if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) {
new_crtc_state->do_async_flip = true;
+ new_crtc_state->async_flip_planes |= BIT(plane->id);
+ }
return 0;
}
@@ -937,64 +940,6 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
return 0;
}
-struct wait_rps_boost {
- struct wait_queue_entry wait;
-
- struct drm_crtc *crtc;
- struct i915_request *request;
-};
-
-static int do_rps_boost(struct wait_queue_entry *_wait,
- unsigned mode, int sync, void *key)
-{
- struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct i915_request *rq = wait->request;
-
- /*
- * If we missed the vblank, but the request is already running it
- * is reasonable to assume that it will complete before the next
- * vblank without our intervention, so leave RPS alone.
- */
- if (!i915_request_started(rq))
- intel_rps_boost(rq);
- i915_request_put(rq);
-
- drm_crtc_vblank_put(wait->crtc);
-
- list_del(&wait->wait.entry);
- kfree(wait);
- return 1;
-}
-
-static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
- struct wait_rps_boost *wait;
-
- if (!dma_fence_is_i915(fence))
- return;
-
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
- return;
-
- if (drm_crtc_vblank_get(crtc))
- return;
-
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- drm_crtc_vblank_put(crtc);
- return;
- }
-
- wait->request = to_request(dma_fence_get(fence));
- wait->crtc = crtc;
-
- wait->wait.func = do_rps_boost;
- wait->wait.flags = 0;
-
- add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
-}
-
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @_plane: drm plane to prepare for
@@ -1085,13 +1030,13 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
dma_resv_iter_begin(&cursor, obj->base.resv,
DMA_RESV_USAGE_WRITE);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
+ intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ fence);
}
dma_resv_iter_end(&cursor);
} else {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- new_plane_state->uapi.fence);
+ intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
}
/*
@@ -1102,10 +1047,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* that are not quite steady state without resorting to forcing
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
- if (!state->rps_interactive) {
- intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true);
- state->rps_interactive = true;
- }
+ intel_display_rps_mark_interactive(dev_priv, state, true);
return 0;
@@ -1136,10 +1078,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (!obj)
return;
- if (state->rps_interactive) {
- intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false);
- state->rps_interactive = false;
- }
+ intel_display_rps_mark_interactive(dev_priv, state, false);
/* Should only be called after a successful intel_prepare_plane_fb()! */
intel_plane_unpin_fb(old_plane_state);
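
The open-coded RPS interactive tracking deleted above now sits behind the new intel_display_rps helpers. A minimal sketch of what the wrapper plausibly looks like, reconstructed only from the logic removed in this hunk; the signature is assumed from the new call sites and the real implementation lives in intel_display_rps.c, which is not shown here:

/*
 * Sketch only: reconstructed from the code removed above, not taken
 * from intel_display_rps.c itself.
 */
void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
					struct intel_atomic_state *state,
					bool interactive)
{
	/* only toggle when the cached state actually changes */
	if (state->rps_interactive == interactive)
		return;

	intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
	state->rps_interactive = interactive;
}
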
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 626c47e96a6d..65151f5dcb15 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -71,6 +71,8 @@ struct intel_audio_funcs {
void (*audio_codec_disable)(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
+ void (*audio_codec_get_config)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
};
/* DP N/M table */
@@ -314,6 +316,27 @@ static int g4x_eld_buffer_size(struct drm_i915_private *i915)
return REG_FIELD_GET(G4X_ELD_BUFFER_SIZE_MASK, tmp);
}
+static void g4x_audio_codec_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u32 *eld = (u32 *)crtc_state->eld;
+ int eld_buffer_size, len, i;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, G4X_AUD_CNTL_ST);
+ if ((tmp & G4X_ELD_VALID) == 0)
+ return;
+
+ intel_de_rmw(i915, G4X_AUD_CNTL_ST, G4X_ELD_ADDRESS_MASK, 0);
+
+ eld_buffer_size = g4x_eld_buffer_size(i915);
+ len = min_t(int, sizeof(crtc_state->eld) / 4, eld_buffer_size);
+
+ for (i = 0; i < len; i++)
+ eld[i] = intel_de_read(i915, G4X_HDMIW_HDMIEDID);
+}
+
static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -335,8 +358,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
- const u32 *eld = (const u32 *)connector->eld;
+ const u32 *eld = (const u32 *)crtc_state->eld;
int eld_buffer_size, len, i;
intel_crtc_wait_for_next_vblank(crtc);
@@ -345,7 +367,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
G4X_ELD_VALID | G4X_ELD_ADDRESS_MASK, 0);
eld_buffer_size = g4x_eld_buffer_size(i915);
- len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
+ len = min(drm_eld_size(crtc_state->eld) / 4, eld_buffer_size);
for (i = 0; i < len; i++)
intel_de_write(i915, G4X_HDMIW_HDMIEDID, eld[i]);
@@ -459,17 +481,6 @@ hsw_audio_config_update(struct intel_encoder *encoder,
hsw_hdmi_audio_config_update(encoder, crtc_state);
}
-/* ELD buffer size in dwords */
-static int hsw_eld_buffer_size(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder)
-{
- u32 tmp;
-
- tmp = intel_de_read(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
-
- return REG_FIELD_GET(IBX_ELD_BUFFER_SIZE_MASK, tmp);
-}
-
static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -570,8 +581,7 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
unsigned int hblank_early_prog, samples_room;
unsigned int val;
@@ -581,32 +591,32 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
val = intel_de_read(i915, AUD_CONFIG_BE);
if (DISPLAY_VER(i915) == 11)
- val |= HBLANK_EARLY_ENABLE_ICL(pipe);
+ val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder);
else if (DISPLAY_VER(i915) >= 12)
- val |= HBLANK_EARLY_ENABLE_TGL(pipe);
+ val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder);
if (crtc_state->dsc.compression_enable &&
crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
crtc_state->hw.adjusted_mode.vdisplay >= 2160) {
/* Get hblank early enable value required */
- val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val &= ~HBLANK_START_COUNT_MASK(cpu_transcoder);
hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state);
if (hblank_early_prog < 32)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_32);
else if (hblank_early_prog < 64)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_64);
else if (hblank_early_prog < 96)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_96);
else
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_128);
/* Get samples room value required */
- val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(cpu_transcoder);
samples_room = calc_samples_room(crtc_state);
if (samples_room < 3)
- val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
+ val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, samples_room);
else /* Program 0 i.e "All Samples available in buffer" */
- val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
+ val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0);
}
intel_de_write(i915, AUD_CONFIG_BE, val);
@@ -618,10 +628,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- const u32 *eld = (const u32 *)connector->eld;
- int eld_buffer_size, len, i;
mutex_lock(&i915->display.audio.mutex);
@@ -639,25 +646,10 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_ELD_VALID(cpu_transcoder), 0);
- /* Reset ELD address */
- intel_de_rmw(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder),
- IBX_ELD_ADDRESS_MASK, 0);
-
- eld_buffer_size = hsw_eld_buffer_size(i915, cpu_transcoder);
- len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
-
- for (i = 0; i < len; i++)
- intel_de_write(i915, HSW_AUD_EDID_DATA(cpu_transcoder), eld[i]);
- for (; i < eld_buffer_size; i++)
- intel_de_write(i915, HSW_AUD_EDID_DATA(cpu_transcoder), 0);
-
- drm_WARN_ON(&i915->drm,
- (intel_de_read(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder)) &
- IBX_ELD_ADDRESS_MASK) != 0);
-
- /* ELD valid */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
- 0, AUDIO_ELD_VALID(cpu_transcoder));
+ /*
+ * The audio component is used to convey the ELD
+ * instead of using the hardware ELD buffer.
+ */
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
@@ -665,47 +657,33 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
mutex_unlock(&i915->display.audio.mutex);
}
-struct ilk_audio_regs {
+struct ibx_audio_regs {
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
};
-static void ilk_audio_regs_init(struct drm_i915_private *i915,
+static void ibx_audio_regs_init(struct drm_i915_private *i915,
enum pipe pipe,
- struct ilk_audio_regs *regs)
+ struct ibx_audio_regs *regs)
{
- if (HAS_PCH_IBX(i915)) {
- regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
- regs->aud_config = IBX_AUD_CFG(pipe);
- regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
- regs->aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
regs->aud_config = VLV_AUD_CFG(pipe);
regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else {
+ } else if (HAS_PCH_CPT(i915)) {
regs->hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
regs->aud_config = CPT_AUD_CFG(pipe);
regs->aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ } else if (HAS_PCH_IBX(i915)) {
+ regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ regs->aud_config = IBX_AUD_CFG(pipe);
+ regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ regs->aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
}
}
-/* ELD buffer size in dwords */
-static int ilk_eld_buffer_size(struct drm_i915_private *i915,
- enum pipe pipe)
-{
- struct ilk_audio_regs regs;
- u32 tmp;
-
- ilk_audio_regs_init(i915, pipe, &regs);
-
- tmp = intel_de_read(i915, regs.aud_cntl_st);
-
- return REG_FIELD_GET(IBX_ELD_BUFFER_SIZE_MASK, tmp);
-}
-
-static void ilk_audio_codec_disable(struct intel_encoder *encoder,
+static void ibx_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -713,12 +691,12 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
- struct ilk_audio_regs regs;
+ struct ibx_audio_regs regs;
if (drm_WARN_ON(&i915->drm, port == PORT_A))
return;
- ilk_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(i915, pipe, &regs);
mutex_lock(&i915->display.audio.mutex);
@@ -741,25 +719,22 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
intel_crtc_wait_for_next_vblank(crtc);
}
-static void ilk_audio_codec_enable(struct intel_encoder *encoder,
+static void ibx_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
- const u32 *eld = (const u32 *)connector->eld;
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
- int eld_buffer_size, len, i;
- struct ilk_audio_regs regs;
+ struct ibx_audio_regs regs;
if (drm_WARN_ON(&i915->drm, port == PORT_A))
return;
intel_crtc_wait_for_next_vblank(crtc);
- ilk_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(i915, pipe, &regs);
mutex_lock(&i915->display.audio.mutex);
@@ -767,24 +742,10 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
intel_de_rmw(i915, regs.aud_cntrl_st2,
IBX_ELD_VALID(port), 0);
- /* Reset ELD address */
- intel_de_rmw(i915, regs.aud_cntl_st,
- IBX_ELD_ADDRESS_MASK, 0);
-
- eld_buffer_size = ilk_eld_buffer_size(i915, pipe);
- len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
-
- for (i = 0; i < len; i++)
- intel_de_write(i915, regs.hdmiw_hdmiedid, eld[i]);
- for (; i < eld_buffer_size; i++)
- intel_de_write(i915, regs.hdmiw_hdmiedid, 0);
-
- drm_WARN_ON(&i915->drm,
- (intel_de_read(i915, regs.aud_cntl_st) & IBX_ELD_ADDRESS_MASK) != 0);
-
- /* ELD valid */
- intel_de_rmw(i915, regs.aud_cntrl_st2,
- 0, IBX_ELD_VALID(port));
+ /*
+ * The audio component is used to convey the ELD
+ * instead of using the hardware ELD buffer.
+ */
/* Enable timestamps */
intel_de_rmw(i915, regs.aud_config,
@@ -809,6 +770,30 @@ void intel_audio_sdp_split_update(struct intel_encoder *encoder,
crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
}
+bool intel_audio_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!connector->eld[0]) {
+ drm_dbg_kms(&i915->drm,
+ "Bogus ELD on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ return false;
+ }
+
+ BUILD_BUG_ON(sizeof(crtc_state->eld) != sizeof(connector->eld));
+ memcpy(crtc_state->eld, connector->eld, sizeof(crtc_state->eld));
+
+ crtc_state->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+ return true;
+}
+
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
* @encoder: encoder on which to enable audio
@@ -825,27 +810,19 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_audio_state *audio_state;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
if (!crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on pipe %c, %u bytes ELD\n",
- connector->base.id, connector->name,
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on [CRTC:%d:%s], %u bytes ELD\n",
+ connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
- pipe_name(pipe), drm_eld_size(connector->eld));
-
- /* FIXME precompute the ELD in .compute_config() */
- if (!connector->eld[0])
- drm_dbg_kms(&i915->drm,
- "Bogus ELD on [CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
-
- connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+ crtc->base.base.id, crtc->base.name,
+ drm_eld_size(crtc_state->eld));
if (i915->display.funcs.audio)
i915->display.funcs.audio->audio_codec_enable(encoder,
@@ -853,22 +830,25 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
conn_state);
mutex_lock(&i915->display.audio.mutex);
- encoder->audio_connector = connector;
- /* referred in audio callbacks */
- i915->display.audio.encoder_map[pipe] = encoder;
+ audio_state = &i915->display.audio.state[cpu_transcoder];
+
+ audio_state->encoder = encoder;
+ BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld));
+ memcpy(audio_state->eld, crtc_state->eld, sizeof(audio_state->eld));
+
mutex_unlock(&i915->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
- /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
- pipe = -1;
+ cpu_transcoder = -1;
acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
- (int)port, (int)pipe);
+ (int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, pipe, port, connector->eld,
+ intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld,
crtc_state->port_clock,
intel_crtc_has_dp_encoder(crtc_state));
}
@@ -889,16 +869,18 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_connector *connector = old_conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+ struct intel_audio_state *audio_state;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
if (!old_crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on pipe %c\n",
- connector->base.id, connector->name,
- encoder->base.base.id, encoder->base.name, pipe_name(pipe));
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on [CRTC:%d:%s]\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ crtc->base.base.id, crtc->base.name);
if (i915->display.funcs.audio)
i915->display.funcs.audio->audio_codec_disable(encoder,
@@ -906,35 +888,71 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
old_conn_state);
mutex_lock(&i915->display.audio.mutex);
- encoder->audio_connector = NULL;
- i915->display.audio.encoder_map[pipe] = NULL;
+
+ audio_state = &i915->display.audio.state[cpu_transcoder];
+
+ audio_state->encoder = NULL;
+ memset(audio_state->eld, 0, sizeof(audio_state->eld));
+
mutex_unlock(&i915->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
- /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
- pipe = -1;
+ cpu_transcoder = -1;
acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
- (int)port, (int)pipe);
+ (int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, pipe, port, NULL, 0, false);
+ intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false);
+}
+
+static void intel_acomp_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_audio_state *audio_state;
+
+ mutex_lock(&i915->display.audio.mutex);
+
+ audio_state = &i915->display.audio.state[cpu_transcoder];
+
+ if (audio_state->encoder)
+ memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld));
+
+ mutex_unlock(&i915->display.audio.mutex);
+}
+
+void intel_audio_codec_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ if (!crtc_state->has_audio)
+ return;
+
+ if (i915->display.funcs.audio)
+ i915->display.funcs.audio->audio_codec_get_config(encoder, crtc_state);
}
static const struct intel_audio_funcs g4x_audio_funcs = {
.audio_codec_enable = g4x_audio_codec_enable,
.audio_codec_disable = g4x_audio_codec_disable,
+ .audio_codec_get_config = g4x_audio_codec_get_config,
};
-static const struct intel_audio_funcs ilk_audio_funcs = {
- .audio_codec_enable = ilk_audio_codec_enable,
- .audio_codec_disable = ilk_audio_codec_disable,
+static const struct intel_audio_funcs ibx_audio_funcs = {
+ .audio_codec_enable = ibx_audio_codec_enable,
+ .audio_codec_disable = ibx_audio_codec_disable,
+ .audio_codec_get_config = intel_acomp_get_config,
};
static const struct intel_audio_funcs hsw_audio_funcs = {
.audio_codec_enable = hsw_audio_codec_enable,
.audio_codec_disable = hsw_audio_codec_disable,
+ .audio_codec_get_config = intel_acomp_get_config,
};
/**
@@ -945,12 +963,11 @@ void intel_audio_hooks_init(struct drm_i915_private *i915)
{
if (IS_G4X(i915))
i915->display.funcs.audio = &g4x_audio_funcs;
- else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display.funcs.audio = &ilk_audio_funcs;
+ else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915) ||
+ HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915))
+ i915->display.funcs.audio = &ibx_audio_funcs;
else if (IS_HASWELL(i915) || DISPLAY_VER(i915) >= 8)
i915->display.funcs.audio = &hsw_audio_funcs;
- else if (HAS_PCH_SPLIT(i915))
- i915->display.funcs.audio = &ilk_audio_funcs;
}
struct aud_ts_cdclk_m_n {
@@ -1128,59 +1145,59 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
}
/*
- * get the intel_encoder according to the parameter port and pipe
- * intel_encoder is saved by the index of pipe
- * MST & (pipe >= 0): return the audio.encoder_map[pipe],
+ * get the intel audio state according to the parameter port and cpu_transcoder
+ * MST & (cpu_transcoder >= 0): return the audio.state[cpu_transcoder],
* when port is matched
- * MST & (pipe < 0): this is invalid
- * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry)
+ * MST & (cpu_transcoder < 0): this is invalid
+ * Non-MST & (cpu_transcoder >= 0): only cpu_transcoder = 0 (the first device entry)
* will get the right intel_encoder with port matched
- * Non-MST & (pipe < 0): get the right intel_encoder with port matched
+ * Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched
*/
-static struct intel_encoder *get_saved_enc(struct drm_i915_private *i915,
- int port, int pipe)
+static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
+ int port, int cpu_transcoder)
{
/* MST */
- if (pipe >= 0) {
+ if (cpu_transcoder >= 0) {
+ struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
if (drm_WARN_ON(&i915->drm,
- pipe >= ARRAY_SIZE(i915->display.audio.encoder_map)))
+ cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state)))
return NULL;
- encoder = i915->display.audio.encoder_map[pipe];
- /*
- * when bootup, audio driver may not know it is
- * MST or not. So it will poll all the port & pipe
- * combinations
- */
+ audio_state = &i915->display.audio.state[cpu_transcoder];
+ encoder = audio_state->encoder;
+
if (encoder && encoder->port == port &&
encoder->type == INTEL_OUTPUT_DP_MST)
- return encoder;
+ return audio_state;
}
/* Non-MST */
- if (pipe > 0)
+ if (cpu_transcoder > 0)
return NULL;
- for_each_pipe(i915, pipe) {
+ for_each_cpu_transcoder(i915, cpu_transcoder) {
+ struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- encoder = i915->display.audio.encoder_map[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
+ encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
encoder->type != INTEL_OUTPUT_DP_MST)
- return encoder;
+ return audio_state;
}
return NULL;
}
static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
- int pipe, int rate)
+ int cpu_transcoder, int rate)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
struct i915_audio_component *acomp = i915->display.audio.component;
+ const struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
@@ -1192,20 +1209,22 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
cookie = i915_audio_component_get_power(kdev);
mutex_lock(&i915->display.audio.mutex);
- /* 1. get the pipe */
- encoder = get_saved_enc(i915, port, pipe);
- if (!encoder || !encoder->base.crtc) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n",
- port_name(port));
+ audio_state = find_audio_state(i915, port, cpu_transcoder);
+ if (!audio_state) {
+ drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
+ encoder = audio_state->encoder;
+
+ /* FIXME stop using the legacy crtc pointer */
crtc = to_intel_crtc(encoder->base.crtc);
- /* port must be valid now, otherwise the pipe will be invalid */
+ /* port must be valid now, otherwise the cpu_transcoder will be invalid */
acomp->aud_sample_rate[port] = rate;
+ /* FIXME get rid of the crtc->config stuff */
hsw_audio_config_update(encoder, crtc->config);
unlock:
@@ -1215,28 +1234,26 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
}
static int i915_audio_component_get_eld(struct device *kdev, int port,
- int pipe, bool *enabled,
+ int cpu_transcoder, bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
- struct intel_encoder *intel_encoder;
- const u8 *eld;
- int ret = -EINVAL;
+ const struct intel_audio_state *audio_state;
+ int ret = 0;
mutex_lock(&i915->display.audio.mutex);
- intel_encoder = get_saved_enc(i915, port, pipe);
- if (!intel_encoder) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n",
- port_name(port));
+ audio_state = find_audio_state(i915, port, cpu_transcoder);
+ if (!audio_state) {
+ drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
mutex_unlock(&i915->display.audio.mutex);
- return ret;
+ return -EINVAL;
}
- ret = 0;
- *enabled = intel_encoder->audio_connector != NULL;
+ *enabled = audio_state->encoder != NULL;
if (*enabled) {
- eld = intel_encoder->audio_connector->eld;
+ const u8 *eld = audio_state->eld;
+
ret = drm_eld_size(eld);
memcpy(buf, eld, min(max_bytes, ret));
}
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 1b87257c6a17..07d034a981e9 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -6,18 +6,25 @@
#ifndef __INTEL_AUDIO_H__
#define __INTEL_AUDIO_H__
+#include <linux/types.h>
+
struct drm_connector_state;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
void intel_audio_hooks_init(struct drm_i915_private *dev_priv);
+bool intel_audio_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
+void intel_audio_codec_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
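
The matching readout direction: intel_audio_codec_get_config() fills crtc_state->eld back from the per-transcoder audio state (or from the hardware buffer on g4x). A hypothetical sketch of an encoder .get_config hook using it; example_port_has_audio() is made up for illustration and is not part of this series:

/* Hypothetical readout sketch; example_port_has_audio() is invented here. */
static void example_get_config(struct intel_encoder *encoder,
			       struct intel_crtc_state *crtc_state)
{
	crtc_state->has_audio = example_port_has_audio(encoder, crtc_state);

	/* no-op unless has_audio is set, see intel_audio_codec_get_config() */
	intel_audio_codec_get_config(encoder, crtc_state);
}
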
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 5b7da72c95b8..2e8f17c04522 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -105,7 +105,8 @@ void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight PWM = %d\n",
+ connector->base.base.id, connector->base.name, val);
panel->backlight.pwm_funcs->set(conn_state, val);
}
@@ -283,7 +284,8 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n",
+ connector->base.base.id, connector->base.name, level);
panel->backlight.funcs->set(conn_state, level);
}
@@ -345,27 +347,24 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
*/
tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "cpu backlight was enabled, disabling\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n",
+ connector->base.base.id, connector->base.name);
intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
}
- tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1);
- intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
- intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0);
- tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1);
- intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -376,12 +375,10 @@ static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_st
static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev);
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BLC_PWM_CTL2);
- intel_de_write(i915, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0);
}
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -389,12 +386,10 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0);
}
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -402,19 +397,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ BXT_BLC_PWM_ENABLE, 0);
- if (panel->backlight.controller == 1) {
- val = intel_de_read(i915, UTIL_PIN_CTL);
- val &= ~UTIL_PIN_ENABLE;
- intel_de_write(i915, UTIL_PIN_CTL, val);
- }
+ if (panel->backlight.controller == 1)
+ intel_de_rmw(i915, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0);
}
static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -422,13 +412,11 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ BXT_BLC_PWM_ENABLE, 0);
}
static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
@@ -458,7 +446,8 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
* another client is not activated.
*/
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg_kms(&i915->drm, "Skipping backlight disable on vga switch\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n",
+ connector->base.base.id, connector->base.name);
return;
}
@@ -478,30 +467,24 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pch_ctl1, pch_ctl2, schicken;
+ u32 pch_ctl1, pch_ctl2;
pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "pch backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
}
- if (HAS_PCH_LPT(i915)) {
- schicken = intel_de_read(i915, SOUTH_CHICKEN2);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= LPT_PWM_GRANULARITY;
- else
- schicken &= ~LPT_PWM_GRANULARITY;
- intel_de_write(i915, SOUTH_CHICKEN2, schicken);
- } else {
- schicken = intel_de_read(i915, SOUTH_CHICKEN1);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= SPT_PWM_GRANULARITY;
- else
- schicken &= ~SPT_PWM_GRANULARITY;
- intel_de_write(i915, SOUTH_CHICKEN1, schicken);
- }
+ if (HAS_PCH_LPT(i915))
+ intel_de_rmw(i915, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
+ panel->backlight.alternate_pwm_increment ?
+ LPT_PWM_GRANULARITY : 0);
+ else
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY,
+ panel->backlight.alternate_pwm_increment ?
+ SPT_PWM_GRANULARITY : 0);
pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
@@ -533,14 +516,16 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "cpu backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
cpu_ctl2 &= ~BLM_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
}
pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "pch backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
}
@@ -578,7 +563,8 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl = intel_de_read(i915, BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
intel_de_write(i915, BLC_PWM_CTL, 0);
}
@@ -618,7 +604,8 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_CTL2, ctl2);
}
@@ -653,7 +640,8 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
@@ -685,7 +673,8 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.controller == 1) {
val = intel_de_read(i915, UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
- drm_dbg_kms(&i915->drm, "util pin already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] utility pin already enabled\n",
+ connector->base.base.id, connector->base.name);
val &= ~UTIL_PIN_ENABLE;
intel_de_write(i915, UTIL_PIN_CTL, val);
}
@@ -699,7 +688,8 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
@@ -1270,6 +1260,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1297,6 +1291,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1335,6 +1333,10 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = val != 0;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1364,6 +1366,10 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1392,6 +1398,10 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control (on pipe %c)\n",
+ connector->base.base.id, connector->base.name, pipe_name(pipe));
+
return 0;
}
@@ -1428,9 +1438,38 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control (controller=%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.controller);
+
return 0;
}
+static int cnp_num_backlight_controllers(struct drm_i915_private *i915)
+{
+ if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ return 1;
+
+ if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ return 2;
+
+ return 1;
+}
+
+static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int controller)
+{
+ if (controller < 0 || controller >= cnp_num_backlight_controllers(i915))
+ return false;
+
+ if (controller == 1 &&
+ INTEL_PCH_TYPE(i915) >= PCH_ICP &&
+ INTEL_PCH_TYPE(i915) < PCH_MTP)
+ return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
+
+ return true;
+}
+
static int
cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
@@ -1440,10 +1479,15 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
/*
* CNP has the BXT implementation of backlight, but with only one
- * controller. TODO: ICP has multiple controllers but we only use
- * controller 0 for now.
+ * controller. ICP+ can have two controllers, depending on pin muxing.
*/
- panel->backlight.controller = 0;
+ panel->backlight.controller = connector->panel.vbt.backlight.controller;
+ if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) {
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.controller);
+ panel->backlight.controller = 0;
+ }
pwm_ctl = intel_de_read(i915,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -1462,6 +1506,11 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control (controller=%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.controller);
+
return 0;
}
@@ -1483,8 +1532,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
}
if (IS_ERR(panel->backlight.pwm)) {
- drm_err(&i915->drm, "Failed to get the %s PWM chip\n",
- desc);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n",
+ connector->base.base.id, connector->base.name, desc);
panel->backlight.pwm = NULL;
return -ENODEV;
}
@@ -1501,7 +1550,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
level = intel_backlight_invert_pwm_level(connector, level);
panel->backlight.pwm_enabled = true;
- drm_dbg_kms(&i915->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ connector->base.base.id, connector->base.name,
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
get_vbt_pwm_freq(connector), level);
} else {
@@ -1510,8 +1560,10 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
NSEC_PER_SEC / get_vbt_pwm_freq(connector);
}
- drm_info(&i915->drm, "Using %s PWM for LCD backlight control\n",
- desc);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using %s PWM for backlight control\n",
+ connector->base.base.id, connector->base.name, desc);
+
return 0;
}
@@ -1554,8 +1606,9 @@ static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_s
static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct intel_panel *panel = &connector->panel;
- int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ int ret;
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0)
return ret;
@@ -1595,10 +1648,12 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
if (!connector->panel.vbt.backlight.present) {
if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) {
drm_dbg_kms(&i915->drm,
- "no backlight present per VBT, but present per quirk\n");
+ "[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n",
+ connector->base.base.id, connector->base.name);
} else {
drm_dbg_kms(&i915->drm,
- "no backlight present per VBT\n");
+ "[CONNECTOR:%d:%s] no backlight present per VBT\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
}
@@ -1614,16 +1669,16 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
if (ret) {
drm_dbg_kms(&i915->drm,
- "failed to setup backlight for connector %s\n",
- connector->base.name);
+ "[CONNECTOR:%d:%s] failed to setup backlight\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
panel->backlight.present = true;
drm_dbg_kms(&i915->drm,
- "Connector %s backlight initialized, %s, brightness %u/%u\n",
- connector->base.name,
+ "[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n",
+ connector->base.base.id, connector->base.name,
str_enabled_disabled(panel->backlight.enabled),
panel->backlight.level, panel->backlight.max);
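
Many of the backlight hunks above replace explicit read/modify/write sequences with intel_de_rmw(). The equivalence, assuming intel_de_rmw(i915, reg, clear, set) reads the register, clears the bits in clear, ORs in set, and writes the result back:

/* before: open-coded read-modify-write */
tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);

/* after: equivalent single helper call (clear BLM_PWM_ENABLE, set nothing) */
intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0);
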
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 55544d484318..e54febd34ca9 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,16 +25,15 @@
*
*/
-#include <drm/drm_edid.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
-
-#include "display/intel_display.h"
-#include "display/intel_display_types.h"
-#include "display/intel_gmbus.h"
+#include <drm/drm_edid.h>
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_gmbus.h"
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
@@ -620,14 +619,14 @@ static void dump_pnp_id(struct drm_i915_private *i915,
static int opregion_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid, bool use_fallback)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
return intel_opregion_get_panel_type(i915);
}
static int vbt_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid, bool use_fallback)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
const struct bdb_lvds_options *lvds_options;
@@ -652,12 +651,13 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
static int pnpid_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid, bool use_fallback)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
const struct lvds_pnp_id *edid_id;
struct lvds_pnp_id edid_id_nodate;
+ const struct edid *edid = drm_edid_raw(drm_edid); /* FIXME */
int i, best = -1;
if (!edid)
@@ -701,7 +701,7 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
static int fallback_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid, bool use_fallback)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
return use_fallback ? 0 : -1;
}
@@ -715,13 +715,13 @@ enum panel_type {
static int get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid, bool use_fallback)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
struct {
const char *name;
int (*get_panel_type)(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid, bool use_fallback);
+ const struct drm_edid *drm_edid, bool use_fallback);
int panel_type;
} panel_types[] = {
[PANEL_TYPE_OPREGION] = {
@@ -745,7 +745,7 @@ static int get_panel_type(struct drm_i915_private *i915,
for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata,
- edid, use_fallback);
+ drm_edid, use_fallback);
drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
panel_types[i].panel_type != 0xff);
@@ -1033,6 +1033,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ panel->vbt.backlight.controller = 0;
if (i915->display.vbt.version >= 191) {
size_t exp_size;
@@ -1083,6 +1084,12 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.min_brightness = entry->min_brightness;
}
+ if (i915->display.vbt.version >= 239)
+ panel->vbt.backlight.hdr_dpcd_refresh_timeout =
+ DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100);
+ else
+ panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30;
+
drm_dbg_kms(&i915->drm,
"VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u, controller %u\n",
@@ -1201,9 +1208,7 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i)
static void
parse_sdvo_device_mapping(struct drm_i915_private *i915)
{
- struct sdvo_device_mapping *mapping;
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
int count = 0;
/*
@@ -1216,7 +1221,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
}
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
+ struct sdvo_device_mapping *mapping;
if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) {
@@ -2074,7 +2080,6 @@ parse_compression_parameters(struct drm_i915_private *i915)
{
const struct bdb_compression_parameters *params;
struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
u16 block_size;
int index;
@@ -2099,7 +2104,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
if (!child->compression_enable)
continue;
@@ -2225,14 +2230,14 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
{
- const struct intel_bios_encoder_data *devdata;
enum port port;
if (!ddc_pin)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->display.vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata =
+ i915->display.vbt.ports[port];
if (devdata && ddc_pin == devdata->child.ddc_pin)
return port;
@@ -2291,14 +2296,14 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
{
- const struct intel_bios_encoder_data *devdata;
enum port port;
if (!aux_ch)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->display.vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata =
+ i915->display.vbt.ports[port];
if (devdata && aux_ch == devdata->child.aux_channel)
return port;
@@ -2467,6 +2472,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
dvo_port);
}
+static enum port
+dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+{
+ switch (dvo_port) {
+ case DVO_PORT_MIPIA:
+ return PORT_A;
+ case DVO_PORT_MIPIC:
+ if (DISPLAY_VER(i915) >= 11)
+ return PORT_B;
+ else
+ return PORT_C;
+ default:
+ return PORT_NONE;
+ }
+}
+
static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
{
switch (vbt_max_link_rate) {
@@ -2505,7 +2526,7 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
}
}
-static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 216)
return 0;
@@ -2516,7 +2537,7 @@ static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *de
return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
}
-static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 244)
return 0;
@@ -2570,14 +2591,27 @@ intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata)
return devdata->child.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
}
-static bool
+bool
intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
{
return intel_bios_encoder_supports_dp(devdata) &&
devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
}
-static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
+static bool
+intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
+}
+
+bool
+intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon;
+}
+
+/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
+int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 158)
return -1;
@@ -2585,7 +2619,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de
return devdata->child.hdmi_level_shifter_value;
}
-static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
+int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 204)
return 0;
@@ -2627,7 +2661,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
{
struct drm_i915_private *i915 = devdata->i915;
const struct child_device_config *child = &devdata->child;
- bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
is_dvi = intel_bios_encoder_supports_dvi(devdata);
@@ -2635,44 +2669,45 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
is_crt = intel_bios_encoder_supports_crt(devdata);
is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
is_edp = intel_bios_encoder_supports_edp(devdata);
+ is_dsi = intel_bios_encoder_supports_dsi(devdata);
supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
supports_tbt = intel_bios_encoder_supports_tbt(devdata);
drm_dbg_kms(&i915->drm,
- "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
- port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
- HAS_LSPCON(i915) && child->lspcon,
+ "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
+ port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
+ intel_bios_encoder_is_lspcon(devdata),
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
- hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata);
+ hdmi_level_shift = intel_bios_hdmi_level_shift(devdata);
if (hdmi_level_shift >= 0) {
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI level shift: %d\n",
port_name(port), hdmi_level_shift);
}
- max_tmds_clock = _intel_bios_max_tmds_clock(devdata);
+ max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata);
if (max_tmds_clock)
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI max TMDS clock: %d kHz\n",
port_name(port), max_tmds_clock);
/* I_boost config for SKL and above */
- dp_boost_level = intel_bios_encoder_dp_boost_level(devdata);
+ dp_boost_level = intel_bios_dp_boost_level(devdata);
if (dp_boost_level)
drm_dbg_kms(&i915->drm,
"Port %c VBT (e)DP boost level: %d\n",
port_name(port), dp_boost_level);
- hdmi_boost_level = intel_bios_encoder_hdmi_boost_level(devdata);
+ hdmi_boost_level = intel_bios_hdmi_boost_level(devdata);
if (hdmi_boost_level)
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI boost level: %d\n",
port_name(port), hdmi_boost_level);
- dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata);
+ dp_max_link_rate = intel_bios_dp_max_link_rate(devdata);
if (dp_max_link_rate)
drm_dbg_kms(&i915->drm,
"Port %c VBT DP max link rate: %d\n",
@@ -2694,6 +2729,8 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
enum port port;
port = dvo_port_to_port(i915, child->dvo_port);
+ if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
+ port = dsi_dvo_port_to_port(i915, child->dvo_port);
if (port == PORT_NONE)
return;
@@ -2785,7 +2822,7 @@ parse_general_definitions(struct drm_i915_private *i915)
expected_size = 37;
} else if (i915->display.vbt.version <= 215) {
expected_size = 38;
- } else if (i915->display.vbt.version <= 237) {
+ } else if (i915->display.vbt.version <= 250) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
@@ -3187,7 +3224,7 @@ out:
static void intel_bios_init_panel(struct drm_i915_private *i915,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid,
+ const struct drm_edid *drm_edid,
bool use_fallback)
{
/* already have it? */
@@ -3197,7 +3234,7 @@ static void intel_bios_init_panel(struct drm_i915_private *i915,
}
panel->vbt.panel_type = get_panel_type(i915, devdata,
- edid, use_fallback);
+ drm_edid, use_fallback);
if (panel->vbt.panel_type < 0) {
drm_WARN_ON(&i915->drm, use_fallback);
return;
@@ -3228,9 +3265,9 @@ void intel_bios_init_panel_early(struct drm_i915_private *i915,
void intel_bios_init_panel_late(struct drm_i915_private *i915,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid)
{
- intel_bios_init_panel(i915, panel, devdata, edid, true);
+ intel_bios_init_panel(i915, panel, devdata, drm_edid, true);
}
/**
@@ -3280,7 +3317,6 @@ void intel_bios_fini_panel(struct intel_panel *panel)
bool intel_bios_is_tv_present(struct drm_i915_private *i915)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
if (!i915->display.vbt.int_tv_support)
return false;
@@ -3289,7 +3325,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
return true;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
/*
* If the device type is not TV, continue.
@@ -3323,13 +3359,12 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
if (list_empty(&i915->display.vbt.display_devices))
return true;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -3371,25 +3406,22 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
+
if (WARN_ON(!has_ddi_port_info(i915)))
return true;
- return i915->display.vbt.ports[port];
-}
+ if (!is_port_valid(i915, port))
+ return false;
-/**
- * intel_bios_is_port_edp - is the device in given port eDP
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if the device in %port is eDP.
- */
-bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port)
-{
- const struct intel_bios_encoder_data *devdata =
- intel_bios_encoder_data_lookup(i915, port);
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ const struct child_device_config *child = &devdata->child;
+
+ if (dvo_port_to_port(i915, child->dvo_port) == port)
+ return true;
+ }
- return devdata && intel_bios_encoder_supports_edp(devdata);
+ return false;
}
static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata)
@@ -3431,30 +3463,24 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
enum port *port)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
- u8 dvo_port;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
+ u8 dvo_port = child->dvo_port;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- dvo_port = child->dvo_port;
-
- if (dvo_port == DVO_PORT_MIPIA ||
- (dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
- (dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
- if (port)
- *port = dvo_port - DVO_PORT_MIPIA;
- return true;
- } else if (dvo_port == DVO_PORT_MIPIB ||
- dvo_port == DVO_PORT_MIPIC ||
- dvo_port == DVO_PORT_MIPID) {
+ if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
drm_dbg_kms(&i915->drm,
"VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
+ continue;
}
+
+ if (port)
+ *port = dsi_dvo_port_to_port(i915, dvo_port);
+ return true;
}
return false;
@@ -3531,15 +3557,14 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+ if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
if (!devdata->dsc)
return false;
@@ -3553,73 +3578,10 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
return false;
}
-/**
- * intel_bios_is_port_hpd_inverted - is HPD inverted for %port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if HPD should be inverted for %port.
- */
-bool
-intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- if (drm_WARN_ON_ONCE(&i915->drm,
- !IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
- return false;
-
- return devdata && devdata->child.hpd_invert;
-}
-
-/**
- * intel_bios_is_lspcon_present - if LSPCON is attached on %port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if LSPCON is present on this port
- */
-bool
-intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
-}
-
-/**
- * intel_bios_is_lane_reversal_needed - if lane reversal needed on port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if port requires lane reversal
- */
-bool
-intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- return devdata && devdata->child.lane_reversal;
-}
-
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
- enum port port)
+static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel)
{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
enum aux_ch aux_ch;
- if (!devdata || !devdata->child.aux_channel) {
- aux_ch = (enum aux_ch)port;
-
- drm_dbg_kms(&i915->drm,
- "using AUX %c for port %c (platform default)\n",
- aux_ch_name(aux_ch), port_name(port));
- return aux_ch;
- }
-
/*
* RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
* map to DDI A,B,TC1,TC2 respectively.
@@ -3627,7 +3589,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
* ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E
* map to DDI A,TC1,TC2,TC3,TC4 respectively.
*/
- switch (devdata->child.aux_channel) {
+ switch (aux_channel) {
case DP_AUX_A:
aux_ch = AUX_CH_A;
break;
@@ -3688,35 +3650,23 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_I;
break;
default:
- MISSING_CASE(devdata->child.aux_channel);
+ MISSING_CASE(aux_channel);
aux_ch = AUX_CH_A;
break;
}
- drm_dbg_kms(&i915->drm, "using AUX %c for port %c (VBT)\n",
- aux_ch_name(aux_ch), port_name(port));
-
return aux_ch;
}
-int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_max_tmds_clock(devdata);
-}
-
-/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
-int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
+enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+ if (!devdata || !devdata->child.aux_channel)
+ return AUX_CH_NONE;
- return _intel_bios_hdmi_level_shift(devdata);
+ return map_aux_ch(devdata->i915, devdata->child.aux_channel);
}
-int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
@@ -3724,7 +3674,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd
return translate_iboost(devdata->child.dp_iboost_level);
}
-int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
+int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
@@ -3732,31 +3682,12 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de
return translate_iboost(devdata->child.hdmi_iboost_level);
}
-int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_dp_max_link_rate(devdata);
-}
-
-int intel_bios_dp_max_lane_count(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_dp_max_lane_count(devdata);
-}
-
-int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
+int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
if (!devdata || !devdata->child.ddc_pin)
return 0;
- return map_ddc_pin(i915, devdata->child.ddc_pin);
+ return map_ddc_pin(devdata->i915, devdata->child.ddc_pin);
}
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
@@ -3769,6 +3700,16 @@ bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devda
return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt;
}
+bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && devdata->child.lane_reversal;
+}
+
+bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && devdata->child.hpd_invert;
+}
+
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index ff1fdd2e0c1c..8a0730c9b48c 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -32,12 +32,13 @@
#include <linux/types.h>
+struct drm_edid;
struct drm_i915_private;
-struct edid;
struct intel_bios_encoder_data;
struct intel_crtc_state;
struct intel_encoder;
struct intel_panel;
+enum aux_ch;
enum port;
enum intel_backlight_type {
@@ -238,7 +239,7 @@ void intel_bios_init_panel_early(struct drm_i915_private *dev_priv,
void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid);
+ const struct drm_edid *drm_edid);
void intel_bios_fini_panel(struct intel_panel *panel);
void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
@@ -248,21 +249,9 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
-bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
- enum port port);
-bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
- enum port port);
-bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
- enum port port);
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
-int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
-int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
-int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
-int intel_bios_dp_max_lane_count(struct intel_encoder *encoder);
-int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
@@ -272,9 +261,19 @@ intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata);
-int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata);
-int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata);
+enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata);
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 1c236f02b380..202321ffbe2a 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -119,6 +119,32 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
+static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+{
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ u16 qgv_points = 0, psf_points = 0;
+
+ /*
+ * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
+ * it with failure if we try masking any unadvertised points.
+ * So need to operate only with those returned from PCode.
+ */
+ if (num_qgv_points > 0)
+ qgv_points = GENMASK(num_qgv_points - 1, 0);
+
+ if (num_psf_gv_points > 0)
+ psf_points = GENMASK(num_psf_gv_points - 1, 0);
+
+ return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
+}
+
+static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+{
+ return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+ ICL_PCODE_REQ_QGV_PT_MASK);
+}
+
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
u32 points_mask)
{
@@ -136,6 +162,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
return ret;
}
+ dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+ I915_SAGV_ENABLED : I915_SAGV_DISABLED;
+
return 0;
}
@@ -965,26 +994,6 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
return 0;
}
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
-{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
- u16 qgv_points = 0, psf_points = 0;
-
- /*
- * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
- * it with failure if we try masking any unadvertised points.
- * So need to operate only with those returned from PCode.
- */
- if (num_qgv_points > 0)
- qgv_points = GENMASK(num_qgv_points - 1, 0);
-
- if (num_psf_gv_points > 0)
- psf_points = GENMASK(num_psf_gv_points - 1, 0);
-
- return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
-}
-
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
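A minimal user-space sketch of the SAGV heuristic added above, assuming points_mask carries the QGV points being masked off: SAGV is recorded as disabled exactly when the request leaves a single QGV point usable, i.e. the remaining mask is a power of two.

#include <stdbool.h>
#include <stdio.h>

static bool sketch_sagv_enabled(unsigned int enabled_qgv_points)
{
	/* is_power_of_2() equivalent: exactly one bit set */
	bool single_point = enabled_qgv_points &&
			    !(enabled_qgv_points & (enabled_qgv_points - 1));

	return !single_point;
}

int main(void)
{
	printf("%d\n", sketch_sagv_enabled(0x7)); /* three points left -> 1 (enabled) */
	printf("%d\n", sketch_sagv_enabled(0x4)); /* one point left   -> 0 (disabled) */
	return 0;
}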
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index cb7ee3a24a58..f20292143745 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -8,7 +8,7 @@
#include <drm/drm_atomic.h>
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_global_state.h"
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 0c107a38f9d0..084a483f9776 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1319,11 +1319,35 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
+static const struct intel_cdclk_vals rplu_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 },
{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
{}
@@ -1801,6 +1825,13 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
return true;
}
+static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv)
+{
+ return ((IS_DG2(dev_priv) || IS_METEORLAKE(dev_priv)) &&
+ dev_priv->display.cdclk.hw.vco > 0 &&
+ HAS_CDCLK_SQUASH(dev_priv));
+}
+
static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
@@ -1815,9 +1846,13 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
if (dev_priv->display.cdclk.hw.vco != vco)
adlp_cdclk_pll_crawl(dev_priv, vco);
- } else if (DISPLAY_VER(dev_priv) >= 11)
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ /* wa_15010685871: dg2, mtl */
+ if (pll_enable_wa_needed(dev_priv))
+ dg2_cdclk_squash_program(dev_priv, 0);
+
icl_cdclk_pll_update(dev_priv, vco);
- else
+ } else
bxt_cdclk_pll_update(dev_priv, vco);
waveform = cdclk_squash_waveform(dev_priv, cdclk);
@@ -3353,6 +3388,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
/* Wa_22011320316:adl-p[a0] */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
+ else if (IS_ADLP_RPLU(dev_priv))
+ dev_priv->display.cdclk.table = rplu_cdclk_table;
else
dev_priv->display.cdclk.table = adlp_cdclk_table;
} else if (IS_ROCKETLAKE(dev_priv)) {
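The new RPL-U rows can be sanity-checked against the relation the numbers encode, vco = refclk * ratio and cdclk = vco / divider; a small standalone check over a few entries copied from rplu_cdclk_table[] above:

#include <assert.h>
#include <stdio.h>

struct row { int refclk, cdclk, divider, ratio; };

int main(void)
{
	static const struct row rows[] = {
		{ 19200, 172800, 3, 27 },
		{ 19200, 652800, 2, 68 },
		{ 24000, 480000, 2, 40 },
		{ 38400, 480000, 2, 25 },
		{ 38400, 652800, 2, 34 },
	};

	for (unsigned int i = 0; i < sizeof(rows) / sizeof(rows[0]); i++) {
		int vco = rows[i].refclk * rows[i].ratio;

		assert(vco / rows[i].divider == rows[i].cdclk);
		printf("refclk %d * ratio %d = vco %d, / %d = cdclk %d\n",
		       rows[i].refclk, rows[i].ratio, vco,
		       rows[i].divider, rows[i].cdclk);
	}

	return 0;
}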
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index c674879a84a5..51e2f6a11ce4 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_global_state.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index d57631b0bb9a..a6dd08598233 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -257,7 +257,7 @@ static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(i915) >= 11)
return false;
- /* pre-hsw have PIPECONF_COLOR_RANGE_SELECT */
+ /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915))
return false;
@@ -624,7 +624,7 @@ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
- /* update PIPECONF GAMMA_MODE */
+ /* update TRANSCONF GAMMA_MODE */
i9xx_set_pipeconf(crtc_state);
}
@@ -633,7 +633,7 @@ static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- /* update PIPECONF GAMMA_MODE */
+ /* update TRANSCONF GAMMA_MODE */
ilk_set_pipeconf(crtc_state);
intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
@@ -847,17 +847,6 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
intel_de_write_fw(i915, reg, val);
}
-static void ilk_lut_write_indexed(const struct intel_crtc_state *crtc_state,
- i915_reg_t reg, u32 val)
-{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- if (crtc_state->dsb)
- intel_dsb_indexed_reg_write(crtc_state->dsb, reg, val);
- else
- intel_de_write_fw(i915, reg, val);
-}
-
static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
@@ -962,8 +951,8 @@ static void bdw_load_lut_10(const struct intel_crtc_state *crtc_state,
prec_index);
for (i = 0; i < lut_size; i++)
- ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_10(&lut[i]));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_10(&lut[i]));
/*
* Reset the index, otherwise it prevents the legacy palette to be
@@ -1093,13 +1082,13 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
- ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe),
- lut[i].green);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
+ lut[i].green);
}
/* Clamp values > 1.0. */
while (i++ < glk_degamma_lut_size(i915))
- ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
}
@@ -1165,10 +1154,10 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
@@ -1204,10 +1193,10 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/*
@@ -1225,10 +1214,10 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
@@ -1267,8 +1256,11 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
break;
}
- if (crtc_state->dsb)
- intel_dsb_commit(crtc_state->dsb);
+ if (crtc_state->dsb) {
+ intel_dsb_finish(crtc_state->dsb);
+ intel_dsb_commit(crtc_state->dsb, false);
+ intel_dsb_wait(crtc_state->dsb);
+ }
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
@@ -1391,7 +1383,10 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
/* FIXME DSB has issues loading LUTs, disable it for now */
return;
- crtc_state->dsb = intel_dsb_prepare(crtc);
+ if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
+ return;
+
+ crtc_state->dsb = intel_dsb_prepare(crtc, 1024);
}
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
@@ -1511,6 +1506,8 @@ intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane->id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
/* plane control register changes blocked by CxSR */
if (HAS_GMCH(i915))
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 8b870b2dd4f9..922a6d87b553 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -78,14 +78,11 @@ static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
enum phy phy)
{
const struct icl_procmon *procmon;
- u32 val;
procmon = icl_get_procmon_ref_values(dev_priv, phy);
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy));
- val &= ~((0xff << 16) | 0xff);
- val |= procmon->dw1;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW1(phy),
+ (0xff << 16) | 0xff, procmon->dw1);
intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
@@ -236,8 +233,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy),
- DCC_MODE_SELECT_MASK,
- DCC_MODE_SELECT_CONTINUOSLY);
+ DCC_MODE_SELECT_MASK, RUN_DCC_ONCE);
}
ret &= icl_verify_procmon_ref_values(dev_priv, phy);
@@ -267,7 +263,6 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
int lane_count, bool lane_reversal)
{
u8 lane_mask;
- u32 val;
if (is_dsi) {
drm_WARN_ON(&dev_priv->drm, lane_reversal);
@@ -308,10 +303,8 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
- val &= ~PWR_DOWN_LN_MASK;
- val |= lane_mask;
- intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy),
+ PWR_DOWN_LN_MASK, lane_mask);
}
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
@@ -360,25 +353,19 @@ skip_phy_misc:
val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
val &= ~DCC_MODE_SELECT_MASK;
- val |= DCC_MODE_SELECT_CONTINUOSLY;
+ val |= RUN_DCC_ONCE;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
}
icl_set_procmon_ref_values(dev_priv, phy);
- if (phy_is_master(dev_priv, phy)) {
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
- val |= IREFGEN;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
- }
-
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
- val |= COMP_INIT;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
+ if (phy_is_master(dev_priv, phy))
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW8(phy),
+ 0, IREFGEN);
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
- val |= CL_POWER_DOWN_ENABLE;
- intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ 0, CL_POWER_DOWN_ENABLE);
}
}
@@ -387,8 +374,6 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
enum phy phy;
for_each_combo_phy_reverse(dev_priv, phy) {
- u32 val;
-
if (phy == PHY_A &&
!icl_combo_phy_verify_state(dev_priv, phy)) {
if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
@@ -410,14 +395,11 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
- val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
- val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
+ intel_de_rmw(dev_priv, ICL_PHY_MISC(phy), 0,
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN);
skip_phy_misc:
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
- val &= ~COMP_INIT;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0);
}
}
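The conversions above fold open-coded read/modify/write sequences into intel_de_rmw(); a rough sketch of the pattern being replaced (the skip-the-write-when-unchanged detail is an assumption of this sketch, not something the patch shows):

static u32 sketch_de_rmw(struct drm_i915_private *i915, i915_reg_t reg,
			 u32 clear, u32 set)
{
	u32 old, val;

	old = intel_de_read(i915, reg);
	val = (old & ~clear) | set;
	if (val != old)		/* assumed: skip the write when nothing changes */
		intel_de_write(i915, reg, val);

	return old;
}

So intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0) clears COMP_INIT, and intel_de_rmw(..., 0, COMP_INIT) sets it, matching the removed sequences.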
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index 2ed65193ca19..b0983edccf3f 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -90,8 +90,8 @@
#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
-#define DCC_MODE_SELECT_MASK (0x3 << 20)
-#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20)
+#define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20)
+#define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0)
#define COMMON_KEEPER_EN (1 << 26)
#define LATENCY_OPTIM_MASK (0x3 << 2)
#define LATENCY_OPTIM_VAL(x) ((x) << 2)
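A standalone sketch of what the new macros evaluate to for this field (simplified; the real REG_GENMASK()/REG_FIELD_PREP() add compile-time checking). Note the change is functional as well as notational: the old define programmed the continuous DCC mode (field value 0x3), while RUN_DCC_ONCE programs field value 0.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define SKETCH_FIELD_PREP(mask, val) \
	((((uint32_t)(val)) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t mask = SKETCH_GENMASK(21, 20);	/* DCC_MODE_SELECT_MASK */

	printf("mask         = 0x%08x\n", mask);			/* 0x00300000 == 0x3 << 20 */
	printf("RUN_DCC_ONCE = 0x%08x\n", SKETCH_FIELD_PREP(mask, 0));	/* 0x00000000 */
	printf("old value    = 0x%08x\n", 0x3u << 20);			/* continuous mode */

	return 0;
}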
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 562da3b741e2..257afac34839 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -95,13 +95,10 @@ void intel_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- kfree(intel_connector->detect_edid);
+ drm_edid_free(intel_connector->detect_edid);
intel_hdcp_cleanup(intel_connector);
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- kfree(intel_connector->edid);
-
intel_panel_fini(intel_connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 7267ffc7f539..8f2ebead0826 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -260,7 +260,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
ilk_pfit_disable(old_crtc_state);
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
@@ -300,7 +300,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
hsw_fdi_link_train(encoder, crtc_state);
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
}
static void hsw_enable_crt(struct intel_atomic_state *state,
@@ -678,10 +678,11 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
+intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
u32 save_bclrpat;
u32 save_vtotal;
u32 vtotal, vactive;
@@ -693,25 +694,25 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
- save_bclrpat = intel_de_read(dev_priv, BCLRPAT(pipe));
- save_vtotal = intel_de_read(dev_priv, VTOTAL(pipe));
- vblank = intel_de_read(dev_priv, VBLANK(pipe));
+ save_bclrpat = intel_de_read(dev_priv, BCLRPAT(cpu_transcoder));
+ save_vtotal = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
+ vblank = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
- vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
- vactive = (save_vtotal & 0x7ff) + 1;
+ vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1;
+ vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1;
- vblank_start = (vblank & 0xfff) + 1;
- vblank_end = ((vblank >> 16) & 0xfff) + 1;
+ vblank_start = REG_FIELD_GET(VBLANK_START_MASK, vblank) + 1;
+ vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1;
/* Set the border color to purple. */
- intel_de_write(dev_priv, BCLRPAT(pipe), 0x500050);
+ intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), 0x500050);
if (DISPLAY_VER(dev_priv) != 2) {
- u32 pipeconf = intel_de_read(dev_priv, PIPECONF(pipe));
+ u32 transconf = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
- intel_de_write(dev_priv, PIPECONF(pipe),
- pipeconf | PIPECONF_FORCE_BORDER);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
+ transconf | TRANSCONF_FORCE_BORDER);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
/* Wait for next Vblank to substitute
* border color for Color info */
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
@@ -720,7 +721,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
connector_status_connected :
connector_status_disconnected;
- intel_de_write(dev_priv, PIPECONF(pipe), pipeconf);
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), transconf);
} else {
bool restore_vblank = false;
int count, detect;
@@ -730,12 +731,13 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- u32 vsync = intel_de_read(dev_priv, VSYNC(pipe));
- u32 vsync_start = (vsync & 0xffff) + 1;
+ u32 vsync = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
+ u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1;
vblank_start = vsync_start;
- intel_de_write(dev_priv, VBLANK(pipe),
- (vblank_start - 1) | ((vblank_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(vblank_start - 1) |
+ VBLANK_END(vblank_end - 1));
restore_vblank = true;
}
/* sample in the vertical border, selecting the larger one */
@@ -766,7 +768,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
/* restore vblank if necessary */
if (restore_vblank)
- intel_de_write(dev_priv, VBLANK(pipe), vblank);
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), vblank);
/*
* If more than 3/4 of the scanline detected a monitor,
* then it is assumed to be present. This works even on i830,
@@ -779,7 +781,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
}
/* Restore previous settings */
- intel_de_write(dev_priv, BCLRPAT(pipe), save_bclrpat);
+ intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), save_bclrpat);
return status;
}
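The load-detect conversion above also swaps open-coded shift/mask arithmetic for REG_FIELD_GET(); generically that helper is assumed to behave like FIELD_GET(), i.e.:

/* extract a register field: mask it out, then shift it down to bit 0 */
static u32 sketch_field_get(u32 mask, u32 val)
{
	return (val & mask) >> (ffs(mask) - 1);
}

With a VTOTAL_MASK covering bits 27:16 (an assumption of this sketch), sketch_field_get(VTOTAL_MASK, save_vtotal) + 1 matches the old ((save_vtotal >> 16) & 0xfff) + 1.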
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 037fc140b585..b79a8834559f 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -25,9 +25,11 @@
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_dsi.h"
+#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"
@@ -313,6 +315,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
}
crtc->plane_ids_mask |= BIT(primary->id);
+ intel_init_fifo_underrun_reporting(dev_priv, crtc, false);
+
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index e3273fe8ddac..766633566fd6 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <drm/drm_edid.h>
+
#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
#include "intel_display_types.h"
@@ -12,14 +14,16 @@
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
const struct drm_display_mode *mode)
{
- drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
+ drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, "
+ "hd=%d hb=%d-%d hs=%d-%d ht=%d, "
+ "vd=%d vb=%d-%d vs=%d-%d vt=%d, "
+ "flags=0x%x\n",
mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->type, mode->flags);
+ mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
+ mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
+ mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->flags);
}
static void
@@ -56,6 +60,17 @@ intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc);
}
+static void
+intel_dump_buffer(struct drm_i915_private *i915,
+ const char *prefix, const u8 *buf, size_t len)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE,
+ 16, 0, buf, len, false);
+}
+
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -236,6 +251,10 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
+ if (pipe_config->has_audio)
+ intel_dump_buffer(i915, "ELD: ", pipe_config->eld,
+ drm_eld_size(pipe_config->eld));
+
drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
str_yes_no(pipe_config->vrr.enable),
pipe_config->vrr.vmin, pipe_config->vrr.vmax,
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index d190fa0d393b..c3173c0c2068 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -532,9 +532,10 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
skl_write_cursor_wm(plane, crtc_state);
if (plane_state)
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+ intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state,
+ plane_state);
else
- intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
+ intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 1f5a471a0adf..0950bcfea4c0 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -47,6 +47,7 @@
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
@@ -67,6 +68,7 @@
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -89,7 +91,7 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
{
int level;
- level = intel_bios_hdmi_level_shift(encoder);
+ level = intel_bios_hdmi_level_shift(encoder->devdata);
if (level < 0)
level = trans->hdmi_default_entry;
@@ -126,7 +128,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
/* If we're boosting the current, set bit 31 of trans1 */
if (has_iboost(dev_priv) &&
- intel_bios_encoder_dp_boost_level(encoder->devdata))
+ intel_bios_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
@@ -158,7 +160,7 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
/* If we're boosting the current, set bit 31 of trans1 */
if (has_iboost(dev_priv) &&
- intel_bios_encoder_hdmi_boost_level(encoder->devdata))
+ intel_bios_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
@@ -644,19 +646,14 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
int ret = 0;
- u32 tmp;
wakeref = intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain);
if (drm_WARN_ON(dev, !wakeref))
return -ENXIO;
- tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (enable)
- tmp |= hdcp_mask;
- else
- tmp &= ~hdcp_mask;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ hdcp_mask, enable ? hdcp_mask : 0);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}
@@ -948,8 +945,8 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
main_link_aux_power_domain_get(dig_port, crtc_state);
}
-void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -957,33 +954,34 @@ void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- if (cpu_transcoder != TRANSCODER_EDP) {
- if (DISPLAY_VER(dev_priv) >= 13)
- val = TGL_TRANS_CLK_SEL_PORT(phy);
- else if (DISPLAY_VER(dev_priv) >= 12)
- val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
- else
- val = TRANS_CLK_SEL_PORT(encoder->port);
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return;
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
- }
+ if (DISPLAY_VER(dev_priv) >= 13)
+ val = TGL_TRANS_CLK_SEL_PORT(phy);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
+ else
+ val = TRANS_CLK_SEL_PORT(encoder->port);
+
+ intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
}
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
+void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val;
- if (cpu_transcoder != TRANSCODER_EDP) {
- if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_write(dev_priv,
- TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_DISABLED);
- else
- intel_de_write(dev_priv,
- TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
- }
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ val = TGL_TRANS_CLK_SEL_DISABLED;
+ else
+ val = TRANS_CLK_SEL_DISABLED;
+
+ intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
}
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
@@ -1009,9 +1007,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- iboost = intel_bios_encoder_hdmi_boost_level(encoder->devdata);
+ iboost = intel_bios_hdmi_boost_level(encoder->devdata);
else
- iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);
+ iboost = intel_bios_dp_boost_level(encoder->devdata);
if (iboost == 0) {
const struct intel_ddi_buf_trans *trans;
@@ -2200,15 +2198,13 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp;
- u32 val;
if (!crtc_state->fec_enable)
return;
intel_dp = enc_to_intel_dp(encoder);
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val |= DP_TP_CTL_FEC_ENABLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ 0, DP_TP_CTL_FEC_ENABLE);
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -2216,15 +2212,13 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp;
- u32 val;
if (!crtc_state->fec_enable)
return;
intel_dp = enc_to_intel_dp(encoder);
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~DP_TP_CTL_FEC_ENABLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_FEC_ENABLE, 0);
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}
@@ -2387,7 +2381,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 7.a Configure Transcoder Clock Select to direct the Port clock to the
* Transcoder.
*/
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
if (HAS_DP20(dev_priv))
intel_ddi_config_transcoder_dp2(encoder, crtc_state);
@@ -2514,7 +2508,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_fec(encoder, crtc_state);
if (!is_mst)
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
intel_dsc_dp_pps_write(encoder, crtc_state);
}
@@ -2556,7 +2550,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
icl_program_mg_dp_mode(dig_port, crtc_state);
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -2622,12 +2616,10 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
wait = true;
}
- if (intel_crtc_has_dp_encoder(crtc_state)) {
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
- }
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK,
+ DP_TP_CTL_LINK_TRAIN_PAT1);
/* Disable FEC in DP Sink */
intel_ddi_disable_fec_state(encoder, crtc_state);
@@ -2660,19 +2652,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 12) {
if (is_mst) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- u32 val;
- val = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val &= ~(TGL_TRANS_DDI_PORT_MASK |
- TRANS_DDI_MODE_SELECT_MASK);
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder),
- val);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK,
+ 0);
}
} else {
if (!is_mst)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
}
intel_disable_ddi_buf(encoder, old_crtc_state);
@@ -2683,7 +2670,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* transcoder"
*/
if (DISPLAY_VER(dev_priv) >= 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
intel_pps_off(intel_dp);
@@ -2709,12 +2696,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
old_crtc_state, old_conn_state);
if (DISPLAY_VER(dev_priv) < 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_disable_ddi_buf(encoder, old_crtc_state);
if (DISPLAY_VER(dev_priv) >= 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_display_power_put(dev_priv,
dig_port->ddi_io_power_domain,
@@ -3222,12 +3209,9 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- u32 val;
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE);
/*
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
@@ -3496,6 +3480,8 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
intel_psr_get_config(encoder, pipe_config);
+
+ intel_audio_codec_get_config(encoder, pipe_config);
}
void intel_ddi_get_clock(struct intel_encoder *encoder,
@@ -4303,7 +4289,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_bios_encoder_supports_hdmi(devdata);
init_dp = intel_bios_encoder_supports_dp(devdata);
- if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ if (intel_bios_encoder_is_lspcon(devdata)) {
/*
* Lspcon device needs to be driven with DP connector
* with special detection sequence. So make sure DP
@@ -4323,7 +4309,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
}
if (intel_phy_is_snps(dev_priv, phy) &&
- dev_priv->snps_phy_failed_calibration & BIT(phy)) {
+ dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
drm_dbg_kms(&dev_priv->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
phy_name(phy));
@@ -4498,12 +4484,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_de_read(dev_priv, DDI_BUF_CTL(port))
& (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
- if (intel_bios_is_lane_reversal_needed(dev_priv, port))
+ if (intel_bios_encoder_lane_reversal(devdata))
dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(encoder);
if (intel_phy_is_tc(dev_priv, phy)) {
bool is_legacy =
@@ -4519,35 +4505,21 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
drm_WARN_ON(&dev_priv->drm, port > PORT_I);
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
- if (init_dp) {
- if (!intel_ddi_init_dp_connector(dig_port))
- goto err;
-
- dig_port->hpd_pulse = intel_dp_hpd_pulse;
-
- if (dig_port->dp.mso_link_count)
- encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
- }
-
- /* In theory we don't need the encoder->type check, but leave it just in
- * case we have some really bad VBTs... */
- if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
- if (!intel_ddi_init_hdmi_connector(dig_port))
- goto err;
- }
-
if (DISPLAY_VER(dev_priv) >= 11) {
if (intel_phy_is_tc(dev_priv, phy))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (DISPLAY_VER(dev_priv) >= 8) {
- if (port == PORT_A || IS_GEMINILAKE(dev_priv) ||
- IS_BROXTON(dev_priv))
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ dig_port->connected = bdw_digital_port_connected;
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ dig_port->connected = lpt_digital_port_connected;
+ } else if (IS_BROADWELL(dev_priv)) {
+ if (port == PORT_A)
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else {
+ } else if (IS_HASWELL(dev_priv)) {
if (port == PORT_A)
dig_port->connected = hsw_digital_port_connected;
else
@@ -4556,6 +4528,25 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_infoframe_init(dig_port);
+ if (init_dp) {
+ if (!intel_ddi_init_dp_connector(dig_port))
+ goto err;
+
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
+
+ if (dig_port->dp.mso_link_count)
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ }
+
+ /*
+ * In theory we don't need the encoder->type check,
+ * but leave it just in case we have some really bad VBTs...
+ */
+ if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (!intel_ddi_init_hdmi_connector(dig_port))
+ goto err;
+ }
+
return;
err:
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index d39076facdce..361f6874dde5 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -52,9 +52,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 3dbd76fdabd6..42552d8c151e 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -22,6 +22,13 @@ intel_de_read8(struct drm_i915_private *i915, i915_reg_t reg)
return intel_uncore_read8(&i915->uncore, reg);
}
+static inline u64
+intel_de_read64_2x32(struct drm_i915_private *i915,
+ i915_reg_t lower_reg, i915_reg_t upper_reg)
+{
+ return intel_uncore_read64_2x32(&i915->uncore, lower_reg, upper_reg);
+}
+
static inline void
intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
{
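The new intel_de_read64_2x32() wrapper simply forwards to the uncore helper; the usual pattern such a helper implements (assumed here, not shown by this patch) is to re-read the upper half until it is stable, so a carry between the two 32-bit reads cannot produce a torn 64-bit value:

static u64 sketch_read64_2x32(struct drm_i915_private *i915,
			      i915_reg_t lower_reg, i915_reg_t upper_reg)
{
	u32 lower, upper, old_upper;

	upper = intel_de_read(i915, upper_reg);
	do {
		old_upper = upper;
		lower = intel_de_read(i915, lower_reg);
		upper = intel_de_read(i915, upper_reg);
	} while (upper != old_upper);

	return (u64)upper << 32 | lower;
}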
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index e75b9b2a0e01..edbcb1273ca2 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -24,15 +24,15 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <acpi/video.h>
+#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
+#include <acpi/video.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
@@ -45,28 +45,6 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include "display/intel_audio.h"
-#include "display/intel_crt.h"
-#include "display/intel_ddi.h"
-#include "display/intel_display_debugfs.h"
-#include "display/intel_display_power.h"
-#include "display/intel_dp.h"
-#include "display/intel_dp_mst.h"
-#include "display/intel_dpll.h"
-#include "display/intel_dpll_mgr.h"
-#include "display/intel_drrs.h"
-#include "display/intel_dsi.h"
-#include "display/intel_dvo.h"
-#include "display/intel_fb.h"
-#include "display/intel_gmbus.h"
-#include "display/intel_hdmi.h"
-#include "display/intel_lvds.h"
-#include "display/intel_sdvo.h"
-#include "display/intel_snps_phy.h"
-#include "display/intel_tv.h"
-#include "display/intel_vdsc.h"
-#include "display/intel_vrr.h"
-
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
@@ -76,31 +54,50 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
+#include "i9xx_plane.h"
+#include "i9xx_wm.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
+#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
+#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
+#include "intel_dp.h"
#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
+#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
+#include "intel_drrs.h"
+#include "intel_dsi.h"
+#include "intel_dvo.h"
+#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
+#include "intel_gmbus.h"
#include "intel_hdcp.h"
+#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
-#include "intel_modeset_verify.h"
+#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
+#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
@@ -112,10 +109,17 @@
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
+#include "intel_sdvo.h"
+#include "intel_snps_phy.h"
#include "intel_sprite.h"
#include "intel_tc.h"
+#include "intel_tv.h"
+#include "intel_vblank.h"
+#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "intel_vga.h"
-#include "i9xx_plane.h"
+#include "intel_vrr.h"
+#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -130,101 +134,6 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- * @dev_priv: i915 device
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * surface width = hdisplay for normal plane and 64 for cursor
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-void intel_update_watermarks(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->display.funcs.wm->update_wm)
- dev_priv->display.funcs.wm->update_wm(dev_priv);
-}
-
-static int intel_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->compute_pipe_wm)
- return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);
- return 0;
-}
-
-static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
- return 0;
- if (drm_WARN_ON(&dev_priv->drm,
- !dev_priv->display.funcs.wm->compute_pipe_wm))
- return 0;
- return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
-}
-
-static bool intel_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->initial_watermarks) {
- dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
- return true;
- }
- return false;
-}
-
-static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->atomic_update_watermarks)
- dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
-}
-
-static void intel_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->optimize_watermarks)
- dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
-}
-
-static int intel_compute_global_watermarks(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->compute_global_watermarks)
- return dev_priv->display.funcs.wm->compute_global_watermarks(state);
- return 0;
-}
-
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
@@ -293,11 +202,11 @@ static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
}
/* Wa_2006604312:icl,ehl */
@@ -306,11 +215,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
if (enable)
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
else
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
}
/* Wa_1604331009:icl,jsl,ehl */
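
Note: the two conversions above replace open-coded read/modify/write sequences with intel_de_rmw(). As a rough model, intel_de_rmw(i915, reg, clear, set) computes new = (old & ~clear) | set, so enabling a workaround passes (0, bits) and disabling it passes (bits, 0). A minimal standalone illustration (the bit positions below are placeholders, not the real register layout):

#include <stdint.h>
#include <stdio.h>

#define DUPS1_GATING_DIS (1u << 15)	/* placeholder bit positions */
#define DUPS2_GATING_DIS (1u << 19)

/* Model of a read-modify-write on a shadowed register value. */
static uint32_t rmw(uint32_t old, uint32_t clear, uint32_t set)
{
	return (old & ~clear) | set;
}

int main(void)
{
	uint32_t reg = 0x00000f00;

	/* enable: clear nothing, set the gating-disable bits */
	reg = rmw(reg, 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	printf("enabled:  0x%08x\n", reg);

	/* disable: clear the same bits, set nothing */
	reg = rmw(reg, DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
	printf("disabled: 0x%08x\n", reg);
	return 0;
}
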
@@ -385,41 +292,6 @@ struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
return to_intel_crtc(crtc_state->uapi.crtc);
}
-static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- i915_reg_t reg = PIPEDSL(pipe);
- u32 line1, line2;
-
- line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
- msleep(5);
- line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
-
- return line1 != line2;
-}
-
-static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- /* Wait for the display line to settle/start moving */
- if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
- drm_err(&dev_priv->drm,
- "pipe %c scanline %s wait timed out\n",
- pipe_name(pipe), str_on_off(state));
-}
-
-static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
-{
- wait_for_pipe_scanline_moving(crtc, false);
-}
-
-static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
-{
- wait_for_pipe_scanline_moving(crtc, true);
-}
-
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
@@ -430,8 +302,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
/* Wait for the Pipe State to go off */
- if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
- PIPECONF_STATE_ENABLE, 100))
+ if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
+ TRANSCONF_STATE_ENABLE, 100))
drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -452,8 +324,8 @@ void assert_transcoder(struct drm_i915_private *dev_priv,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wakeref) {
- u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
- cur_state = !!(val & PIPECONF_ENABLE);
+ u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
+ cur_state = !!(val & TRANSCONF_ENABLE);
intel_display_power_put(dev_priv, power_domain, wakeref);
} else {
@@ -565,15 +437,15 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
- reg = PIPECONF(cpu_transcoder);
+ reg = TRANSCONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
- if (val & PIPECONF_ENABLE) {
+ if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
- intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
+ intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
intel_de_posting_read(dev_priv, reg);
/*
@@ -604,9 +476,9 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- reg = PIPECONF(cpu_transcoder);
+ reg = TRANSCONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
- if ((val & PIPECONF_ENABLE) == 0)
+ if ((val & TRANSCONF_ENABLE) == 0)
return;
/*
@@ -614,11 +486,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
* so best keep it disabled when not needed.
*/
if (old_crtc_state->double_wide)
- val &= ~PIPECONF_DOUBLE_WIDE;
+ val &= ~TRANSCONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
if (!IS_I830(dev_priv))
- val &= ~PIPECONF_ENABLE;
+ val &= ~TRANSCONF_ENABLE;
if (DISPLAY_VER(dev_priv) >= 14)
intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
@@ -628,7 +500,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
intel_de_write(dev_priv, reg, val);
- if ((val & PIPECONF_ENABLE) == 0)
+ if ((val & TRANSCONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
@@ -1095,22 +967,6 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
return encoder;
}
-static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- i915_reg_t dslreg = PIPEDSL(pipe);
- u32 temp;
-
- temp = intel_de_read(dev_priv, dslreg);
- udelay(500);
- if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
- if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
- drm_err(&dev_priv->drm,
- "mode set failed: pipe %c stuck\n",
- pipe_name(pipe));
- }
-}
-
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1303,7 +1159,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- u8 update_planes = new_crtc_state->update_planes;
+ u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
+ ~new_crtc_state->async_flip_planes;
const struct intel_plane_state *old_plane_state;
struct intel_plane *plane;
bool need_vbl_wait = false;
@@ -1312,7 +1169,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
if (plane->need_async_flip_disable_wa &&
plane->pipe == crtc->pipe &&
- update_planes & BIT(plane->id)) {
+ disable_async_flip_planes & BIT(plane->id)) {
/*
* Apart from the async flip bit we want to
* preserve the old state for the plane.
@@ -1429,7 +1286,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WA for platforms where async address update enable bit
* is double buffered and only latched at start of vblank.
*/
- if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
+ if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
intel_crtc_async_flip_disable_wa(state, crtc);
}
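
Note: the workaround is now keyed off a per-plane bitmask rather than the crtc-wide uapi.async_flip flag: only planes that had async flips enabled in the old state and no longer do in the new one (old & ~new) get the disable workaround. A small standalone sketch of that selection (the sample masks are arbitrary):

#include <stdint.h>
#include <stdio.h>

enum { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_SPRITE1 };

int main(void)
{
	uint8_t old_async = (1u << PLANE_PRIMARY) | (1u << PLANE_SPRITE0);
	uint8_t new_async = (1u << PLANE_PRIMARY);

	/* planes losing async flip -> need the disable workaround */
	uint8_t disable_wa = old_async & ~new_async;

	for (int id = PLANE_PRIMARY; id <= PLANE_SPRITE1; id++)
		printf("plane %d: %s\n", id,
		       (disable_wa & (1u << id)) ? "apply WA" : "skip");
	return 0;
}
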
@@ -1805,7 +1662,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
if (HAS_PCH_CPT(dev_priv))
- cpt_verify_modeset(dev_priv, pipe);
+ intel_wait_for_pipe_scanline_moving(crtc);
/*
* Must wait for vblank to avoid spurious PCH FIFO underruns.
@@ -1852,12 +1709,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
enum transcoder transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
CHICKEN_TRANS(transcoder);
- u32 val;
- val = intel_de_read(dev_priv, reg);
- val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg,
+ HSW_FRAME_START_DELAY_MASK,
+ HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
@@ -1897,7 +1752,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
intel_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
- intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
+ intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
crtc_state->pixel_multiplier - 1);
hsw_set_frame_start_delay(crtc_state);
@@ -1918,6 +1773,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
+ intel_dmc_enable_pipe(dev_priv, crtc->pipe);
+
if (!new_crtc_state->bigjoiner_pipes) {
intel_encoders_pre_pll_enable(state, crtc);
@@ -2053,6 +1910,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/*
* FIXME collapse everything to one hook.
@@ -2062,6 +1920,8 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
}
+
+ intel_dmc_disable_pipe(i915, crtc->pipe);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -2865,12 +2725,14 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- u32 crtc_vtotal, crtc_vblank_end;
+ u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
int vsyncshift = 0;
	/* We need to be careful not to change the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
+ crtc_vdisplay = adjusted_mode->crtc_vdisplay;
crtc_vtotal = adjusted_mode->crtc_vtotal;
+ crtc_vblank_start = adjusted_mode->crtc_vblank_start;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
@@ -2887,23 +2749,44 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
vsyncshift += adjusted_mode->crtc_htotal;
}
+ /*
+	 * VBLANK_START no longer works on ADL+; instead we must use
+ * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
+ */
+ if (DISPLAY_VER(dev_priv) >= 13) {
+ intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
+ crtc_vblank_start - crtc_vdisplay);
+
+ /*
+ * VBLANK_START not used by hw, just clear it
+ * to make it stand out in register dumps.
+ */
+ crtc_vblank_start = 1;
+ }
+
if (DISPLAY_VER(dev_priv) > 3)
- intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
- vsyncshift);
-
- intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
- intel_de_write(dev_priv, HBLANK(cpu_transcoder),
- (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
- intel_de_write(dev_priv, HSYNC(cpu_transcoder),
- (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
- intel_de_write(dev_priv, VBLANK(cpu_transcoder),
- (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
- intel_de_write(dev_priv, VSYNC(cpu_transcoder),
- (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
+ vsyncshift);
+
+ intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
+ HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
+ HTOTAL(adjusted_mode->crtc_htotal - 1));
+ intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
+ HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
+ HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
+ intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
+ HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
+ HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
+
+ intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
+ VACTIVE(crtc_vdisplay - 1) |
+ VTOTAL(crtc_vtotal - 1));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(crtc_vblank_start - 1) |
+ VBLANK_END(crtc_vblank_end - 1));
+ intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
+ VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
+ VSYNC_END(adjusted_mode->crtc_vsync_end - 1));
/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
@@ -2911,9 +2794,9 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
* bits. */
if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
- intel_de_write(dev_priv, VTOTAL(pipe),
- intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
-
+ intel_de_write(dev_priv, TRANS_VTOTAL(pipe),
+ VACTIVE(crtc_vdisplay - 1) |
+ VTOTAL(crtc_vtotal - 1));
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
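
Note: on DISPLAY_VER >= 13 the hunk above programs the delayed-vblank position as TRANS_SET_CONTEXT_LATENCY = crtc_vblank_start - crtc_vdisplay and writes a dummy 1 into the VBLANK_START field. A worked example with made-up 1080p-style numbers (vdisplay 1080, vblank_start 1084), showing how the readout path reconstructs vblank_start as vdisplay plus the programmed latency:

#include <stdio.h>

int main(void)
{
	/* illustrative timings, not taken from any real mode table */
	int crtc_vdisplay = 1080;
	int crtc_vblank_start = 1084;

	/* write side (DISPLAY_VER >= 13) */
	int set_context_latency = crtc_vblank_start - crtc_vdisplay;

	/* readout side */
	int readback_vblank_start = crtc_vdisplay + set_context_latency;

	printf("TRANS_SET_CONTEXT_LATENCY = %d\n", set_context_latency);
	printf("reconstructed vblank_start = %d\n", readback_vblank_start);
	return 0;
}
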
@@ -2941,9 +2824,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
else
- return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+ return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
@@ -2952,43 +2835,47 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 tmp;
- tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder));
+ adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
+ adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hblank_start =
- (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_hblank_end =
- ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder));
+ adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
- tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder));
+ adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
+ adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
+ adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;
+
+ /* FIXME TGL+ DSI transcoders have this! */
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vblank_start =
- (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vblank_end =
- ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
+ adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
+ adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;
if (intel_pipe_is_interlaced(pipe_config)) {
- pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
- pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
- pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
+ adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ adjusted_mode->crtc_vtotal += 1;
+ adjusted_mode->crtc_vblank_end += 1;
}
+
+ if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
+ adjusted_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vdisplay +
+ intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
}
static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
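
Note: the readout now mirrors the write side with REG_FIELD_GET()-style field macros instead of the old hand-rolled 0xffff masks and 16-bit shifts. The standalone round-trip below uses simplified pack/unpack stand-ins with the same 16-bit field split as the old open-coded version (the real macros derive the shift from the mask); timing values are illustrative:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for REG_FIELD_PREP()/REG_FIELD_GET() */
#define FIELD_PREP16(shift, val)  (((uint32_t)(val) & 0xffff) << (shift))
#define FIELD_GET16(shift, reg)   (((reg) >> (shift)) & 0xffff)

int main(void)
{
	int vdisplay = 1080, vtotal = 1125;	/* illustrative values */

	/* pack: active count in bits 15:0, total in bits 31:16, N-1 encoded */
	uint32_t trans_vtotal = FIELD_PREP16(0, vdisplay - 1) |
				FIELD_PREP16(16, vtotal - 1);

	/* unpack on readout, adding the +1 back */
	printf("vdisplay = %u\n", (unsigned)(FIELD_GET16(0, trans_vtotal) + 1));
	printf("vtotal   = %u\n", (unsigned)(FIELD_GET16(16, trans_vtotal) + 1));
	return 0;
}
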
@@ -3028,7 +2915,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 pipeconf = 0;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = 0;
/*
* - We keep both pipes enabled on 830
@@ -3036,18 +2924,18 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
- pipeconf |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
if (crtc_state->double_wide)
- pipeconf |= PIPECONF_DOUBLE_WIDE;
+ val |= TRANSCONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (crtc_state->dither && crtc_state->pipe_bpp != 30)
- pipeconf |= PIPECONF_DITHER_EN |
- PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN |
+ TRANSCONF_DITHER_TYPE_SP;
switch (crtc_state->pipe_bpp) {
default:
@@ -3055,13 +2943,13 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
MISSING_CASE(crtc_state->pipe_bpp);
fallthrough;
case 18:
- pipeconf |= PIPECONF_BPC_6;
+ val |= TRANSCONF_BPC_6;
break;
case 24:
- pipeconf |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
break;
case 30:
- pipeconf |= PIPECONF_BPC_10;
+ val |= TRANSCONF_BPC_10;
break;
}
}
@@ -3069,23 +2957,23 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (DISPLAY_VER(dev_priv) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
- pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
else
- pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
+ val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
} else {
- pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
+ val |= TRANSCONF_INTERLACE_PROGRESSIVE;
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
crtc_state->limited_color_range)
- pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= TRANSCONF_COLOR_RANGE_SELECT;
- pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
- pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
- intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
@@ -3244,20 +3132,20 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = false;
- tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
- if (!(tmp & PIPECONF_ENABLE))
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
+ if (!(tmp & TRANSCONF_ENABLE))
goto out;
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
- switch (tmp & PIPECONF_BPC_MASK) {
- case PIPECONF_BPC_6:
+ switch (tmp & TRANSCONF_BPC_MASK) {
+ case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
break;
- case PIPECONF_BPC_8:
+ case TRANSCONF_BPC_8:
pipe_config->pipe_bpp = 24;
break;
- case PIPECONF_BPC_10:
+ case TRANSCONF_BPC_10:
pipe_config->pipe_bpp = 30;
break;
default:
@@ -3267,12 +3155,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- (tmp & PIPECONF_COLOR_RANGE_SELECT))
+ (tmp & TRANSCONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
- pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);
+ pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);
- pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+ pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
if (IS_CHERRYVIEW(dev_priv))
pipe_config->cgm_mode = intel_de_read(dev_priv,
@@ -3282,7 +3170,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
if (DISPLAY_VER(dev_priv) < 4)
- pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+ pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -3292,7 +3180,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (DISPLAY_VER(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->chv_dpll_md[crtc->pipe];
+ tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
else
tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
pipe_config->pixel_multiplier =
@@ -3352,7 +3240,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
/*
@@ -3360,7 +3248,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (!intel_crtc_needs_modeset(crtc_state))
- val |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
switch (crtc_state->pipe_bpp) {
default:
@@ -3368,26 +3256,26 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
MISSING_CASE(crtc_state->pipe_bpp);
fallthrough;
case 18:
- val |= PIPECONF_BPC_6;
+ val |= TRANSCONF_BPC_6;
break;
case 24:
- val |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
break;
case 30:
- val |= PIPECONF_BPC_10;
+ val |= TRANSCONF_BPC_10;
break;
case 36:
- val |= PIPECONF_BPC_12;
+ val |= TRANSCONF_BPC_12;
break;
}
if (crtc_state->dither)
- val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACE_IF_ID_ILK;
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
else
- val |= PIPECONF_INTERLACE_PF_PD_ILK;
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
/*
* This would end up with an odd purple hue over
@@ -3398,18 +3286,18 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->limited_color_range &&
!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
- val |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= TRANSCONF_COLOR_RANGE_SELECT;
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
- val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
+ val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;
- val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
- val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
+ val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
- intel_de_write(dev_priv, PIPECONF(pipe), val);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
@@ -3424,22 +3312,22 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (!intel_crtc_needs_modeset(crtc_state))
- val |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
if (IS_HASWELL(dev_priv) && crtc_state->dither)
- val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACE_IF_ID_ILK;
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
else
- val |= PIPECONF_INTERLACE_PF_PD_ILK;
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
if (IS_HASWELL(dev_priv) &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
- val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
+ val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
- intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
- intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
@@ -3664,33 +3552,33 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->shared_dpll = NULL;
ret = false;
- tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
- if (!(tmp & PIPECONF_ENABLE))
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
+ if (!(tmp & TRANSCONF_ENABLE))
goto out;
- switch (tmp & PIPECONF_BPC_MASK) {
- case PIPECONF_BPC_6:
+ switch (tmp & TRANSCONF_BPC_MASK) {
+ case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
break;
- case PIPECONF_BPC_8:
+ case TRANSCONF_BPC_8:
pipe_config->pipe_bpp = 24;
break;
- case PIPECONF_BPC_10:
+ case TRANSCONF_BPC_10:
pipe_config->pipe_bpp = 30;
break;
- case PIPECONF_BPC_12:
+ case TRANSCONF_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
- if (tmp & PIPECONF_COLOR_RANGE_SELECT)
+ if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
- switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
- case PIPECONF_OUTPUT_COLORSPACE_YUV601:
- case PIPECONF_OUTPUT_COLORSPACE_YUV709:
+ switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
+ case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
+ case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
break;
default:
@@ -3698,11 +3586,11 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
break;
}
- pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);
+ pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);
- pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+ pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
- pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);
+ pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);
pipe_config->csc_mode = intel_de_read(dev_priv,
PIPE_CSC_MODE(crtc->pipe));
@@ -3979,9 +3867,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
pipe_config->pch_pfit.force_thru = true;
}
- tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
- return tmp & PIPECONF_ENABLE;
+ return tmp & TRANSCONF_ENABLE;
}
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
@@ -4085,9 +3973,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (IS_HASWELL(dev_priv)) {
u32 tmp = intel_de_read(dev_priv,
- PIPECONF(pipe_config->cpu_transcoder));
+ TRANSCONF(pipe_config->cpu_transcoder));
- if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
+ if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
else
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -4136,7 +4024,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
intel_de_read(dev_priv,
- PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+ TRANS_MULT(pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -5429,6 +5317,12 @@ intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
return memcmp(a, b, sizeof(*a)) == 0;
}
+static bool
+intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
+{
+ return memcmp(a, b, len) == 0;
+}
+
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
bool fastset, const char *name,
@@ -5479,6 +5373,50 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
}
}
+/* Returns the length up to and including the last differing byte */
+static size_t
+memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ if (a[i] != b[i])
+ return i + 1;
+ }
+
+ return 0;
+}
+
+static void
+pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
+ bool fastset, const char *name,
+ const u8 *a, const u8 *b, size_t len)
+{
+ if (fastset) {
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "fastset mismatch in %s buffer\n", name);
+ print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
+ 16, 0, a, len, false);
+ print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
+ 16, 0, b, len, false);
+ } else {
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
+
+ drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
+ print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
+ 16, 0, a, len, false);
+ print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
+ 16, 0, b, len, false);
+ }
+}
+
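
Note: memcmp_diff_len() above trims the hex dump to the last byte that actually differs. A userspace re-implementation of the same backward walk, with arbitrary sample buffers, to show the resulting dump length:

#include <stddef.h>
#include <stdio.h>

/* same backward walk as the helper added above */
static size_t memcmp_diff_len(const unsigned char *a,
			      const unsigned char *b, size_t len)
{
	for (size_t i = len; i-- > 0;) {
		if (a[i] != b[i])
			return i + 1;
	}
	return 0;
}

int main(void)
{
	unsigned char a[] = { 0x11, 0x22, 0x33, 0x44 };
	unsigned char b[] = { 0x11, 0x2a, 0x33, 0x44 };

	/* buffers differ only at index 1 -> dump length 2 */
	printf("dump %zu of %zu bytes\n",
	       memcmp_diff_len(a, b, sizeof(a)), sizeof(a));
	return 0;
}
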
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
const char *name, const char *format, ...)
@@ -5677,6 +5615,18 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_BUFFER(name, len) do { \
+ BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
+ BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
+ if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
+ pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \
+ current_config->name, \
+ pipe_config->name, \
+ (len)); \
+ ret = false; \
+ } \
+} while (0)
+
#define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
if (current_config->gamma_mode == pipe_config->gamma_mode && \
!intel_color_lut_equal(current_config, \
@@ -5748,6 +5698,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(fec_enable);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
+ PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
@@ -5937,11 +5888,17 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
if (ret)
return ret;
+ ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
+ if (ret)
+ return ret;
+
ret = intel_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
crtc_state->update_planes |= crtc_state->active_planes;
+ crtc_state->async_flip_planes = 0;
+ crtc_state->do_async_flip = false;
}
return 0;
@@ -6694,8 +6651,8 @@ static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
* @dev: drm device
* @_state: state to validate
*/
-static int intel_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *_state)
+int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *_state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
@@ -8355,124 +8312,6 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
}
-static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
-{
- struct drm_plane *plane;
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(state->dev, crtc) {
- struct intel_crtc_state *crtc_state;
-
- crtc_state = intel_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (crtc_state->hw.active) {
- /*
- * Preserve the inherited flag to avoid
- * taking the full modeset path.
- */
- crtc_state->inherited = true;
- }
- }
-
- drm_for_each_plane(plane, state->dev) {
- struct drm_plane_state *plane_state;
-
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
- }
-
- return 0;
-}
-
-/*
- * Calculate what we think the watermarks should be for the state we've read
- * out of the hardware and then immediately program those watermarks so that
- * we ensure the hardware settings match our internal state.
- *
- * We can calculate what we think WM's should be by creating a duplicate of the
- * current state (which was constructed during hardware readout) and running it
- * through the atomic check code to calculate new watermark values in the
- * state object.
- */
-static void sanitize_watermarks(struct drm_i915_private *dev_priv)
-{
- struct drm_atomic_state *state;
- struct intel_atomic_state *intel_state;
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- struct drm_modeset_acquire_ctx ctx;
- int ret;
- int i;
-
- /* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.funcs.wm->optimize_watermarks)
- return;
-
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
- return;
-
- intel_state = to_intel_atomic_state(state);
-
- drm_modeset_acquire_init(&ctx, 0);
-
-retry:
- state->acquire_ctx = &ctx;
-
- /*
- * Hardware readout is the only time we don't want to calculate
- * intermediate watermarks (since we don't trust the current
- * watermarks).
- */
- if (!HAS_GMCH(dev_priv))
- intel_state->skip_intermediate_wm = true;
-
- ret = sanitize_watermarks_add_affected(state);
- if (ret)
- goto fail;
-
- ret = intel_atomic_check(&dev_priv->drm, state);
- if (ret)
- goto fail;
-
- /* Write calculated watermark values back */
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
- crtc_state->wm.need_postvbl_update = true;
- intel_optimize_watermarks(intel_state, crtc);
-
- to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
- }
-
-fail:
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- goto retry;
- }
-
- /*
- * If we fail here, it means that the hardware appears to be
- * programmed in a way that shouldn't be possible, given our
- * understanding of watermark requirements. This might mean a
- * mistake in the hardware readout code or a mistake in the
- * watermark calculations for a given platform. Raise a WARN
- * so that this is noticeable.
- *
- * If this actually happens, we'll have to just leave the
- * BIOS-programmed watermarks untouched and hope for the best.
- */
- drm_WARN(&dev_priv->drm, ret,
- "Could not determine valid watermarks for inherited state\n");
-
- drm_atomic_state_put(state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-}
-
static int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
@@ -8633,12 +8472,16 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
goto cleanup_bios;
/* FIXME: completely on the wrong abstraction layer */
+ ret = intel_power_domains_init(i915);
+ if (ret < 0)
+ goto cleanup_vga;
+
intel_power_domains_init_hw(i915, false);
if (!HAS_DISPLAY(i915))
return 0;
- intel_dmc_ucode_init(i915);
+ intel_dmc_init(i915);
i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
@@ -8673,8 +8516,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
return 0;
cleanup_vga_client_pw_domain_dmc:
- intel_dmc_ucode_fini(i915);
+ intel_dmc_fini(i915);
intel_power_domains_driver_remove(i915);
+cleanup_vga:
intel_vga_unregister(i915);
cleanup_bios:
intel_bios_driver_remove(i915);
@@ -8693,7 +8537,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return 0;
- intel_init_pm(i915);
+ intel_wm_init(i915);
intel_panel_sanitize_ssc(i915);
@@ -8749,7 +8593,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(i915))
- sanitize_watermarks(i915);
+ ilk_wm_sanitize(i915);
return 0;
}
@@ -8790,6 +8634,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
/* 640x480@60Hz, ~25175 kHz */
struct dpll clock = {
.m1 = 18,
@@ -8816,13 +8661,20 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
- intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
- intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
- intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
- intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
- intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
- intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
- intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+ intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
+ HACTIVE(640 - 1) | HTOTAL(800 - 1));
+ intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
+ HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
+ intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
+ HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
+ intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
+ VACTIVE(480 - 1) | VTOTAL(525 - 1));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
+ intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
+ VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
+ intel_de_write(dev_priv, PIPESRC(pipe),
+ PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));
intel_de_write(dev_priv, FP0(pipe), fp);
intel_de_write(dev_priv, FP1(pipe), fp);
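
Note: as a sanity check on the fixed 640x480 timings programmed above, the dotclock from the comment (~25175 kHz) together with htotal 800 and vtotal 525 gives roughly 60 Hz; the register values are N-1 encoded like the other transcoder timing writes. A quick standalone calculation:

#include <stdio.h>

int main(void)
{
	/* values taken from the i830_enable_pipe() programming above */
	int dotclock_khz = 25175;
	int htotal = 800, vtotal = 525;

	double refresh = (double)dotclock_khz * 1000 / (htotal * vtotal);

	printf("refresh = %.2f Hz\n", refresh);	/* ~59.94 Hz */
	return 0;
}
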
@@ -8853,8 +8705,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
udelay(150); /* wait for warmup */
}
- intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE);
+ intel_de_posting_read(dev_priv, TRANSCONF(pipe));
intel_wait_for_pipe_scanline_moving(crtc);
}
@@ -8877,8 +8729,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_WARN_ON(&dev_priv->drm,
intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);
- intel_de_write(dev_priv, PIPECONF(pipe), 0);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(pipe), 0);
+ intel_de_posting_read(dev_priv, TRANSCONF(pipe));
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -8999,7 +8851,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
- intel_dmc_ucode_fini(i915);
+ intel_dmc_fini(i915);
intel_power_domains_driver_remove(i915);
@@ -9050,7 +8902,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
* enabled. We do it last so that the async config cannot run
* before the connectors are registered.
*/
- intel_fbdev_initial_config_async(&i915->drm);
+ intel_fbdev_initial_config_async(i915);
/*
* We need to coordinate the hotplugs with the asynchronous
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index ef73730f32b0..50285fb4fcf5 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -28,9 +28,11 @@
#include <drm/drm_util.h>
#include "i915_reg_defs.h"
+#include "intel_display_limits.h"
enum drm_scaling_filter;
struct dpll;
+struct drm_atomic_state;
struct drm_connector;
struct drm_device;
struct drm_display_mode;
@@ -62,51 +64,9 @@ struct intel_remapped_info;
struct intel_rotation_info;
struct pci_dev;
-/*
- * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
- * rest have consecutive values and match the enum values of transcoders
- * with a 1:1 transcoder -> pipe mapping.
- */
-enum pipe {
- INVALID_PIPE = -1,
-
- PIPE_A = 0,
- PIPE_B,
- PIPE_C,
- PIPE_D,
- _PIPE_EDP,
-
- I915_MAX_PIPES = _PIPE_EDP
-};
#define pipe_name(p) ((p) + 'A')
-enum transcoder {
- INVALID_TRANSCODER = -1,
- /*
- * The following transcoders have a 1:1 transcoder -> pipe mapping,
- * keep their values fixed: the code assumes that TRANSCODER_A=0, the
- * rest have consecutive values and match the enum values of the pipes
- * they map to.
- */
- TRANSCODER_A = PIPE_A,
- TRANSCODER_B = PIPE_B,
- TRANSCODER_C = PIPE_C,
- TRANSCODER_D = PIPE_D,
-
- /*
- * The following transcoders can map to any pipe, their enum value
- * doesn't need to stay fixed.
- */
- TRANSCODER_EDP,
- TRANSCODER_DSI_0,
- TRANSCODER_DSI_1,
- TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
- TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
-
- I915_MAX_TRANSCODERS
-};
-
static inline const char *transcoder_name(enum transcoder transcoder)
{
switch (transcoder) {
@@ -147,29 +107,6 @@ enum i9xx_plane_id {
#define plane_name(p) ((p) + 'A')
#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
-/*
- * Per-pipe plane identifier.
- * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
- * number of planes per CRTC. Not all platforms really have this many planes,
- * which means some arrays of size I915_MAX_PLANES may have unused entries
- * between the topmost sprite plane and the cursor plane.
- *
- * This is expected to be passed to various register macros
- * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
- */
-enum plane_id {
- PLANE_PRIMARY,
- PLANE_SPRITE0,
- PLANE_SPRITE1,
- PLANE_SPRITE2,
- PLANE_SPRITE3,
- PLANE_SPRITE4,
- PLANE_SPRITE5,
- PLANE_CURSOR,
-
- I915_MAX_PLANES,
-};
-
#define for_each_plane_id_on_crtc(__crtc, __p) \
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
@@ -182,34 +119,6 @@ enum plane_id {
for_each_dbuf_slice((__dev_priv), (__slice)) \
for_each_if((__mask) & BIT(__slice))
-enum port {
- PORT_NONE = -1,
-
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- PORT_F,
- PORT_G,
- PORT_H,
- PORT_I,
-
- /* tgl+ */
- PORT_TC1 = PORT_D,
- PORT_TC2,
- PORT_TC3,
- PORT_TC4,
- PORT_TC5,
- PORT_TC6,
-
- /* XE_LPD repositions D/E offsets and bitfields */
- PORT_D_XELPD = PORT_TC5,
- PORT_E_XELPD,
-
- I915_MAX_PORTS
-};
-
#define port_name(p) ((p) + 'A')
/*
@@ -263,6 +172,8 @@ enum tc_port_mode {
};
enum aux_ch {
+ AUX_CH_NONE = -1,
+
AUX_CH_A,
AUX_CH_B,
AUX_CH_C,
@@ -312,27 +223,6 @@ enum phy_fia {
FIA3,
};
-enum hpd_pin {
- HPD_NONE = 0,
- HPD_TV = HPD_NONE, /* TV is known to be unreliable */
- HPD_CRT,
- HPD_SDVO_B,
- HPD_SDVO_C,
- HPD_PORT_A,
- HPD_PORT_B,
- HPD_PORT_C,
- HPD_PORT_D,
- HPD_PORT_E,
- HPD_PORT_TC1,
- HPD_PORT_TC2,
- HPD_PORT_TC3,
- HPD_PORT_TC4,
- HPD_PORT_TC5,
- HPD_PORT_TC6,
-
- HPD_NUM_PINS
-};
-
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
@@ -507,6 +397,7 @@ enum hpd_pin {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 57ddce3ba02b..fdab7bb93a7d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -17,15 +17,14 @@
#include <drm/drm_modeset_lock.h>
#include "intel_cdclk.h"
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_display_power.h"
-#include "intel_dmc.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_i915_private;
struct drm_property;
@@ -40,6 +39,7 @@ struct intel_cdclk_vals;
struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dmc;
struct intel_dpll_funcs;
struct intel_dpll_mgr;
struct intel_fbdev;
@@ -85,6 +85,12 @@ struct intel_wm_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
+ void (*get_hw_state)(struct drm_i915_private *i915);
+};
+
+struct intel_audio_state {
+ struct intel_encoder *encoder;
+ u8 eld[MAX_ELD_BYTES];
};
struct intel_audio {
@@ -96,8 +102,8 @@ struct intel_audio {
int power_refcount;
u32 freq_cntrl;
- /* Used to save the pipe-to-encoder mapping for audio */
- struct intel_encoder *encoder_map[I915_MAX_PIPES];
+ /* current audio state for the audio component hooks */
+ struct intel_audio_state state[I915_MAX_TRANSCODERS];
/* necessary resource sharing with HDMI LPE audio driver. */
struct {
@@ -122,6 +128,11 @@ struct intel_dpll {
int nssc;
int ssc;
} ref_clks;
+
+ /*
+ * Bitmask of PLLs using the PCH SSC, indexed using enum intel_dpll_id.
+ */
+ u8 pch_ssc_use;
};
struct intel_frontbuffer_tracking {
@@ -233,7 +244,7 @@ struct intel_wm {
struct g4x_wm_values g4x;
};
- u8 max_level;
+ u8 num_levels;
/*
* Should be held around atomic WM register writing; also
@@ -330,6 +341,11 @@ struct intel_display {
} dkl;
struct {
+ struct intel_dmc *dmc;
+ intel_wakeref_t wakeref;
+ } dmc;
+
+ struct {
/* VLV/CHV/BXT/GLK DSI MMIO register base address */
u32 mmio_base;
} dsi;
@@ -429,6 +445,24 @@ struct intel_display {
} sagv;
struct {
+ /*
+ * DG2: Mask of PHYs that were not calibrated by the firmware
+ * and should not be used.
+ */
+ u8 phy_failed_calibration;
+ } snps;
+
+ struct {
+ /*
+ * Shadows for CHV DPLL_MD regs to keep the state
+	 * checker somewhat working in the presence of hardware
+ * crappiness (can't read out DPLL_MD for pipes B & C).
+ */
+ u32 chv_dpll_md[I915_MAX_PIPES];
+ u32 bxt_phy_grc;
+ } state;
+
+ struct {
/* ordered wq for modesets */
struct workqueue_struct *modeset;
@@ -438,7 +472,6 @@ struct intel_display {
/* Grouping using named structs. Keep sorted. */
struct intel_audio audio;
- struct intel_dmc dmc;
struct intel_dpll dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 7bcd90384a46..1e654ddd0815 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -26,10 +26,9 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_panel.h"
-#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
-#include "skl_watermark.h"
+#include "intel_wm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -1282,237 +1281,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-static void wm_latency_show(struct seq_file *m, const u16 wm[8])
-{
- struct drm_i915_private *dev_priv = m->private;
- int level;
- int num_levels;
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- drm_modeset_lock_all(&dev_priv->drm);
-
- for (level = 0; level < num_levels; level++) {
- unsigned int latency = wm[level];
-
- /*
- * - WM1+ latency values in 0.5us units
- * - latencies are in us on gen9/vlv/chv
- */
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- seq_printf(m, "WM%d %u (%u.%u usec)\n",
- level, wm[level], latency / 10, latency % 10);
- }
-
- drm_modeset_unlock_all(&dev_priv->drm);
-}
-
-static int pri_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.pri_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int spr_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.spr_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int cur_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.cur_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int pri_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- return -ENODEV;
-
- return single_open(file, pri_wm_latency_show, dev_priv);
-}
-
-static int spr_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, spr_wm_latency_show, dev_priv);
-}
-
-static int cur_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, cur_wm_latency_show, dev_priv);
-}
-
-static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, u16 wm[8])
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 new[8] = { 0 };
- int num_levels;
- int level;
- int ret;
- char tmp[32];
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
- &new[0], &new[1], &new[2], &new[3],
- &new[4], &new[5], &new[6], &new[7]);
- if (ret != num_levels)
- return -EINVAL;
-
- drm_modeset_lock_all(&dev_priv->drm);
-
- for (level = 0; level < num_levels; level++)
- wm[level] = new[level];
-
- drm_modeset_unlock_all(&dev_priv->drm);
-
- return len;
-}
-
-
-static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.pri_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.spr_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.cur_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static const struct file_operations i915_pri_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = pri_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = pri_wm_latency_write
-};
-
-static const struct file_operations i915_spr_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = spr_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = spr_wm_latency_write
-};
-
-static const struct file_operations i915_cur_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = cur_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cur_wm_latency_write
-};
-
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
const char __user *ubuf,
@@ -1593,9 +1361,6 @@ static const struct {
const struct file_operations *fops;
} intel_display_debugfs_files[] = {
{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
- {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
- {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
- {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
@@ -1622,7 +1387,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
intel_hpd_debugfs_register(i915);
- skl_watermark_ipc_debugfs_register(i915);
+ intel_wm_debugfs_register(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h
new file mode 100644
index 000000000000..5126d0b5ae5d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_limits.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_LIMITS_H__
+#define __INTEL_DISPLAY_LIMITS_H__
+
+/*
+ * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
+ * rest have consecutive values and match the enum values of transcoders
+ * with a 1:1 transcoder -> pipe mapping.
+ */
+enum pipe {
+ INVALID_PIPE = -1,
+
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ PIPE_D,
+ _PIPE_EDP,
+
+ I915_MAX_PIPES = _PIPE_EDP
+};
+
+enum transcoder {
+ INVALID_TRANSCODER = -1,
+ /*
+ * The following transcoders have a 1:1 transcoder -> pipe mapping,
+ * keep their values fixed: the code assumes that TRANSCODER_A=0, the
+ * rest have consecutive values and match the enum values of the pipes
+ * they map to.
+ */
+ TRANSCODER_A = PIPE_A,
+ TRANSCODER_B = PIPE_B,
+ TRANSCODER_C = PIPE_C,
+ TRANSCODER_D = PIPE_D,
+
+ /*
+ * The following transcoders can map to any pipe, their enum value
+ * doesn't need to stay fixed.
+ */
+ TRANSCODER_EDP,
+ TRANSCODER_DSI_0,
+ TRANSCODER_DSI_1,
+ TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
+ TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
+
+ I915_MAX_TRANSCODERS
+};
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC. Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
+ */
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
+ PLANE_SPRITE2,
+ PLANE_SPRITE3,
+ PLANE_SPRITE4,
+ PLANE_SPRITE5,
+ PLANE_CURSOR,
+
+ I915_MAX_PLANES,
+};
+
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+ PORT_G,
+ PORT_H,
+ PORT_I,
+
+ /* tgl+ */
+ PORT_TC1 = PORT_D,
+ PORT_TC2,
+ PORT_TC3,
+ PORT_TC4,
+ PORT_TC5,
+ PORT_TC6,
+
+ /* XE_LPD repositions D/E offsets and bitfields */
+ PORT_D_XELPD = PORT_TC5,
+ PORT_E_XELPD,
+
+ I915_MAX_PORTS
+};
+
+enum hpd_pin {
+ HPD_NONE = 0,
+ HPD_TV = HPD_NONE, /* TV is known to be unreliable */
+ HPD_CRT,
+ HPD_SDVO_B,
+ HPD_SDVO_C,
+ HPD_PORT_A,
+ HPD_PORT_B,
+ HPD_PORT_C,
+ HPD_PORT_D,
+ HPD_PORT_E,
+ HPD_PORT_TC1,
+ HPD_PORT_TC2,
+ HPD_PORT_TC3,
+ HPD_PORT_TC4,
+ HPD_PORT_TC5,
+ HPD_PORT_TC6,
+
+ HPD_NUM_PINS
+};
+
+#endif /* __INTEL_DISPLAY_LIMITS_H__ */
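The enum comments above promise a fixed PIPE_x == TRANSCODER_x correspondence for the first four transcoders. A minimal sketch of how code can lean on that invariant; the static asserts and the pipe_to_transcoder() helper are illustrative, not part of this patch:

/* Illustrative only: relies on the documented PIPE_x == TRANSCODER_x layout. */
#include "intel_display_limits.h"

static_assert((int)TRANSCODER_A == (int)PIPE_A);
static_assert((int)TRANSCODER_D == (int)PIPE_D);

/* Hypothetical helper, valid only for the 1:1-mapped transcoders A..D. */
static inline enum transcoder pipe_to_transcoder(enum pipe pipe)
{
	return (enum transcoder)pipe;
}

Anything past TRANSCODER_D (eDP, DSI) carries no such guarantee and has to be looked up from state instead, per the comment in the header.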
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 1a23ecd4623a..f085ae971150 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -264,9 +264,10 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
}
static u32
-sanitize_target_dc_state(struct drm_i915_private *dev_priv,
+sanitize_target_dc_state(struct drm_i915_private *i915,
u32 target_dc_state)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
static const u32 states[] = {
DC_STATE_EN_UPTO_DC6,
DC_STATE_EN_UPTO_DC5,
@@ -279,7 +280,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
if (target_dc_state != states[i])
continue;
- if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
+ if (power_domains->allowed_dc_mask & target_dc_state)
break;
target_dc_state = states[i + 1];
@@ -312,7 +313,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
state = sanitize_target_dc_state(dev_priv, state);
- if (state == dev_priv->display.dmc.target_dc_state)
+ if (state == power_domains->target_dc_state)
goto unlock;
dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
@@ -323,7 +324,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
if (!dc_off_enabled)
intel_power_well_enable(dev_priv, power_well);
- dev_priv->display.dmc.target_dc_state = state;
+ power_domains->target_dc_state = state;
if (!dc_off_enabled)
intel_power_well_disable(dev_priv, power_well);
@@ -992,10 +993,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
dev_priv->params.disable_power_well);
- dev_priv->display.dmc.allowed_dc_mask =
+ power_domains->allowed_dc_mask =
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
- dev_priv->display.dmc.target_dc_state =
+ power_domains->target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
@@ -1260,9 +1261,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val |= LCPLL_POWER_DOWN_ALLOW;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
intel_de_posting_read(dev_priv, LCPLL_CTL);
}
}
@@ -1306,9 +1305,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val &= ~LCPLL_CD_SOURCE_FCLK;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
@@ -1347,15 +1344,11 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
- }
+ if (HAS_PCH_LPT_LP(dev_priv))
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ PCH_LP_PARTITION_LEVEL_DISABLE, 0);
lpt_disable_clkout_dp(dev_priv);
hsw_disable_lcpll(dev_priv, true, true);
@@ -1363,25 +1356,21 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
intel_init_pch_refclk(dev_priv);
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
- val |= PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
- }
+ if (HAS_PCH_LPT_LP(dev_priv))
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ 0, PCH_LP_PARTITION_LEVEL_DISABLE);
}
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
bool enable)
{
i915_reg_t reg;
- u32 reset_bits, val;
+ u32 reset_bits;
if (IS_IVYBRIDGE(dev_priv)) {
reg = GEN7_MSG_CTL;
@@ -1394,14 +1383,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
if (DISPLAY_VER(dev_priv) >= 14)
reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
- val = intel_de_read(dev_priv, reg);
-
- if (enable)
- val |= reset_bits;
- else
- val &= ~reset_bits;
-
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
@@ -1580,10 +1562,8 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
return;
if (IS_ALDERLAKE_S(dev_priv) ||
- IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
- IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
- /* Wa_1409767108:tgl,dg1,adl-s */
+ IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ /* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
table = tgl_buddy_page_masks;
@@ -1618,7 +1598,6 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -1670,11 +1649,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_dmc_load_program(dev_priv);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
- if (DISPLAY_VER(dev_priv) >= 12) {
- val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
- DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
- intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, val);
- }
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
+ DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
+ DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
if (DISPLAY_VER(dev_priv) >= 13)
@@ -2055,7 +2033,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
intel_dmc_has_payload(i915)) {
intel_display_power_flush_work(i915);
@@ -2244,22 +2222,22 @@ void intel_display_power_suspend(struct drm_i915_private *i915)
void intel_display_power_resume(struct drm_i915_private *i915)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+
if (DISPLAY_VER(i915) >= 11) {
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
if (intel_dmc_has_payload(i915)) {
- if (i915->display.dmc.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC6)
+ if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(i915);
- else if (i915->display.dmc.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC5)
+ else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(i915);
}
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_disable_dc9(i915);
bxt_display_core_init(i915, true);
if (intel_dmc_has_payload(i915) &&
- (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
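Most of the churn in this file is mechanical: open-coded read/modify/write sequences collapse into intel_de_rmw(i915, reg, clear, set). A rough model of the semantics being relied on (a sketch for illustration, not the driver's actual implementation, which routes through the uncore layer):

/* Sketch: intel_de_rmw() clears 'clear', sets 'set', returns the old value. */
static u32 de_rmw_model(struct drm_i915_private *i915,
			i915_reg_t reg, u32 clear, u32 set)
{
	u32 old = intel_de_read(i915, reg);

	intel_de_write(i915, reg, (old & ~clear) | set);

	return old;
}

Under that model the LCPLL hunks above read naturally: clear = 0, set = LCPLL_POWER_DOWN_ALLOW to set a bit, and clear = LCPLL_CD_SOURCE_FCLK, set = 0 to clear one, matching the removed read/or/and/write triplets.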
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 2154d900b1aa..8e96be8e6330 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -137,6 +137,10 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
+ u32 dc_state;
+ u32 target_dc_state;
+ u32 allowed_dc_mask;
+
intel_wakeref_t init_wakeref;
intel_wakeref_t disable_wakeref;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index f5d66ca85b19..6645eb1911d8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -10,6 +10,7 @@
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_types.h"
#define __LIST_INLINE_ELEMS(__elem_type, ...) \
((__elem_type[]) { __VA_ARGS__ })
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 8710dd41ffd4..1676df1dc066 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -333,7 +333,6 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- u32 val;
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
@@ -356,9 +355,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
}
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
@@ -380,17 +377,27 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- u32 val;
hsw_power_well_pre_disable(dev_priv,
power_well->desc->irq_pipe_mask);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
+static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_EDP &&
+ encoder->port == port)
+ return true;
+ }
+
+ return false;
+}
+
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
@@ -398,29 +405,22 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- if (DISPLAY_VER(dev_priv) < 12) {
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val | ICL_LANE_ENABLE_AUX);
- }
+ if (DISPLAY_VER(dev_priv) < 12)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+ 0, ICL_LANE_ENABLE_AUX);
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
- val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
- val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
- }
+ !intel_port_is_edp(dev_priv, (enum port)phy))
+ intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx),
+ 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS);
}
static void
@@ -430,17 +430,12 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val & ~ICL_LANE_ENABLE_AUX);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -502,19 +497,15 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
- u32 val;
icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
- val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
- val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (is_tbt)
- val |= DP_AUX_CH_CTL_TBT_IO;
- intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
+ intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch),
+ DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
+ intel_de_rmw(dev_priv, regs->driver,
+ 0,
+ HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
/*
* An AUX timeout is expected if the TBT DP tunnel is down,
@@ -700,19 +691,20 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
return mask;
}
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+void gen9_sanitize_dc_state(struct drm_i915_private *i915)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
u32 val;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915))
return;
- val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
+ val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Resetting DC state tracking from %02x to %02x\n",
- dev_priv->display.dmc.dc_state, val);
- dev_priv->display.dmc.dc_state = val;
+ power_domains->dc_state, val);
+ power_domains->dc_state = val;
}
/**
@@ -740,6 +732,7 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
*/
void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u32 val;
u32 mask;
@@ -747,8 +740,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->display.dmc.allowed_dc_mask))
- state &= dev_priv->display.dmc.allowed_dc_mask;
+ state & ~power_domains->allowed_dc_mask))
+ state &= power_domains->allowed_dc_mask;
val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
@@ -756,16 +749,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
val & mask, state);
/* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->display.dmc.dc_state)
+ if ((val & mask) != power_domains->dc_state)
drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->display.dmc.dc_state, val & mask);
+ power_domains->dc_state, val & mask);
val &= ~mask;
val |= state;
gen9_write_dc_state(dev_priv, val);
- dev_priv->display.dmc.dc_state = val & mask;
+ power_domains->dc_state = val & mask;
}
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -776,12 +769,8 @@ static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
- val = intel_de_read(dev_priv, DC_STATE_EN);
- val &= ~DC_STATE_DC3CO_STATUS;
- intel_de_write(dev_priv, DC_STATE_EN, val);
+ intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/*
* Delay of 200us DC3CO Exit time B.Spec 49196
@@ -820,8 +809,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
/* Wa Display #1183: skl,kbl,cfl */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ 0, SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -847,8 +836,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
/* Wa Display #1183: skl,kbl,cfl */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ 0, SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
@@ -957,9 +946,10 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_cdclk_config cdclk_config = {};
- if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) {
+ if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
return;
}
@@ -998,10 +988,12 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+
if (!intel_dmc_has_payload(dev_priv))
return;
- switch (dev_priv->display.dmc.target_dc_state) {
+ switch (power_domains->target_dc_state) {
case DC_STATE_EN_DC3CO:
tgl_enable_dc3co(dev_priv);
break;
@@ -1033,9 +1025,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_A);
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_B);
}
@@ -1049,8 +1041,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE &&
+ intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
}
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1149,18 +1141,14 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
- u32 val;
-
/*
* On driver load, a pipe may be active and driving a DSI display.
* Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val &= DPOUNIT_CLOCK_GATE_DISABLE;
- val |= VRHUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/*
* Disable trickle feed and enable pnd deadline calculation
@@ -1276,8 +1264,7 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
+ intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST);
}
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1289,8 +1276,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, pipe);
/* Assert common reset */
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
+ intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0);
vlv_set_power_well(dev_priv, power_well, false);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
index 02605418ff08..755c1ea8225c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
@@ -13,7 +13,7 @@
#define VLV_DISPLAY_BASE 0x180000
/*
- * Named helper wrappers around _PICK_EVEN() and _PICK().
+ * Named helper wrappers around _PICK_EVEN() and _PICK_EVEN_2RANGES().
*/
#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
@@ -29,12 +29,8 @@
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b))
-#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
-
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
-#define _MMIO_PLL3(pll, ...) _MMIO(_PICK(pll, __VA_ARGS__))
+#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
/*
* Device info offset array based helpers for groups of registers with unevenly
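The _MMIO_PIPE3()/_MMIO_PORT3() rework drops the array-backed _PICK() in favour of _PICK_EVEN_2RANGES(). A simplified model of what the (index, 1, a, a, b, c) usage above is expected to compute; this is an approximation for illustration, not the kernel macro, which additionally enforces compile-time-constant arguments:

/*
 * Indices below 'split' pick from the first evenly spaced range (base a,
 * stride b - a); the rest pick from the second (base c, stride d - c).
 * With split == 1 and a degenerate first range (a, a): 0 -> a, 1 -> b, 2 -> c,
 * i.e. the same results as the old three-entry _PICK() table.
 */
static inline u32 pick_even_2ranges_model(unsigned int idx, unsigned int split,
					  u32 a, u32 b, u32 c, u32 d)
{
	return idx < split ? a + idx * (b - a)
			   : c + (idx - split) * (d - c);
}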
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
new file mode 100644
index 000000000000..918d0327169a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_vblank.h>
+
+#include "gt/intel_rps.h"
+#include "i915_drv.h"
+#include "intel_display_rps.h"
+#include "intel_display_types.h"
+
+struct wait_rps_boost {
+ struct wait_queue_entry wait;
+
+ struct drm_crtc *crtc;
+ struct i915_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+ unsigned mode, int sync, void *key)
+{
+ struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+ struct i915_request *rq = wait->request;
+
+ /*
+	 * If we missed the vblank, but the request is already running, it
+ * is reasonable to assume that it will complete before the next
+ * vblank without our intervention, so leave RPS alone.
+ */
+ if (!i915_request_started(rq))
+ intel_rps_boost(rq);
+ i915_request_put(rq);
+
+ drm_crtc_vblank_put(wait->crtc);
+
+ list_del(&wait->wait.entry);
+ kfree(wait);
+ return 1;
+}
+
+void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+ struct wait_rps_boost *wait;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ return;
+
+ if (drm_crtc_vblank_get(crtc))
+ return;
+
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ drm_crtc_vblank_put(crtc);
+ return;
+ }
+
+ wait->request = to_request(dma_fence_get(fence));
+ wait->crtc = crtc;
+
+ wait->wait.func = do_rps_boost;
+ wait->wait.flags = 0;
+
+ add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
+
+void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ bool interactive)
+{
+ if (state->rps_interactive == interactive)
+ return;
+
+ intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
+ state->rps_interactive = interactive;
+}
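The boost path arms a one-shot entry on the CRTC's vblank wait queue; when the vblank fires, do_rps_boost() only asks the GPU to clock up if the request feeding the flip has not started yet. A hedged usage sketch (the call site here is illustrative; in the driver the helper is invoked from the atomic plane prepare path):

/* Illustrative caller: boost if the fence backing a flip risks missing vblank. */
static void example_boost_for_flip(struct intel_crtc *crtc,
				   struct dma_fence *fence)
{
	if (!fence)
		return;

	/* No-op for non-i915 fences and for display versions < 6. */
	intel_display_rps_boost_after_vblank(&crtc->base, fence);
}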
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h
new file mode 100644
index 000000000000..e19009c2371a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_RPS_H__
+#define __INTEL_DISPLAY_RPS_H__
+
+#include <linux/types.h>
+
+struct dma_fence;
+struct drm_crtc;
+struct drm_i915_private;
+struct intel_atomic_state;
+
+void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence);
+void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ bool interactive);
+
+#endif /* __INTEL_DISPLAY_RPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 725aba3fa531..651ea8564e1b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -17,6 +17,7 @@
#include "i915_irq.h"
#include "intel_crtc.h"
#include "intel_display_types.h"
+#include "intel_vblank.h"
#define __dev_name_i915(i915) dev_name((i915)->drm.dev)
#define __dev_name_kms(obj) dev_name((obj)->base.dev->dev)
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 32e8b2fc3cc6..c32bfba06ca1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -50,9 +50,10 @@
#include "i915_vma_types.h"
#include "intel_bios.h"
#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_printer;
struct __intel_global_objs_state;
@@ -262,8 +263,6 @@ struct intel_encoder {
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
- /* for communication with audio component; protected by av_mutex */
- const struct drm_connector *audio_connector;
/* VBT information for this encoder (may be NULL for older platforms) */
const struct intel_bios_encoder_data *devdata;
@@ -327,6 +326,7 @@ struct intel_vbt_panel_data {
struct {
u16 pwm_freq_hz;
u16 brightness_precision_bits;
+ u16 hdr_dpcd_refresh_timeout;
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
@@ -351,6 +351,9 @@ struct intel_vbt_panel_data {
};
struct intel_panel {
+ /* Fixed EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ const struct drm_edid *fixed_edid;
+
struct list_head fixed_modes;
/* backlight */
@@ -591,9 +594,8 @@ struct intel_connector {
/* Panel info for eDP and LVDS */
struct intel_panel panel;
- /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
- struct edid *edid;
- struct edid *detect_edid;
+ /* Cached EDID for detect. */
+ const struct drm_edid *detect_edid;
/* Number of times hotplug detection was tried after an HPD interrupt */
int hotplug_retries;
@@ -1248,6 +1250,9 @@ struct intel_crtc_state {
/* bitmask of planes that will be updated during the commit */
u8 update_planes;
+ /* bitmask of planes with async flip active */
+ u8 async_flip_planes;
+
u8 framestart_delay; /* 1-4 */
u8 msa_timing_delay; /* 0-3 */
@@ -1261,6 +1266,8 @@ struct intel_crtc_state {
struct drm_dp_vsc_sdp vsc;
} infoframes;
+ u8 eld[MAX_ELD_BYTES];
+
/* HDMI scrambling status */
bool hdmi_scrambling;
@@ -1499,17 +1506,6 @@ struct intel_watermark_params {
u8 cacheline_size;
};
-struct cxsr_latency {
- bool is_desktop : 1;
- bool is_ddr3 : 1;
- u16 fsb_freq;
- u16 mem_freq;
- u16 display_sr;
- u16 display_hpll_disable;
- u16 cursor_sr;
- u16 cursor_hpll_disable;
-};
-
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
@@ -1628,6 +1624,8 @@ struct intel_psr {
bool psr2_sel_fetch_cff_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 4124b3d37110..6b162f77340e 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -38,55 +38,98 @@
* low-power state and comes back to normal.
*/
+enum intel_dmc_id {
+ DMC_FW_MAIN = 0,
+ DMC_FW_PIPEA,
+ DMC_FW_PIPEB,
+ DMC_FW_PIPEC,
+ DMC_FW_PIPED,
+ DMC_FW_MAX
+};
+
+struct intel_dmc {
+ struct drm_i915_private *i915;
+ struct work_struct work;
+ const char *fw_path;
+ u32 max_fw_size; /* bytes */
+ u32 version;
+ struct dmc_fw_info {
+ u32 mmio_count;
+ i915_reg_t mmioaddr[20];
+ u32 mmiodata[20];
+ u32 dmc_offset;
+ u32 start_mmioaddr;
+		u32 dmc_fw_size; /* dwords */
+ u32 *payload;
+ bool present;
+ } dmc_info[DMC_FW_MAX];
+};
+
+/* Note: This may be NULL. */
+static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
+{
+ return i915->display.dmc.dmc;
+}
+
#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version) ((version) >> 16)
#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
-#define DMC_PATH(platform, major, minor) \
- "i915/" \
- __stringify(platform) "_dmc_ver" \
- __stringify(major) "_" \
+#define DMC_PATH(platform) \
+ "i915/" __stringify(platform) "_dmc.bin"
+
+/*
+ * New DMC additions should not use this. This is used solely to remain
+ * compatible with systems that have not yet updated DMC blobs to use
+ * unversioned file names.
+ */
+#define DMC_LEGACY_PATH(platform, major, minor) \
+ "i915/" \
+ __stringify(platform) "_dmc_ver" \
+ __stringify(major) "_" \
__stringify(minor) ".bin"
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define DG2_DMC_PATH DMC_PATH(dg2, 2, 08)
+#define DG2_DMC_PATH DMC_LEGACY_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);
-#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
+#define ADLP_DMC_PATH DMC_PATH(adlp)
+#define ADLP_DMC_FALLBACK_PATH DMC_LEGACY_PATH(adlp, 2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
+MODULE_FIRMWARE(ADLP_DMC_FALLBACK_PATH);
-#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
+#define ADLS_DMC_PATH DMC_LEGACY_PATH(adls, 2, 01)
MODULE_FIRMWARE(ADLS_DMC_PATH);
-#define DG1_DMC_PATH DMC_PATH(dg1, 2, 02)
+#define DG1_DMC_PATH DMC_LEGACY_PATH(dg1, 2, 02)
MODULE_FIRMWARE(DG1_DMC_PATH);
-#define RKL_DMC_PATH DMC_PATH(rkl, 2, 03)
+#define RKL_DMC_PATH DMC_LEGACY_PATH(rkl, 2, 03)
MODULE_FIRMWARE(RKL_DMC_PATH);
-#define TGL_DMC_PATH DMC_PATH(tgl, 2, 12)
+#define TGL_DMC_PATH DMC_LEGACY_PATH(tgl, 2, 12)
MODULE_FIRMWARE(TGL_DMC_PATH);
-#define ICL_DMC_PATH DMC_PATH(icl, 1, 09)
+#define ICL_DMC_PATH DMC_LEGACY_PATH(icl, 1, 09)
#define ICL_DMC_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(ICL_DMC_PATH);
-#define GLK_DMC_PATH DMC_PATH(glk, 1, 04)
+#define GLK_DMC_PATH DMC_LEGACY_PATH(glk, 1, 04)
#define GLK_DMC_MAX_FW_SIZE 0x4000
MODULE_FIRMWARE(GLK_DMC_PATH);
-#define KBL_DMC_PATH DMC_PATH(kbl, 1, 04)
+#define KBL_DMC_PATH DMC_LEGACY_PATH(kbl, 1, 04)
#define KBL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_DMC_PATH);
-#define SKL_DMC_PATH DMC_PATH(skl, 1, 27)
+#define SKL_DMC_PATH DMC_LEGACY_PATH(skl, 1, 27)
#define SKL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_DMC_PATH);
-#define BXT_DMC_PATH DMC_PATH(bxt, 1, 07)
+#define BXT_DMC_PATH DMC_LEGACY_PATH(bxt, 1, 07)
#define BXT_DMC_MAX_FW_SIZE 0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);
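The DMC_PATH()/DMC_LEGACY_PATH() split is easiest to see by expanding the ADL-P pair, the only platform that gets both names via the new fallback logic; the strings below follow directly from the two macros above:

static const char * const adlp_dmc_paths[] = {
	DMC_PATH(adlp),			/* "i915/adlp_dmc.bin" */
	DMC_LEGACY_PATH(adlp, 2, 16),	/* "i915/adlp_dmc_ver2_16.bin" */
};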
@@ -97,6 +140,8 @@ MODULE_FIRMWARE(BXT_DMC_PATH);
#define DMC_V3_MAX_MMIO_COUNT 20
#define DMC_V1_MMIO_START_RANGE 0x80000
+#define PIPE_TO_DMC_ID(pipe) (DMC_FW_PIPEA + ((pipe) - PIPE_A))
+
struct intel_css_header {
/* 0x09 for DMC */
u32 module_type;
@@ -237,9 +282,19 @@ struct stepping_info {
char substepping;
};
-static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
+#define for_each_dmc_id(__dmc_id) \
+ for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++)
+
+static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
+{
+ return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
+}
+
+static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id)
{
- return i915->display.dmc.dmc_info[dmc_id].payload;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ return dmc && dmc->dmc_info[dmc_id].payload;
}
bool intel_dmc_has_payload(struct drm_i915_private *i915)
@@ -258,12 +313,12 @@ intel_get_stepping_info(struct drm_i915_private *i915,
return si;
}
-static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915)
{
/* The below bit doesn't need to be cleared ever afterwards */
- intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0,
+ intel_de_rmw(i915, DC_STATE_DEBUG, 0,
DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
- intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
+ intel_de_posting_read(i915, DC_STATE_DEBUG);
}
static void disable_event_handler(struct drm_i915_private *i915,
@@ -303,26 +358,23 @@ disable_flip_queue_event(struct drm_i915_private *i915,
}
static bool
-get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
+get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id,
i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
{
- switch (dmc_id) {
- case DMC_FW_MAIN:
+ if (dmc_id == DMC_FW_MAIN) {
if (DISPLAY_VER(i915) == 12) {
*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
*htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
return true;
}
- break;
- case DMC_FW_PIPEA ... DMC_FW_PIPED:
+ } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) {
if (IS_DG2(i915)) {
*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
*htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
return true;
}
- break;
}
return false;
@@ -331,13 +383,13 @@ get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
static void
disable_all_flip_queue_events(struct drm_i915_private *i915)
{
- int dmc_id;
+ enum intel_dmc_id dmc_id;
/* TODO: check if the following applies to all D13+ platforms. */
if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
return;
- for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
+ for_each_dmc_id(dmc_id) {
i915_reg_t ctl_reg;
i915_reg_t htp_reg;
@@ -353,22 +405,22 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
static void disable_all_event_handlers(struct drm_i915_private *i915)
{
- int id;
+ enum intel_dmc_id dmc_id;
/* TODO: disable the event handlers on pre-GEN12 platforms as well */
if (DISPLAY_VER(i915) < 12)
return;
- for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) {
+ for_each_dmc_id(dmc_id) {
int handler;
- if (!has_dmc_id_fw(i915, id))
+ if (!has_dmc_id_fw(i915, dmc_id))
continue;
for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
disable_event_handler(i915,
- DMC_EVT_CTL(i915, id, handler),
- DMC_EVT_HTP(i915, id, handler));
+ DMC_EVT_CTL(i915, dmc_id, handler),
+ DMC_EVT_HTP(i915, dmc_id, handler));
}
}
@@ -396,59 +448,87 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
PIPEDMC_GATING_DIS, 0);
}
+void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
+{
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
+ return;
+
+ if (DISPLAY_VER(i915) >= 14)
+ intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
+ else
+ intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
+}
+
+void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
+{
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
+ return;
+
+ if (DISPLAY_VER(i915) >= 14)
+ intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
+ else
+ intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
- * @dev_priv: i915 drm device.
+ * @i915: i915 drm device.
*
 * DMC firmware is read from a .bin file and kept in internal memory once.
 * Every time the display comes back from a low power state, this function is
 * called to copy the firmware from internal memory to registers.
*/
-void intel_dmc_load_program(struct drm_i915_private *dev_priv)
+void intel_dmc_load_program(struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &dev_priv->display.dmc;
- u32 id, i;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+ enum intel_dmc_id dmc_id;
+ u32 i;
- if (!intel_dmc_has_payload(dev_priv))
+ if (!intel_dmc_has_payload(i915))
return;
- pipedmc_clock_gating_wa(dev_priv, true);
+ pipedmc_clock_gating_wa(i915, true);
- disable_all_event_handlers(dev_priv);
+ disable_all_event_handlers(i915);
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ assert_rpm_wakelock_held(&i915->runtime_pm);
preempt_disable();
- for (id = 0; id < DMC_FW_MAX; id++) {
- for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
- intel_de_write_fw(dev_priv,
- DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
- dmc->dmc_info[id].payload[i]);
+ for_each_dmc_id(dmc_id) {
+ for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
+ intel_de_write_fw(i915,
+ DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
+ dmc->dmc_info[dmc_id].payload[i]);
}
}
preempt_enable();
- for (id = 0; id < DMC_FW_MAX; id++) {
- for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) {
- intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i],
- dmc->dmc_info[id].mmiodata[i]);
+ for_each_dmc_id(dmc_id) {
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
+ dmc->dmc_info[dmc_id].mmiodata[i]);
}
}
- dev_priv->display.dmc.dc_state = 0;
+ power_domains->dc_state = 0;
- gen9_set_dc_state_debugmask(dev_priv);
+ gen9_set_dc_state_debugmask(i915);
/*
* Flip queue events need to be disabled before enabling DC5/6.
* i915 doesn't use the flip queue feature, so disable it already
* here.
*/
- disable_all_flip_queue_events(dev_priv);
+ disable_all_flip_queue_events(i915);
- pipedmc_clock_gating_wa(dev_priv, false);
+ pipedmc_clock_gating_wa(i915, false);
}
/**
@@ -470,8 +550,11 @@ void intel_dmc_disable_program(struct drm_i915_private *i915)
void assert_dmc_loaded(struct drm_i915_private *i915)
{
- drm_WARN_ONCE(&i915->drm,
- !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n");
+ drm_WARN_ONCE(&i915->drm, dmc &&
+ !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
"DMC program storage start is NULL\n");
drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
"DMC SSP Base Not fine\n");
@@ -506,15 +589,15 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
const struct stepping_info *si,
u8 package_ver)
{
- unsigned int i, id;
-
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
+ enum intel_dmc_id dmc_id;
+ unsigned int i;
for (i = 0; i < num_entries; i++) {
- id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
+ dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
- if (id >= DMC_FW_MAX) {
- drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id);
+ if (!is_valid_dmc_id(dmc_id)) {
+ drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id);
continue;
}
@@ -522,29 +605,24 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
* check for the stepping since we already found a previous FW
* for this id.
*/
- if (dmc->dmc_info[id].present)
+ if (dmc->dmc_info[dmc_id].present)
continue;
if (fw_info_matches_stepping(&fw_info[i], si)) {
- dmc->dmc_info[id].present = true;
- dmc->dmc_info[id].dmc_offset = fw_info[i].offset;
+ dmc->dmc_info[dmc_id].present = true;
+ dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset;
}
}
}
static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
const u32 *mmioaddr, u32 mmio_count,
- int header_ver, u8 dmc_id)
+ int header_ver, enum intel_dmc_id dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
u32 start_range, end_range;
int i;
- if (dmc_id >= DMC_FW_MAX) {
- drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
- return false;
- }
-
if (header_ver == 1) {
start_range = DMC_MMIO_START_RANGE;
end_range = DMC_MMIO_END_RANGE;
@@ -572,9 +650,9 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
- size_t rem_size, u8 dmc_id)
+ size_t rem_size, enum intel_dmc_id dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
@@ -685,7 +763,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
const struct stepping_info *si,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries;
const struct intel_fw_info *fw_info;
@@ -739,7 +817,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
struct intel_css_header *css_header,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
if (rem_size < sizeof(struct intel_css_header)) {
drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
@@ -759,18 +837,17 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
return sizeof(struct intel_css_header);
}
-static void parse_dmc_fw(struct drm_i915_private *dev_priv,
- const struct firmware *fw)
+static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
+ struct drm_i915_private *i915 = dmc->i915;
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
- struct intel_dmc *dmc = &dev_priv->display.dmc;
struct stepping_info display_info = { '*', '*'};
- const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
+ const struct stepping_info *si = intel_get_stepping_info(i915, &display_info);
+ enum intel_dmc_id dmc_id;
u32 readcount = 0;
u32 r, offset;
- int id;
if (!fw)
return;
@@ -791,62 +868,79 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
readcount += r;
- for (id = 0; id < DMC_FW_MAX; id++) {
- if (!dev_priv->display.dmc.dmc_info[id].present)
+ for_each_dmc_id(dmc_id) {
+ if (!dmc->dmc_info[dmc_id].present)
continue;
- offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
+ offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;
if (offset > fw->size) {
- drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
+ drm_err(&i915->drm, "Reading beyond the fw_size\n");
continue;
}
dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
- parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id);
+ parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
}
}
-static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
{
- drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
- dev_priv->display.dmc.wakeref =
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
+ i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
-static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&dev_priv->display.dmc.wakeref);
+ fetch_and_zero(&i915->display.dmc.wakeref);
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+}
+
+static const char *dmc_fallback_path(struct drm_i915_private *i915)
+{
+ if (IS_ALDERLAKE_P(i915))
+ return ADLP_DMC_FALLBACK_PATH;
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+ return NULL;
}
static void dmc_load_work_fn(struct work_struct *work)
{
- struct drm_i915_private *dev_priv;
- struct intel_dmc *dmc;
+ struct intel_dmc *dmc = container_of(work, typeof(*dmc), work);
+ struct drm_i915_private *i915 = dmc->i915;
const struct firmware *fw = NULL;
+ const char *fallback_path;
+ int err;
+
+ err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);
+
+ if (err == -ENOENT && !i915->params.dmc_firmware_path) {
+ fallback_path = dmc_fallback_path(i915);
+ if (fallback_path) {
+ drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
+ dmc->fw_path, fallback_path);
+ err = request_firmware(&fw, fallback_path, i915->drm.dev);
+ if (err == 0)
+ dmc->fw_path = fallback_path;
+ }
+ }
- dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
- dmc = &dev_priv->display.dmc;
-
- request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
- parse_dmc_fw(dev_priv, fw);
+ parse_dmc_fw(dmc, fw);
- if (intel_dmc_has_payload(dev_priv)) {
- intel_dmc_load_program(dev_priv);
- intel_dmc_runtime_pm_put(dev_priv);
+ if (intel_dmc_has_payload(i915)) {
+ intel_dmc_load_program(i915);
+ intel_dmc_runtime_pm_put(i915);
- drm_info(&dev_priv->drm,
- "Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
+ drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
+ dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
DMC_VERSION_MINOR(dmc->version));
} else {
- drm_notice(&dev_priv->drm,
+ drm_notice(&i915->drm,
"Failed to load DMC firmware %s."
" Disabling runtime power management.\n",
dmc->fw_path);
- drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
+ drm_notice(&i915->drm, "DMC firmware homepage: %s",
INTEL_UC_FIRMWARE_URL);
}
@@ -854,19 +948,17 @@ static void dmc_load_work_fn(struct work_struct *work)
}
/**
- * intel_dmc_ucode_init() - initialize the firmware loading.
- * @dev_priv: i915 drm device.
+ * intel_dmc_init() - initialize the firmware loading.
+ * @i915: i915 drm device.
*
 * This function is called at display driver load time to read the firmware
 * from a .bin file and copy it into internal memory.
*/
-void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
+void intel_dmc_init(struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &dev_priv->display.dmc;
-
- INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);
+ struct intel_dmc *dmc;
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
/*
@@ -877,168 +969,192 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
* suspend as runtime suspend *requires* a working DMC for whatever
* reason.
*/
- intel_dmc_runtime_pm_get(dev_priv);
+ intel_dmc_runtime_pm_get(i915);
+
+ dmc = kzalloc(sizeof(*dmc), GFP_KERNEL);
+ if (!dmc)
+ return;
- if (IS_DG2(dev_priv)) {
+ dmc->i915 = i915;
+
+ INIT_WORK(&dmc->work, dmc_load_work_fn);
+
+ if (IS_DG2(i915)) {
dmc->fw_path = DG2_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(dev_priv)) {
+ } else if (IS_ALDERLAKE_P(i915)) {
dmc->fw_path = ADLP_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(dev_priv)) {
+ } else if (IS_ALDERLAKE_S(i915)) {
dmc->fw_path = ADLS_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(dev_priv)) {
+ } else if (IS_DG1(i915)) {
dmc->fw_path = DG1_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (IS_ROCKETLAKE(i915)) {
dmc->fw_path = RKL_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(dev_priv)) {
+ } else if (IS_TIGERLAKE(i915)) {
dmc->fw_path = TGL_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER(dev_priv) == 11) {
+ } else if (DISPLAY_VER(i915) == 11) {
dmc->fw_path = ICL_DMC_PATH;
dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(dev_priv)) {
+ } else if (IS_GEMINILAKE(i915)) {
dmc->fw_path = GLK_DMC_PATH;
dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv)) {
+ } else if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
dmc->fw_path = KBL_DMC_PATH;
dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(i915)) {
dmc->fw_path = SKL_DMC_PATH;
dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_BROXTON(i915)) {
dmc->fw_path = BXT_DMC_PATH;
dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
- if (dev_priv->params.dmc_firmware_path) {
- if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
- dmc->fw_path = NULL;
- drm_info(&dev_priv->drm,
+ if (i915->params.dmc_firmware_path) {
+ if (strlen(i915->params.dmc_firmware_path) == 0) {
+ drm_info(&i915->drm,
"Disabling DMC firmware and runtime PM\n");
- return;
+ goto out;
}
- dmc->fw_path = dev_priv->params.dmc_firmware_path;
+ dmc->fw_path = i915->params.dmc_firmware_path;
}
if (!dmc->fw_path) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"No known DMC firmware for platform, disabling runtime PM\n");
- return;
+ goto out;
}
- drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
- schedule_work(&dev_priv->display.dmc.work);
+ i915->display.dmc.dmc = dmc;
+
+ drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
+ schedule_work(&dmc->work);
+
+ return;
+
+out:
+ kfree(dmc);
}
/**
- * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend
- * @dev_priv: i915 drm device
+ * intel_dmc_suspend() - prepare DMC firmware before system suspend
+ * @i915: i915 drm device
*
* Prepare the DMC firmware before entering system suspend. This includes
* flushing pending work items and releasing any resources acquired during
* init.
*/
-void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
+void intel_dmc_suspend(struct drm_i915_private *i915)
{
- if (!HAS_DMC(dev_priv))
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ if (!HAS_DMC(i915))
return;
- flush_work(&dev_priv->display.dmc.work);
+ if (dmc)
+ flush_work(&dmc->work);
/* Drop the reference held in case DMC isn't loaded. */
- if (!intel_dmc_has_payload(dev_priv))
- intel_dmc_runtime_pm_put(dev_priv);
+ if (!intel_dmc_has_payload(i915))
+ intel_dmc_runtime_pm_put(i915);
}
/**
- * intel_dmc_ucode_resume() - init DMC firmware during system resume
- * @dev_priv: i915 drm device
+ * intel_dmc_resume() - init DMC firmware during system resume
+ * @i915: i915 drm device
*
* Reinitialize the DMC firmware during system resume, reacquiring any
- * resources released in intel_dmc_ucode_suspend().
+ * resources released in intel_dmc_suspend().
*/
-void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
+void intel_dmc_resume(struct drm_i915_private *i915)
{
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
/*
* Reacquire the reference to keep RPM disabled in case DMC isn't
* loaded.
*/
- if (!intel_dmc_has_payload(dev_priv))
- intel_dmc_runtime_pm_get(dev_priv);
+ if (!intel_dmc_has_payload(i915))
+ intel_dmc_runtime_pm_get(i915);
}
/**
- * intel_dmc_ucode_fini() - unload the DMC firmware.
- * @dev_priv: i915 drm device.
+ * intel_dmc_fini() - unload the DMC firmware.
+ * @i915: i915 drm device.
*
 * Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
-void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
+void intel_dmc_fini(struct drm_i915_private *i915)
{
- int id;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+ enum intel_dmc_id dmc_id;
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
- intel_dmc_ucode_suspend(dev_priv);
- drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
+ intel_dmc_suspend(i915);
+ drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
+
+ if (dmc) {
+ for_each_dmc_id(dmc_id)
+ kfree(dmc->dmc_info[dmc_id].payload);
- for (id = 0; id < DMC_FW_MAX; id++)
- kfree(dev_priv->display.dmc.dmc_info[id].payload);
+ kfree(dmc);
+ i915->display.dmc.dmc = NULL;
+ }
}
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &i915->display.dmc;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
if (!HAS_DMC(i915))
return;
+ i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
i915_error_printf(m, "DMC loaded: %s\n",
str_yes_no(intel_dmc_has_payload(i915)));
- i915_error_printf(m, "DMC fw version: %d.%d\n",
- DMC_VERSION_MAJOR(dmc->version),
- DMC_VERSION_MINOR(dmc->version));
+ if (dmc)
+ i915_error_printf(m, "DMC fw version: %d.%d\n",
+ DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
}
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = m->private;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
intel_wakeref_t wakeref;
- struct intel_dmc *dmc;
i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
if (!HAS_DMC(i915))
return -ENODEV;
- dmc = &i915->display.dmc;
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
seq_printf(m, "fw loaded: %s\n",
str_yes_no(intel_dmc_has_payload(i915)));
- seq_printf(m, "path: %s\n", dmc->fw_path);
+ seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
seq_printf(m, "Pipe A fw needed: %s\n",
str_yes_no(GRAPHICS_VER(i915) >= 12));
seq_printf(m, "Pipe A fw loaded: %s\n",
- str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload));
+ str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
str_yes_no(IS_ALDERLAKE_P(i915) ||
DISPLAY_VER(i915) >= 14));
seq_printf(m, "Pipe B fw loaded: %s\n",
- str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload));
+ str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB)));
if (!intel_dmc_has_payload(i915))
goto out;
@@ -1072,9 +1188,10 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "DC5 -> DC6 count: %d\n",
intel_de_read(i915, dc6_reg));
-out:
seq_printf(m, "program base: 0x%08x\n",
intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
+
+out:
seq_printf(m, "ssp base: 0x%08x\n",
intel_de_read(i915, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 435eab9b016b..fd607afff2ef 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -6,50 +6,20 @@
#ifndef __INTEL_DMC_H__
#define __INTEL_DMC_H__
-#include "i915_reg_defs.h"
-#include "intel_wakeref.h"
-#include <linux/workqueue.h>
+#include <linux/types.h>
struct drm_i915_error_state_buf;
struct drm_i915_private;
+enum pipe;
-enum {
- DMC_FW_MAIN = 0,
- DMC_FW_PIPEA,
- DMC_FW_PIPEB,
- DMC_FW_PIPEC,
- DMC_FW_PIPED,
- DMC_FW_MAX
-};
-
-struct intel_dmc {
- struct work_struct work;
- const char *fw_path;
- u32 max_fw_size; /* bytes */
- u32 version;
- struct dmc_fw_info {
- u32 mmio_count;
- i915_reg_t mmioaddr[20];
- u32 mmiodata[20];
- u32 dmc_offset;
- u32 start_mmioaddr;
- u32 dmc_fw_size; /*dwords */
- u32 *payload;
- bool present;
- } dmc_info[DMC_FW_MAX];
-
- u32 dc_state;
- u32 target_dc_state;
- u32 allowed_dc_mask;
- intel_wakeref_t wakeref;
-};
-
-void intel_dmc_ucode_init(struct drm_i915_private *i915);
+void intel_dmc_init(struct drm_i915_private *i915);
void intel_dmc_load_program(struct drm_i915_private *i915);
void intel_dmc_disable_program(struct drm_i915_private *i915);
-void intel_dmc_ucode_fini(struct drm_i915_private *i915);
-void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
-void intel_dmc_ucode_resume(struct drm_i915_private *i915);
+void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe);
+void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe);
+void intel_dmc_fini(struct drm_i915_private *i915);
+void intel_dmc_suspend(struct drm_i915_private *i915);
+void intel_dmc_resume(struct drm_i915_private *i915);
bool intel_dmc_has_payload(struct drm_i915_private *i915);
void intel_dmc_debugfs_register(struct drm_i915_private *i915);
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 5e5e41644ddf..cf10094acae3 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -11,6 +11,16 @@
#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
+#define _PIPEDMC_CONTROL_A 0x45250
+#define _PIPEDMC_CONTROL_B 0x45254
+#define PIPEDMC_CONTROL(pipe) _MMIO_PIPE(pipe, \
+ _PIPEDMC_CONTROL_A, \
+ _PIPEDMC_CONTROL_B)
+#define PIPEDMC_ENABLE REG_BIT(0)
+
+#define MTL_PIPEDMC_CONTROL _MMIO(0x45250)
+#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)
+
#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
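
The header hunk above declares intel_dmc_enable_pipe()/intel_dmc_disable_pipe(), and the new PIPEDMC registers here are what they would program. A hedged sketch of the enable side — the DISPLAY_VER(i915) >= 14 split for the MTL_-prefixed variant is an assumption based on the prefix, and the real implementation lives in a hunk not shown in this excerpt:

void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
	if (DISPLAY_VER(i915) >= 14)
		intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
	else
		intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
}
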
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 80d95cec8f9d..aee93b0d810e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -288,7 +288,7 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
{
- int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base);
+ int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata);
int max_lanes = dig_port->max_lanes;
if (vbt_max_lanes)
@@ -425,7 +425,7 @@ static int vbt_max_link_rate(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int max_rate;
- max_rate = intel_bios_dp_max_link_rate(encoder);
+ max_rate = intel_bios_dp_max_link_rate(encoder->devdata);
if (intel_dp_is_edp(intel_dp)) {
struct intel_connector *connector = intel_dp->attached_connector;
@@ -1415,6 +1415,28 @@ static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp)
DP_DSC_MINOR_SHIFT;
}
+static int intel_dp_get_slice_height(int vactive)
+{
+ int slice_height;
+
+ /*
+ * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108
+ * lines is an optimal slice height, but any size can be used as long as
+ * vertical active integer multiple and maximum vertical slice count
+ * requirements are met.
+ */
+ for (slice_height = 108; slice_height <= vactive; slice_height += 2)
+ if (vactive % slice_height == 0)
+ return slice_height;
+
+ /*
+	 * Highly unlikely we reach here, as most resolutions will find a
+	 * suitable slice_height in the loop above; fall back to a
+	 * slice_height of 2, which should work for all resolutions.
+ */
+ return 2;
+}
+
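
Worked example of intel_dp_get_slice_height(): a 2160-line mode accepts 108 on the first iteration (2160 = 20 * 108), while a 1200-line panel rejects 108 through 118 and settles on 120 (1200 = 10 * 120); the previous code would have picked a slice height of 8 for both. A vactive with no even divisor of at least 108 (an odd line count, for instance) falls through to the fallback of 2.
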
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@@ -1433,17 +1455,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
- /*
- * Slice Height of 8 works for all currently available panels. So start
- * with that if pic_height is an integral multiple of 8. Eventually add
- * logic to try multiple slice heights.
- */
- if (vdsc_cfg->pic_height % 8 == 0)
- vdsc_cfg->slice_height = 8;
- else if (vdsc_cfg->pic_height % 4 == 0)
- vdsc_cfg->slice_height = 4;
- else
- vdsc_cfg->slice_height = 2;
+ vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height);
ret = intel_dsc_compute_params(crtc_state);
if (ret)
@@ -1727,7 +1739,7 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
* Our YCbCr output is always limited range.
* crtc_state->limited_color_range only applies to RGB,
* and it must never be set for YCbCr or we risk setting
- * some conflicting bits in PIPECONF which will mess up
+ * some conflicting bits in TRANSCONF which will mess up
* the colors on the monitor.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -1991,7 +2003,6 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
}
static bool intel_dp_has_audio(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
@@ -2057,7 +2068,7 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct drm_connector *connector = conn_state->connector;
pipe_config->sdp_split_enable =
- intel_dp_has_audio(encoder, pipe_config, conn_state) &&
+ intel_dp_has_audio(encoder, conn_state) &&
intel_dp_is_uhbr(pipe_config);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n",
@@ -2080,7 +2091,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
pipe_config->has_pch_encoder = true;
- pipe_config->has_audio = intel_dp_has_audio(encoder, pipe_config, conn_state);
+ pipe_config->has_audio =
+ intel_dp_has_audio(encoder, conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -2279,10 +2292,15 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
- wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
+ connector->base.base.id, connector->base.name,
+ connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
+
+ wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
+ connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
}
/* If the device supports it, try to set the power state appropriately */
@@ -3648,12 +3666,11 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
intel_dp->aux.i2c_defer_count);
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
- struct edid *block = intel_connector->detect_edid;
+ /* FIXME: Get rid of drm_edid_raw() */
+ const struct edid *block = drm_edid_raw(intel_connector->detect_edid);
- /* We have to write the checksum
- * of the last block read
- */
- block += intel_connector->detect_edid->extensions;
+ /* We have to write the checksum of the last block read */
+ block += block->extensions;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
block->checksum) <= 0)
@@ -4475,29 +4492,34 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
return is_connected;
}
-static struct edid *
+static const struct drm_edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_edid *fixed_edid = connector->panel.fixed_edid;
- /* use cached edid if we have one */
- if (intel_connector->edid) {
+ /* Use panel fixed edid if we have one */
+ if (fixed_edid) {
/* invalid edid */
- if (IS_ERR(intel_connector->edid))
+ if (IS_ERR(fixed_edid))
return NULL;
- return drm_edid_duplicate(intel_connector->edid);
- } else
- return drm_get_edid(&intel_connector->base,
- &intel_dp->aux.ddc);
+ return drm_edid_dup(fixed_edid);
+ }
+
+ return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc);
}
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
- const struct edid *edid)
+ const struct drm_edid *drm_edid)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
+ const struct edid *edid;
+
+ /* FIXME: Get rid of drm_edid_raw() */
+ edid = drm_edid_raw(drm_edid);
intel_dp->dfp.max_bpc =
drm_dp_downstream_max_bpc(intel_dp->dpcd,
@@ -4597,21 +4619,27 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
bool vrr_capable;
intel_dp_unset_edid(intel_dp);
- edid = intel_dp_get_edid(intel_dp);
- connector->detect_edid = edid;
+ drm_edid = intel_dp_get_edid(intel_dp);
+ connector->detect_edid = drm_edid;
+
+ /* Below we depend on display info having been updated */
+ drm_edid_connector_update(&connector->base, drm_edid);
vrr_capable = intel_vrr_is_capable(connector);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
- intel_dp_update_dfp(intel_dp, edid);
+ intel_dp_update_dfp(intel_dp, drm_edid);
intel_dp_update_420(intel_dp);
+ /* FIXME: Get rid of drm_edid_raw() */
+ edid = drm_edid_raw(drm_edid);
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
intel_dp->has_audio = drm_detect_monitor_audio(edid);
@@ -4626,7 +4654,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
struct intel_connector *connector = intel_dp->attached_connector;
drm_dp_cec_unset_edid(&intel_dp->aux);
- kfree(connector->detect_edid);
+ drm_edid_free(connector->detect_edid);
connector->detect_edid = NULL;
intel_dp->has_hdmi_sink = false;
@@ -4790,12 +4818,10 @@ intel_dp_force(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct edid *edid;
- int num_modes = 0;
+ int num_modes;
- edid = intel_connector->detect_edid;
- if (edid)
- num_modes = intel_connector_update_modes(connector, edid);
+ /* drm_edid_connector_update() done in ->detect() or ->force() */
+ num_modes = drm_edid_connector_add_modes(connector);
/* Also add fixed mode, which may or may not be present in EDID */
if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
@@ -4804,7 +4830,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
if (num_modes)
return num_modes;
- if (!edid) {
+ if (!intel_connector->detect_edid) {
struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
struct drm_display_mode *mode;
@@ -4841,7 +4867,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (!ret)
drm_dp_cec_register_connector(&intel_dp->aux, connector);
- if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return ret;
/*
@@ -5119,8 +5145,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
return IRQ_HANDLED;
}
-/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
+ const struct intel_bios_encoder_data *devdata,
+ enum port port)
{
/*
* eDP not supported on g4x. so bail out early just
@@ -5132,13 +5159,24 @@ bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
return true;
- return intel_bios_is_port_edp(dev_priv, port);
+ return devdata && intel_bios_encoder_supports_edp(devdata);
+}
+
+bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_bios_encoder_data *devdata =
+ intel_bios_encoder_data_lookup(i915, port);
+
+ return _intel_dp_is_port_edp(i915, devdata, port);
}
static bool
-has_gamut_metadata_dip(struct drm_i915_private *i915, enum port port)
+has_gamut_metadata_dip(struct intel_encoder *encoder)
{
- if (intel_bios_is_lspcon_present(i915, port))
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ if (intel_bios_encoder_is_lspcon(encoder->devdata))
return false;
if (DISPLAY_VER(i915) >= 11)
@@ -5173,14 +5211,14 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
drm_connector_attach_max_bpc_property(connector, 6, 12);
/* Register HDMI colorspace for case of lspcon */
- if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
drm_connector_attach_content_type_property(connector);
intel_attach_hdmi_colorspace_property(connector);
} else {
intel_attach_dp_colorspace_property(connector);
}
- if (has_gamut_metadata_dip(dev_priv, port))
+ if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
drm_connector_attach_hdr_output_metadata_property(connector);
if (HAS_VRR(dev_priv))
@@ -5222,11 +5260,6 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
-
- drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] using pipe %c for initial backlight setup\n",
- connector->base.base.id, connector->base.name,
- pipe_name(pipe));
}
intel_backlight_setup(connector, pipe);
@@ -5240,7 +5273,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -5287,29 +5320,28 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_lock(&dev_priv->drm.mode_config.mutex);
- edid = drm_get_edid(connector, &intel_dp->aux.ddc);
- if (!edid) {
+ drm_edid = drm_edid_read_ddc(connector, &intel_dp->aux.ddc);
+ if (!drm_edid) {
/* Fallback to EDID from ACPI OpRegion, if any */
- edid = intel_opregion_get_edid(intel_connector);
- if (edid)
+ drm_edid = intel_opregion_get_edid(intel_connector);
+ if (drm_edid)
drm_dbg_kms(&dev_priv->drm,
"[CONNECTOR:%d:%s] Using OpRegion EDID\n",
connector->base.id, connector->name);
}
- if (edid) {
- if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector, edid);
- } else {
- kfree(edid);
- edid = ERR_PTR(-EINVAL);
+ if (drm_edid) {
+ if (drm_edid_connector_update(connector, drm_edid) ||
+ !drm_edid_connector_add_modes(connector)) {
+ drm_edid_connector_update(connector, NULL);
+ drm_edid_free(drm_edid);
+ drm_edid = ERR_PTR(-EINVAL);
}
} else {
- edid = ERR_PTR(-ENOENT);
+ drm_edid = ERR_PTR(-ENOENT);
}
- intel_connector->edid = edid;
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel,
- encoder->devdata, IS_ERR(edid) ? NULL : edid);
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata,
+ IS_ERR(drm_edid) ? NULL : drm_edid);
intel_panel_add_edid_fixed_modes(intel_connector, true);
@@ -5333,7 +5365,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
goto out_vdd_off;
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, drm_edid);
intel_edp_backlight_setup(intel_dp, intel_connector);
@@ -5403,7 +5435,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_port_edp(dev_priv, port)) {
+ if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
/*
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 5a176bfb10a2..96967e21c94c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
+#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
@@ -737,3 +738,37 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
+
+static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ /* SKL has DDI E but no AUX E */
+ if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E)
+ return AUX_CH_A;
+
+ return (enum aux_ch)encoder->port;
+}
+
+enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum aux_ch aux_ch;
+
+ aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
+ if (aux_ch != AUX_CH_NONE) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] using AUX %c (VBT)\n",
+ encoder->base.base.id, encoder->base.name,
+ aux_ch_name(aux_ch));
+ return aux_ch;
+ }
+
+ aux_ch = default_aux_ch(encoder);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] using AUX %c (platform default)\n",
+ encoder->base.base.id, encoder->base.name,
+ aux_ch_name(aux_ch));
+
+ return aux_ch;
+}
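
A typical caller resolves the AUX channel once at encoder init time and caches it on the digital port. Hypothetical usage sketch — the aux_ch field and enc_to_dig_port() come from the wider driver, not from this diff:

	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	dig_port->aux_ch = intel_dp_aux_ch(encoder);
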
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 738577537bc7..138e340f94ee 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -6,9 +6,13 @@
#ifndef __INTEL_DP_AUX_H__
#define __INTEL_DP_AUX_H__
+enum aux_ch;
struct intel_dp;
+struct intel_encoder;
void intel_dp_aux_fini(struct intel_dp *intel_dp);
void intel_dp_aux_init(struct intel_dp *intel_dp);
+enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
+
#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 83af95bce98d..95cc5251843e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -105,6 +105,11 @@ enum intel_dp_aux_backlight_modparam {
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
};
+static bool is_intel_tcon_cap(const u8 tcon_cap[4])
+{
+ return tcon_cap[0] >= 1;
+}
+
/* Intel EDP backlight callbacks */
static bool
intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
@@ -125,14 +130,12 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP))
return false;
- if (tcon_cap[0] >= 1) {
- drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n",
- tcon_cap[0]);
- } else {
- drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n",
- tcon_cap[0]);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n",
+ connector->base.base.id, connector->base.name,
+ is_intel_tcon_cap(tcon_cap) ? "Intel" : "unsupported", tcon_cap[0]);
+
+ if (!is_intel_tcon_cap(tcon_cap))
return false;
- }
/*
* If we don't have HDR static metadata there is no way to
@@ -147,7 +150,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(&i915->drm,
- "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ connector->base.base.id, connector->base.name,
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
return false;
}
@@ -168,7 +172,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
u8 buf[2] = { 0 };
if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) {
- drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n");
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
@@ -185,7 +190,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
sizeof(buf)) != sizeof(buf)) {
- drm_err(&i915->drm, "Failed to read brightness from DPCD\n");
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
@@ -205,7 +211,8 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state,
if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
sizeof(buf)) != sizeof(buf))
- drm_err(dev, "Failed to write brightness level to DPCD\n");
+ drm_err(dev, "[CONNECTOR:%d:%s] Failed to write brightness level to DPCD\n",
+ connector->base.base.id, connector->base.name);
}
static void
@@ -238,7 +245,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
if (ret != 1) {
- drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return;
}
@@ -254,9 +262,10 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
}
- if (ctrl != old_ctrl)
- if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
- drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n");
+ if (ctrl != old_ctrl &&
+ drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n",
+ connector->base.base.id, connector->base.name);
}
static void
@@ -273,6 +282,11 @@ intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state,
panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0));
}
+static const char *dpcd_vs_pwm_str(bool aux)
+{
+ return aux ? "DPCD" : "PWM";
+}
+
static int
intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
@@ -282,15 +296,16 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
&connector->base.display_info.luminance_range;
int ret;
- if (panel->backlight.edp.intel.sdr_uses_aux) {
- drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n");
- } else {
- drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.intel.sdr_uses_aux));
+ if (!panel->backlight.edp.intel.sdr_uses_aux) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
drm_err(&i915->drm,
- "Failed to setup SDR backlight controls through PWM: %d\n", ret);
+ "[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return ret;
}
}
@@ -303,8 +318,10 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
panel->backlight.min = 0;
}
- drm_dbg_kms(&i915->drm, "Using backlight range %d..%d\n", panel->backlight.min,
- panel->backlight.max);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.min, panel->backlight.max);
+
panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
panel->backlight.enabled = panel->backlight.level != 0;
@@ -386,12 +403,19 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
if (ret < 0)
return ret;
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
+
if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
drm_err(&i915->drm,
- "Failed to setup PWM backlight controls for eDP backlight: %d\n",
- ret);
+ "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return ret;
}
}
@@ -418,6 +442,9 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
}
}
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -428,7 +455,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
- drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n",
+ connector->base.base.id, connector->base.name);
return true;
}
return false;
@@ -504,13 +532,15 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
* interfaces is to probe for Intel's first, and VESA's second.
*/
if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
- drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n",
+ connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
return 0;
}
if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
- drm_dbg_kms(dev, "Using VESA eDP backlight controls\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using VESA eDP backlight controls\n",
+ connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 8b0e4defa3f1..a860cbc5dbea 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -265,6 +265,19 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
return 0;
}
+static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ return connector->port->has_audio;
+ else
+ return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -272,10 +285,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct intel_digital_connector_state *intel_conn_state =
- to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
@@ -287,11 +296,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio = connector->port->has_audio;
- else
- pipe_config->has_audio =
- intel_conn_state->force_audio == HDMI_AUDIO_ON;
+ pipe_config->has_audio =
+ intel_dp_mst_has_audio(conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
/*
* for MST we always configure max link bw - the spec doesn't
@@ -524,8 +531,14 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- struct drm_dp_mst_topology_state *mst_state =
- drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ struct drm_dp_mst_topology_state *old_mst_state =
+ drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ struct drm_dp_mst_topology_state *new_mst_state =
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ const struct drm_dp_mst_atomic_payload *old_payload =
+ drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
+ struct drm_dp_mst_atomic_payload *new_payload =
+ drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_dbg_kms(&i915->drm, "active links %d\n",
@@ -533,8 +546,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
intel_hdcp_disable(intel_mst->connector);
- drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state,
- drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ drm_dp_remove_payload(&intel_dp->mst_mgr, new_mst_state,
+ old_payload, new_payload);
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}
@@ -598,7 +611,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* no clock to the transcoder"
*/
if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_mst->connector = NULL;
@@ -678,7 +691,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
* here for the following ones.
*/
if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
- intel_ddi_enable_pipe_clock(encoder, pipe_config);
+ intel_ddi_enable_transcoder_clock(encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
@@ -1223,3 +1236,64 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
}
+
+/**
+ * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
+ * @state: atomic state
+ * @connector: connector to add the state for
+ * @crtc: the CRTC @connector is attached to
+ *
+ * Add the MST topology state for @connector to @state.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ struct intel_crtc *crtc)
+{
+ struct drm_dp_mst_topology_state *mst_state;
+
+ if (!connector->mst_port)
+ return 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(&state->base,
+ &connector->mst_port->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);
+
+ return 0;
+}
+
+/**
+ * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
+ * @state: atomic state
+ * @crtc: CRTC to add the state for
+ *
+ * Add the MST topology state for @crtc to @state.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_connector *_connector;
+ struct drm_connector_state *conn_state;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
+ struct intel_connector *connector = to_intel_connector(_connector);
+ int ret;
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
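
A hypothetical call site: a path that forces a full modeset on a CRTC driving MST streams would pull in the topology state before payloads are recomputed, propagating the error code as usual:

	ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
	if (ret)
		return ret;
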
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index f7301de6cdfb..f1815bb72267 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+struct intel_atomic_state;
+struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
@@ -18,5 +20,7 @@ int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
+int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 7eb7440b3180..62b93d097e44 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -376,7 +376,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy_info->rcomp_phy != -1)
- dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+ dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy);
if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
@@ -389,9 +389,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
"force reprogramming it\n", phy);
}
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- val |= phy_info->pwron_mask;
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask);
/*
* The PHY registers start out inaccessible and respond to reads with
@@ -410,27 +408,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
phy);
/* Program PLL Rcomp code offset */
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy));
- val &= ~IREF0RC_OFFSET_MASK;
- val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK,
+ 0xE4 << IREF0RC_OFFSET_SHIFT);
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy));
- val &= ~IREF1RC_OFFSET_MASK;
- val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK,
+ 0xE4 << IREF1RC_OFFSET_SHIFT);
/* Program power gating */
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy));
- val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
- SUS_CLK_CONFIG;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val);
-
- if (phy_info->dual_channel) {
- val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy));
- val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val);
- }
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0,
+ OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG);
+
+ if (phy_info->dual_channel)
+ intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0,
+ DW6_OLDO_DYN_PWR_DOWN_EN);
if (phy_info->rcomp_phy != -1) {
u32 grc_code;
@@ -442,40 +432,32 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
- val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
- phy_info->rcomp_phy);
+ val = bxt_get_grc(dev_priv, phy_info->rcomp_phy);
+ dev_priv->display.state.bxt_phy_grc = val;
+
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
-
- val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy));
- val |= GRC_DIS | GRC_RDY_OVRD;
- intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy),
+ 0, GRC_DIS | GRC_RDY_OVRD);
}
if (phy_info->reset_delay)
udelay(phy_info->reset_delay);
- val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
- val |= COMMON_RESET_DIS;
- intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- u32 val;
phy_info = bxt_get_phy_info(dev_priv, phy);
- val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
- val &= ~COMMON_RESET_DIS;
- intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0);
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- val &= ~phy_info->pwron_mask;
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
}
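
Most of the remaining hunks in this file, and in intel_dpll_mgr.c and intel_drrs.c below, convert open-coded read/modify/write sequences to intel_de_rmw(). Functionally, intel_de_rmw(i915, reg, clear, set) behaves like the sketch below; note that it returns the old register value, which the TRANS_CMTG_CHICKEN hunk in intel_dpll_mgr.c relies on. Whether the helper really skips the write when nothing changes is an internal detail assumed here:

static inline u32 de_rmw_sketch(struct drm_i915_private *i915,
				i915_reg_t reg, u32 clear, u32 set)
{
	u32 old, val;

	old = intel_de_read(i915, reg);
	val = (old & ~clear) | set;
	if (val != old)
		intel_de_write(i915, reg, val);

	return old;
}
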
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
@@ -568,7 +550,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
"BXT_PORT_CL2CM_DW6(%d)", phy);
if (phy_info->rcomp_phy != -1) {
- u32 grc_code = dev_priv->bxt_phy_grc;
+ u32 grc_code = dev_priv->display.state.bxt_phy_grc;
grc_code = grc_code << GRC_CODE_FAST_SHIFT |
grc_code << GRC_CODE_SLOW_SHIFT |
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index c236aafe9be0..4e9c18be7e1f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1910,7 +1910,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, DPLL_MD(PIPE_B),
crtc_state->dpll_hw_state.dpll_md);
intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
+ dev_priv->display.state.chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 1974eb580ed1..22fc908b7e5d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -608,17 +608,15 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, WRPLL_CTL(id));
- intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, WRPLL_CTL(id));
/*
* Try to set up the PCH reference clock once all DPLLs
* that depend on it have been shut down.
*/
- if (dev_priv->pch_ssc_use & BIT(id))
+ if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
intel_init_pch_refclk(dev_priv);
}
@@ -626,17 +624,15 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, SPLL_CTL);
- intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, SPLL_CTL);
/*
* Try to set up the PCH reference clock once all DPLLs
* that depend on it have been shut down.
*/
- if (dev_priv->pch_ssc_use & BIT(id))
+ if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
intel_init_pch_refclk(dev_priv);
}
@@ -1238,16 +1234,10 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, DPLL_CTRL1);
-
- val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
- DPLL_CTRL1_SSC(id) |
- DPLL_CTRL1_LINK_RATE_MASK(id));
- val |= pll->state.hw_state.ctrl1 << (id * 6);
-
- intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_rmw(dev_priv, DPLL_CTRL1,
+ DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
+ pll->state.hw_state.ctrl1 << (id * 6));
intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
@@ -1265,8 +1255,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_posting_read(dev_priv, regs[id].cfgcr2);
/* the enable bit is always bit 31 */
- intel_de_write(dev_priv, regs[id].ctl,
- intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
@@ -1285,8 +1274,7 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
/* the enable bit is always bit 31 */
- intel_de_write(dev_priv, regs[id].ctl,
- intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, regs[id].ctl);
}
@@ -1902,14 +1890,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Non-SSC reference */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_REF_SEL;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
if (IS_GEMINILAKE(dev_priv)) {
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_POWER_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
+ 0, PORT_PLL_POWER_ENABLE);
if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
@@ -1918,39 +1903,28 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
}
/* Disable 10 bit clock */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
- temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
+ PORT_PLL_10BIT_CLK_ENABLE, 0);
/* Write P1 & P2 */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
- temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
- temp |= pll->state.hw_state.ebb0;
- intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
+ PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
/* Write M2 integer */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
- temp &= ~PORT_PLL_M2_INT_MASK;
- temp |= pll->state.hw_state.pll0;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
+ PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
/* Write N */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
- temp &= ~PORT_PLL_N_MASK;
- temp |= pll->state.hw_state.pll1;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
+ PORT_PLL_N_MASK, pll->state.hw_state.pll1);
/* Write M2 fraction */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
- temp &= ~PORT_PLL_M2_FRAC_MASK;
- temp |= pll->state.hw_state.pll2;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
+ PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
/* Write M2 fraction enable */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
- temp &= ~PORT_PLL_M2_FRAC_ENABLE;
- temp |= pll->state.hw_state.pll3;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
+ PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
/* Write coeff */
temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
@@ -1961,15 +1935,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
- temp &= ~PORT_PLL_TARGET_CNT_MASK;
- temp |= pll->state.hw_state.pll8;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
+ PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
- temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
- temp |= pll->state.hw_state.pll9;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
+ PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
@@ -1986,9 +1956,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
@@ -2016,17 +1984,13 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
- u32 temp;
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (IS_GEMINILAKE(dev_priv)) {
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_POWER_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_ENABLE, 0);
if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
@@ -3641,8 +3605,8 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
!i915_mmio_reg_valid(div0_reg));
if (dev_priv->display.vbt.override_afc_startup &&
i915_mmio_reg_valid(div0_reg))
- intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
- hw_state->div0);
+ intel_de_rmw(dev_priv, div0_reg,
+ TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
intel_de_posting_read(dev_priv, cfgcr1_reg);
}
@@ -3651,7 +3615,6 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
- u32 val;
/*
* Some of the following registers have reserved fields, so program
@@ -3659,23 +3622,19 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
* during the calc/readout phase if the mask depends on some other HW
* state like refclk, see icl_calc_mg_pll_state().
*/
- val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
- val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
- val |= hw_state->mg_refclkin_ctl;
- intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
+ intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
+ MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
- val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
- val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
- val |= hw_state->mg_clktop2_coreclkctl1;
- intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
+ hw_state->mg_clktop2_coreclkctl1);
- val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
- val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
- val |= hw_state->mg_clktop2_hsclkctl;
- intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
+ hw_state->mg_clktop2_hsclkctl);
intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
@@ -3684,15 +3643,12 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
hw_state->mg_pll_frac_lock);
intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
- val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
- val &= ~hw_state->mg_pll_bias_mask;
- val |= hw_state->mg_pll_bias;
- intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
+ intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
+ hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
- val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
- val |= hw_state->mg_pll_tdc_coldst_bias;
- intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
+ hw_state->mg_pll_tdc_coldst_bias_mask,
+ hw_state->mg_pll_tdc_coldst_bias);
intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
@@ -3766,11 +3722,7 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
- val = intel_de_read(dev_priv, enable_reg);
- val |= PLL_POWER_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);
/*
* The spec says we need to "wait" but it also says it should be
@@ -3785,11 +3737,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
- val = intel_de_read(dev_priv, enable_reg);
- val |= PLL_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
/* Timeout is actually 600us. */
if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
@@ -3815,8 +3763,7 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
* since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
*/
val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
- val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
- intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
+ val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
@@ -3900,8 +3847,6 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
/* The first steps are done by intel_ddi_post_disable(). */
/*
@@ -3910,9 +3855,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* nothing here.
*/
- val = intel_de_read(dev_priv, enable_reg);
- val &= ~PLL_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);
/* Timeout is actually 1us. */
if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
@@ -3920,9 +3863,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
/* DVFS post sequence would be here. See the comment above. */
- val = intel_de_read(dev_priv, enable_reg);
- val &= ~PLL_POWER_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);
/*
* The spec says we need to "wait" but it also says it should be
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 5b9e44443814..760e63cdc0c8 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -68,21 +68,15 @@ intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
- u32 val, bit;
+ u32 bit;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- bit = PIPECONF_REFRESH_RATE_ALT_VLV;
+ bit = TRANSCONF_REFRESH_RATE_ALT_VLV;
else
- bit = PIPECONF_REFRESH_RATE_ALT_ILK;
+ bit = TRANSCONF_REFRESH_RATE_ALT_ILK;
- val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
-
- if (refresh_rate == DRRS_REFRESH_RATE_LOW)
- val |= bit;
- else
- val &= ~bit;
-
- intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
+ intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder),
+ bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0);
}
static void
@@ -374,16 +368,16 @@ out:
return ret;
}
-DEFINE_SIMPLE_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
- NULL, intel_drrs_debugfs_ctl_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
+ NULL, intel_drrs_debugfs_ctl_set, "%llu\n");
void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc)
{
debugfs_create_file("i915_drrs_status", 0444, crtc->base.debugfs_entry,
crtc, &intel_drrs_debugfs_status_fops);
- debugfs_create_file("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
- crtc, &intel_drrs_debugfs_ctl_fops);
+ debugfs_create_file_unsafe("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
+ crtc, &intel_drrs_debugfs_ctl_fops);
}
static int intel_drrs_debugfs_type_show(struct seq_file *m, void *unused)
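The drrs debugfs hunk above (and the matching intel_fbc.c hunk further down) switches to the DEFINE_DEBUGFS_ATTRIBUTE() + debugfs_create_file_unsafe() pairing, where the generated fops handle file lifetime themselves instead of relying on the proxy wrapping added by debugfs_create_file(). A minimal illustrative sketch; the names below are hypothetical and not part of the patch:

#include <linux/debugfs.h>

static int example_get(void *data, u64 *val)
{
	*val = 0;	/* report some state */
	return 0;
}

static int example_set(void *data, u64 val)
{
	return 0;	/* act on the written value */
}

DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

/* registered with the "unsafe" variant, as in the hunks above: */
/* debugfs_create_file_unsafe("example", 0644, parent, NULL, &example_fops); */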
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 3d63c1bf1e4f..19e422da57dc 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -30,21 +30,24 @@ struct intel_dsb {
struct intel_crtc *crtc;
/*
- * free_pos will point the first free entry position
- * and help in calculating tail of command buffer.
+ * maximum number of dwords the buffer will hold.
*/
- int free_pos;
+ unsigned int size;
/*
- * ins_start_offset will help to store start address of the dsb
+ * free_pos will point to the first free dword and
+ * help in calculating tail of command buffer.
+ */
+ unsigned int free_pos;
+
+ /*
+ * ins_start_offset will help to store the start dword of the dsb
* instruction and help in identifying the batch of auto-increment
* register.
*/
- u32 ins_start_offset;
+ unsigned int ins_start_offset;
};
-#define DSB_BUF_SIZE (2 * PAGE_SIZE)
-
/**
* DOC: DSB
*
@@ -64,80 +67,87 @@ struct intel_dsb {
/* DSB opcodes. */
#define DSB_OPCODE_SHIFT 24
+#define DSB_OPCODE_NOOP 0x0
#define DSB_OPCODE_MMIO_WRITE 0x1
+#define DSB_OPCODE_WAIT_USEC 0x2
+#define DSB_OPCODE_WAIT_LINES 0x3
+#define DSB_OPCODE_WAIT_VBLANKS 0x4
+#define DSB_OPCODE_WAIT_DSL_IN 0x5
+#define DSB_OPCODE_WAIT_DSL_OUT 0x6
+#define DSB_OPCODE_INTERRUPT 0x7
#define DSB_OPCODE_INDEXED_WRITE 0x9
+#define DSB_OPCODE_POLL 0xA
#define DSB_BYTE_EN 0xF
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff
+static bool assert_dsb_has_room(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /* each instruction is 2 dwords */
+ return !drm_WARN(&i915->drm, dsb->free_pos > dsb->size - 2,
+ "[CRTC:%d:%s] DSB %d buffer overflow\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
+}
+
static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
enum dsb_id id)
{
- return DSB_STATUS & intel_de_read(i915, DSB_CTRL(pipe, id));
+ return intel_de_read(i915, DSB_CTRL(pipe, id)) & DSB_STATUS_BUSY;
}
-static bool intel_dsb_enable_engine(struct drm_i915_private *i915,
- enum pipe pipe, enum dsb_id id)
+static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
- u32 dsb_ctrl;
+ u32 *buf = dsb->cmd_buf;
- dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
- if (DSB_STATUS & dsb_ctrl) {
- drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
- return false;
- }
+ if (!assert_dsb_has_room(dsb))
+ return;
- dsb_ctrl |= DSB_ENABLE;
- intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
+ /* Every instruction should be 8 byte aligned. */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
- intel_de_posting_read(i915, DSB_CTRL(pipe, id));
- return true;
+ dsb->ins_start_offset = dsb->free_pos;
+
+ buf[dsb->free_pos++] = ldw;
+ buf[dsb->free_pos++] = udw;
}
-static bool intel_dsb_disable_engine(struct drm_i915_private *i915,
- enum pipe pipe, enum dsb_id id)
+static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
+ u32 opcode, i915_reg_t reg)
{
- u32 dsb_ctrl;
+ const u32 *buf = dsb->cmd_buf;
+ u32 prev_opcode, prev_reg;
- dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
- if (DSB_STATUS & dsb_ctrl) {
- drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
- return false;
- }
+ prev_opcode = buf[dsb->ins_start_offset + 1] >> DSB_OPCODE_SHIFT;
+ prev_reg = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
- dsb_ctrl &= ~DSB_ENABLE;
- intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
+ return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
+}
- intel_de_posting_read(i915, DSB_CTRL(pipe, id));
- return true;
+static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg)
+{
+ return intel_dsb_prev_ins_is_write(dsb, DSB_OPCODE_MMIO_WRITE, reg);
+}
+
+static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
+{
+ return intel_dsb_prev_ins_is_write(dsb, DSB_OPCODE_INDEXED_WRITE, reg);
}
/**
- * intel_dsb_indexed_reg_write() -Write to the DSB context for auto
- * increment register.
+ * intel_dsb_reg_write() - Emit register write to the DSB context
* @dsb: DSB context
* @reg: register address.
* @val: value.
*
* This function is used for writing a register-value pair into the command
- * buffer of DSB for auto-increment register. During command buffer overflow,
- * a warning is thrown and rest all erroneous condition register programming
- * is done through mmio write.
+ * buffer of the DSB.
*/
-
-void intel_dsb_indexed_reg_write(struct intel_dsb *dsb,
- i915_reg_t reg, u32 val)
+void intel_dsb_reg_write(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 val)
{
- struct intel_crtc *crtc = dsb->crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 *buf = dsb->cmd_buf;
- u32 reg_val;
-
- if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
- drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
- return;
- }
-
/*
* For example the buffer will look like below for 3 dwords for auto
* increment register:
@@ -154,124 +164,114 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb,
* we are writing an odd number of dwords, zeros will be added at the end for
* padding.
*/
- reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
- if (reg_val != i915_mmio_reg_offset(reg)) {
- /* Every instruction should be 8 byte aligned. */
- dsb->free_pos = ALIGN(dsb->free_pos, 2);
+ if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) &&
+ !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) {
+ intel_dsb_emit(dsb, val,
+ (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ i915_mmio_reg_offset(reg));
+ } else {
+ u32 *buf = dsb->cmd_buf;
- dsb->ins_start_offset = dsb->free_pos;
+ if (!assert_dsb_has_room(dsb))
+ return;
- /* Update the size. */
- buf[dsb->free_pos++] = 1;
+ /* convert to indexed write? */
+ if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
+ u32 prev_val = buf[dsb->ins_start_offset + 0];
- /* Update the opcode and reg. */
- buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE <<
- DSB_OPCODE_SHIFT) |
- i915_mmio_reg_offset(reg);
+ buf[dsb->ins_start_offset + 0] = 1; /* count */
+ buf[dsb->ins_start_offset + 1] =
+ (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg);
+ buf[dsb->ins_start_offset + 2] = prev_val;
- /* Update the value. */
- buf[dsb->free_pos++] = val;
- } else {
- /* Update the new value. */
- buf[dsb->free_pos++] = val;
+ dsb->free_pos++;
+ }
- /* Update the size. */
+ buf[dsb->free_pos++] = val;
+ /* Update the count */
buf[dsb->ins_start_offset]++;
- }
- /* if number of data words is odd, then the last dword should be 0.*/
- if (dsb->free_pos & 0x1)
- buf[dsb->free_pos] = 0;
+ /* if number of data words is odd, then the last dword should be 0.*/
+ if (dsb->free_pos & 0x1)
+ buf[dsb->free_pos] = 0;
+ }
}
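/*
 * Illustrative example, not part of the patch: given the logic above, three
 * consecutive intel_dsb_reg_write() calls to the same register should
 * coalesce into a single indexed write, laid out in dwords roughly as
 *
 *   buf[n+0] = 3                                           (count)
 *   buf[n+1] = (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | reg_offset
 *   buf[n+2] = val1
 *   buf[n+3] = val2
 *   buf[n+4] = val3
 *   buf[n+5] = 0                                           (pad, uncounted)
 *
 * with n == ins_start_offset and free_pos left at n+5, matching the
 * "free_pos & 0x1" padding handling above.
 */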
-/**
- * intel_dsb_reg_write() -Write to the DSB context for normal
- * register.
- * @crtc_state: intel_crtc_state structure
- * @reg: register address.
- * @val: value.
- *
- * This function is used for writing register-value pair in command
- * buffer of DSB. During command buffer overflow, a warning is thrown
- * and rest all erroneous condition register programming is done
- * through mmio write.
- */
-void intel_dsb_reg_write(struct intel_dsb *dsb,
- i915_reg_t reg, u32 val)
+static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
- struct intel_crtc *crtc = dsb->crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 *buf = dsb->cmd_buf;
+ u32 aligned_tail, tail;
- if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
- drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
- return;
- }
+ tail = dsb->free_pos * 4;
+ aligned_tail = ALIGN(tail, CACHELINE_BYTES);
- dsb->ins_start_offset = dsb->free_pos;
- buf[dsb->free_pos++] = val;
- buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
- (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
- i915_mmio_reg_offset(reg);
+ if (aligned_tail > tail)
+ memset(&dsb->cmd_buf[dsb->free_pos], 0,
+ aligned_tail - tail);
+
+ dsb->free_pos = aligned_tail / 4;
+}
+
+void intel_dsb_finish(struct intel_dsb *dsb)
+{
+ intel_dsb_align_tail(dsb);
}
/**
* intel_dsb_commit() - Trigger workload execution of DSB.
* @dsb: DSB context
+ * @wait_for_vblank: wait for vblank before executing
*
* This function is used to do the actual write to the hardware using DSB.
*/
-void intel_dsb_commit(struct intel_dsb *dsb)
+void intel_dsb_commit(struct intel_dsb *dsb, bool wait_for_vblank)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 tail;
- if (!(dsb && dsb->free_pos))
+ tail = dsb->free_pos * 4;
+ if (drm_WARN_ON(&dev_priv->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
return;
- if (!intel_dsb_enable_engine(dev_priv, pipe, dsb->id))
- goto reset;
-
if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
- drm_err(&dev_priv->drm,
- "HEAD_PTR write failed - dsb engine is busy.\n");
- goto reset;
+ drm_err(&dev_priv->drm, "[CRTC:%d:%s] DSB %d is busy\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
+ return;
}
+
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id),
+ (wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0) |
+ DSB_ENABLE);
intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
i915_ggtt_offset(dsb->vma));
-
- tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
- if (tail > dsb->free_pos * 4)
- memset(&dsb->cmd_buf[dsb->free_pos], 0,
- (tail - dsb->free_pos * 4));
-
- if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
- drm_err(&dev_priv->drm,
- "TAIL_PTR write failed - dsb engine is busy.\n");
- goto reset;
- }
- drm_dbg_kms(&dev_priv->drm,
- "DSB execution started - head 0x%x, tail 0x%x\n",
- i915_ggtt_offset(dsb->vma), tail);
intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
i915_ggtt_offset(dsb->vma) + tail);
- if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
+}
+
+void intel_dsb_wait(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1))
drm_err(&dev_priv->drm,
- "Timed out waiting for DSB workload completion.\n");
- goto reset;
- }
+ "[CRTC:%d:%s] DSB %d timed out waiting for idle\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
-reset:
+ /* Attempt to reset it */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
- intel_dsb_disable_engine(dev_priv, pipe, dsb->id);
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), 0);
}
/**
* intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
* @crtc: the CRTC
+ * @max_cmds: number of commands we need to fit into command buffer
*
* This function prepares the command buffer which is used to store DSB
* instructions with data.
@@ -279,25 +279,30 @@ reset:
* Returns:
* DSB context, NULL on failure
*/
-struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc)
+struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc,
+ unsigned int max_cmds)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dsb *dsb;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ struct intel_dsb *dsb;
struct i915_vma *vma;
+ unsigned int size;
u32 *buf;
- intel_wakeref_t wakeref;
if (!HAS_DSB(i915))
return NULL;
- dsb = kmalloc(sizeof(*dsb), GFP_KERNEL);
+ dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb)
goto out;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
+ /* ~1 qword per instruction, full cachelines */
+ size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
if (IS_ERR(obj))
goto out_put_rpm;
@@ -319,6 +324,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc)
dsb->vma = vma;
dsb->crtc = crtc;
dsb->cmd_buf = buf;
+ dsb->size = size / 4; /* in dwords */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
@@ -329,7 +335,8 @@ out_put_rpm:
kfree(dsb);
out:
drm_info_once(&i915->drm,
- "DSB queue setup failed, will fallback to MMIO for display HW programming\n");
+ "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
+ crtc->base.base.id, crtc->base.name, DSB1);
return NULL;
}
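Together with the intel_dsb.h changes below, the rework splits buffer preparation, emission and execution into separate steps. A minimal usage sketch, assuming a caller that knows an upper bound on the number of register writes it will emit; the register, counts and function name here are only for illustration:

static void dsb_usage_sketch(struct intel_crtc *crtc)
{
	struct intel_dsb *dsb;
	int i;

	dsb = intel_dsb_prepare(crtc, 1024);	/* worst-case command count */
	if (!dsb)
		return;	/* no DSB support or allocation failed: fall back to MMIO */

	for (i = 0; i < 256; i++)
		intel_dsb_reg_write(dsb, PREC_PAL_DATA(crtc->pipe), 0 /* LUT entry */);

	intel_dsb_finish(dsb);		/* pad the tail out to a full cacheline */
	intel_dsb_commit(dsb, false);	/* start execution immediately */
	intel_dsb_wait(dsb);		/* wait for idle, then reset/disable the engine */
	intel_dsb_cleanup(dsb);
}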
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 25f13c4d5389..b8148b47022d 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -13,12 +13,14 @@
struct intel_crtc;
struct intel_dsb;
-struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc);
+struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc,
+ unsigned int max_cmds);
+void intel_dsb_finish(struct intel_dsb *dsb);
void intel_dsb_cleanup(struct intel_dsb *dsb);
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val);
-void intel_dsb_indexed_reg_write(struct intel_dsb *dsb,
- i915_reg_t reg, u32 val);
-void intel_dsb_commit(struct intel_dsb *dsb);
+void intel_dsb_commit(struct intel_dsb *dsb,
+ bool wait_for_vblank);
+void intel_dsb_wait(struct intel_dsb *dsb);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 20e466d843ce..049443245310 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -162,6 +162,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
if (panel->vbt.backlight.brightness_precision_bits > 8)
@@ -171,6 +172,10 @@ static int dcs_setup_backlight(struct intel_connector *connector,
panel->backlight.level = panel->backlight.max;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using DCS for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 4aeae0f3ac91..eb2dcd866cc8 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -444,11 +444,8 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
* the clock enabled before we attempt to initialize
* the device.
*/
- for_each_pipe(dev_priv, pipe) {
- dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
- intel_de_write(dev_priv, DPLL(pipe),
- dpll[pipe] | DPLL_DVO_2X_MODE);
- }
+ for_each_pipe(dev_priv, pipe)
+ dpll[pipe] = intel_de_rmw(dev_priv, DPLL(pipe), 0, DPLL_DVO_2X_MODE);
ret = dvo->dev_ops->init(&intel_dvo->dev, i2c);
@@ -554,6 +551,6 @@ void intel_dvo_init(struct drm_i915_private *i915)
*/
intel_panel_add_encoder_fixed_mode(connector, encoder);
- intel_panel_init(connector);
+ intel_panel_init(connector, NULL);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index ea8eb7dcee38..f7e98e1c6470 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -25,7 +25,7 @@
#include "i915_reg_defs.h"
-#include "intel_display.h"
+#include "intel_display_limits.h"
enum drm_connector_status;
struct drm_display_mode;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 93d0e46e5481..799bdc81a6a9 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -2007,6 +2007,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
vm = intel_dpt_create(intel_fb);
if (IS_ERR(vm)) {
+ drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n");
ret = PTR_ERR(vm);
goto err;
}
@@ -2017,11 +2018,14 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
if (ret) {
drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
- goto err;
+ goto err_free_dpt;
}
return 0;
+err_free_dpt:
+ if (intel_fb_uses_dpt(fb))
+ intel_dpt_destroy(intel_fb->dpt_vm);
err:
intel_frontbuffer_put(intel_fb->frontbuffer);
return ret;
@@ -2046,6 +2050,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
/* object is "remote", not in local memory */
i915_gem_object_put(obj);
+ drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
return ERR_PTR(-EREMOTE);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 5e69d3c11d21..b507ff944864 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -331,15 +331,15 @@ static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
- GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
+ GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
fbc->compressed_fb.start, U32_MAX));
- GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
+ GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
fbc->compressed_llb.start, U32_MAX));
intel_de_write(i915, FBC_CFB_BASE,
- i915->dsm.start + fbc->compressed_fb.start);
+ i915->dsm.stolen.start + fbc->compressed_fb.start);
intel_de_write(i915, FBC_LL_BASE,
- i915->dsm.start + fbc->compressed_llb.start);
+ i915->dsm.stolen.start + fbc->compressed_llb.start);
}
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
@@ -712,7 +712,7 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(i915) ||
(DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
- end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
+ end = resource_size(&i915->dsm.stolen) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -1807,10 +1807,10 @@ static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
- intel_fbc_debugfs_false_color_get,
- intel_fbc_debugfs_false_color_set,
- "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
+ intel_fbc_debugfs_false_color_get,
+ intel_fbc_debugfs_false_color_set,
+ "%llu\n");
static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
struct dentry *parent)
@@ -1819,8 +1819,8 @@ static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
fbc, &intel_fbc_debugfs_status_fops);
if (fbc->funcs->set_false_color)
- debugfs_create_file("i915_fbc_false_color", 0644, parent,
- fbc, &intel_fbc_debugfs_false_color_fops);
+ debugfs_create_file_unsafe("i915_fbc_false_color", 0644, parent,
+ fbc, &intel_fbc_debugfs_false_color_fops);
}
void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 03ed4607a46d..3659350061a7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -170,7 +170,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
* important and we should probably use that space with FBC or other
* features.
*/
- if (size * 2 < dev_priv->stolen_usable_size)
+ if (size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
@@ -267,23 +267,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fbops = &intelfb_ops;
- /* setup aperture base/size for vesafb takeover */
obj = intel_fb_obj(&intel_fb->base);
if (i915_gem_object_is_lmem(obj)) {
struct intel_memory_region *mem = obj->mm.region;
- info->apertures->ranges[0].base = mem->io_start;
- info->apertures->ranges[0].size = mem->io_size;
-
/* Use fbdev's framebuffer from lmem for discrete */
info->fix.smem_start =
(unsigned long)(mem->io_start +
i915_gem_object_get_dma_address(obj, 0));
info->fix.smem_len = obj->base.size;
} else {
- info->apertures->ranges[0].base = ggtt->gmadr.start;
- info->apertures->ranges[0].size = ggtt->mappable_end;
-
/* Our framebuffer is the entirety of fbdev's system memory */
info->fix.smem_start =
(unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
@@ -328,8 +321,20 @@ out_unlock:
return ret;
}
+static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intelfb_create,
+ .fb_dirty = intelfb_dirty,
};
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
@@ -347,6 +352,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
if (ifbdev->fb)
drm_framebuffer_remove(&ifbdev->fb->base);
+ drm_fb_helper_unprepare(&ifbdev->helper);
kfree(ifbdev);
}
@@ -527,10 +533,12 @@ int intel_fbdev_init(struct drm_device *dev)
return -ENOMEM;
mutex_init(&ifbdev->hpd_lock);
- drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
- if (!intel_fbdev_init_bios(dev, ifbdev))
- ifbdev->preferred_bpp = 32;
+ if (intel_fbdev_init_bios(dev, ifbdev))
+ ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
+ else
+ ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
ret = drm_fb_helper_init(dev, &ifbdev->helper);
if (ret) {
@@ -549,14 +557,13 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
struct intel_fbdev *ifbdev = data;
/* Due to peculiar init order wrt hpd handling this is separate. */
- if (drm_fb_helper_initial_config(&ifbdev->helper,
- ifbdev->preferred_bpp))
+ if (drm_fb_helper_initial_config(&ifbdev->helper))
intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
}
-void intel_fbdev_initial_config_async(struct drm_device *dev)
+void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
@@ -626,7 +633,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
struct fb_info *info;
- if (!ifbdev || !ifbdev->vma)
+ if (!ifbdev)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv)))
+ return;
+
+ if (!ifbdev->vma)
goto set_suspend;
info = ifbdev->helper.info;
@@ -693,9 +706,9 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
-void intel_fbdev_restore_mode(struct drm_device *dev)
+void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 0e95e9472fa3..04fd523a5023 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -15,12 +15,12 @@ struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int intel_fbdev_init(struct drm_device *dev);
-void intel_fbdev_initial_config_async(struct drm_device *dev);
+void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv);
void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
void intel_fbdev_fini(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
void intel_fbdev_output_poll_changed(struct drm_device *dev);
-void intel_fbdev_restore_mode(struct drm_device *dev);
+void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
#else
static inline int intel_fbdev_init(struct drm_device *dev)
@@ -28,7 +28,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
return 0;
}
-static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
+static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
{
}
@@ -48,7 +48,7 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
-static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915)
{
}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 063f1da4f229..f55b4893c00f 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -366,8 +366,7 @@ void intel_fdi_normal_train(struct intel_crtc *crtc)
/* IVB wants error correction enabled */
if (IS_IVYBRIDGE(dev_priv))
- intel_de_write(dev_priv, reg,
- intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+ intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -439,19 +438,11 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
/* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(150);
reg = FDI_RX_IIR(pipe);
@@ -538,13 +529,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
udelay(150);
for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
@@ -593,13 +580,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
udelay(150);
for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
@@ -719,19 +702,13 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
}
/* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE_IVB,
+ FDI_LINK_TRAIN_PATTERN_2_IVB);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ FDI_LINK_TRAIN_PATTERN_MASK_CPT,
+ FDI_LINK_TRAIN_PATTERN_2_CPT);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(2); /* should be 1.5us */
for (i = 0; i < 4; i++) {
@@ -837,9 +814,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
udelay(30);
/* Unset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
/* Wait for FDI auto training time */
@@ -865,25 +841,21 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
- temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
- temp &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
- temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+ intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK,
+ DP_TP_CTL_LINK_TRAIN_PAT1);
intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
/* Reset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
}
@@ -898,7 +870,6 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
void hsw_fdi_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
@@ -906,30 +877,15 @@ void hsw_fdi_disable(struct intel_encoder *encoder)
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-
- val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
- val &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val);
-
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
-
intel_ddi_disable_clock(encoder);
-
- val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_PCDCLK;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_PLL_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
@@ -945,16 +901,14 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
temp = intel_de_read(dev_priv, reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
intel_de_posting_read(dev_priv, reg);
udelay(200);
/* Switch from Rawclk to PCDclk */
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
-
+ intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
intel_de_posting_read(dev_priv, reg);
udelay(200);
@@ -974,28 +928,18 @@ void ilk_fdi_pll_disable(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
/* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
/* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(100);
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
-
/* Wait for the clocks to turn off. */
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(100);
}
@@ -1007,15 +951,13 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
u32 temp;
/* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(dev_priv, reg);
temp &= ~(0x7 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
intel_de_posting_read(dev_priv, reg);
@@ -1027,11 +969,8 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(dev_priv, reg);
@@ -1042,9 +981,9 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- /* BPC in FDI rx is consistent with that in PIPECONF */
+ /* BPC in FDI rx is consistent with that in TRANSCONF */
temp &= ~(0x07 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp);
intel_de_posting_read(dev_priv, reg);
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index d636d21fa9ce..b708a62e509a 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -31,6 +31,7 @@
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fifo_underrun.h"
+#include "intel_pch_display.h"
/**
* DOC: fifo underrun handling
@@ -509,3 +510,22 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
+
+void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915,
+ struct intel_crtc *crtc,
+ bool enable)
+{
+ crtc->cpu_fifo_underrun_disabled = !enable;
+
+ /*
+ * We track the PCH transcoder underrun reporting state
+ * within the crtc: the crtc for pipe A houses the underrun
+ * reporting state for PCH transcoder A, the crtc for pipe B
+ * houses it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
+ * and marking underrun reporting as disabled for the non-existing
+ * PCH transcoders B and C would prevent enabling the south
+ * error interrupt (see cpt_can_enable_serr_int()).
+ */
+ if (intel_has_pch_trancoder(i915, crtc->pipe))
+ crtc->pch_fifo_underrun_disabled = !enable;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
index 2e47d7d3c101..b00d8abebcf9 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
@@ -9,8 +9,11 @@
#include <linux/types.h>
struct drm_i915_private;
+struct intel_crtc;
enum pipe;
+void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915,
+ struct intel_crtc *crtc, bool enable);
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 0bc4f6b48e80..3ddfc8080ee8 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -215,41 +215,23 @@ intel_gmbus_reset(struct drm_i915_private *i915)
static void pnv_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- val = intel_de_read(i915, DSPCLK_GATE_D(i915));
- if (!enable)
- val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- else
- val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(i915, DSPCLK_GATE_D(i915), val);
+ intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE,
+ !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
static void pch_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
- val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D);
- if (!enable)
- val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- else
- val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val);
+ intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE,
+ !enable ? PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
- val = intel_de_read(i915, GEN9_CLKGATE_DIS_4);
- if (!enable)
- val |= BXT_GMBUS_GATING_DIS;
- else
- val &= ~BXT_GMBUS_GATING_DIS;
- intel_de_write(i915, GEN9_CLKGATE_DIS_4, val);
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS,
+ !enable ? BXT_GMBUS_GATING_DIS : 0);
}
static u32 get_reserved(struct intel_gmbus *bus)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 6406fd487ee5..2984d2810e42 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -943,8 +943,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
port);
- intel_de_write(dev_priv, HDCP_REP_CTL,
- intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
+ intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0);
ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
if (ret) {
@@ -1819,12 +1818,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
}
if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_AUTH_STATUS) {
+ LINK_AUTH_STATUS)
/* Link is Authenticated. Now set for Encryption */
- intel_de_write(dev_priv,
- HDCP2_CTL(dev_priv, cpu_transcoder, port),
- intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
- }
+ intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ 0, CTL_LINK_ENCRYPTION_REQ);
ret = intel_de_wait_for_set(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -1848,8 +1845,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS));
- intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
- intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
+ intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ CTL_LINK_ENCRYPTION_REQ, 0);
ret = intel_de_wait_for_clear(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 6a2ee342eab5..c7e9e1fbed37 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -44,6 +44,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
+#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_de.h"
@@ -237,15 +238,11 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_rmw(dev_priv, VIDEO_DIP_CTL,
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
@@ -313,15 +310,11 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
@@ -395,15 +388,11 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
@@ -471,15 +460,11 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv,
@@ -537,9 +522,7 @@ void hsw_write_infoframe(struct intel_encoder *encoder,
0);
/* Wa_14013475917 */
- if ((DISPLAY_VER(dev_priv) == 13 ||
- IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) && crtc_state->has_psr &&
- type == DP_SDP_VSC)
+ if (IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC)
return;
val |= hsw_infoframe_enable(type);
@@ -1796,7 +1779,7 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
else
max_tmds_clock = 165000;
- vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder);
+ vbt_max_tmds_clock = intel_bios_hdmi_max_tmds_clock(encoder->devdata);
if (vbt_max_tmds_clock)
max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock);
@@ -2153,7 +2136,7 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
* Our YCbCr output is always limited range.
* crtc_state->limited_color_range only applies to RGB,
* and it must never be set for YCbCr or we risk setting
- * some conflicting bits in PIPECONF which will mess up
+ * some conflicting bits in TRANSCONF which will mess up
* the colors on the monitor.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -2241,6 +2224,25 @@ static bool intel_hdmi_is_cloned(const struct intel_crtc_state *crtc_state)
!is_power_of_2(crtc_state->uapi.encoder_mask);
}
+static bool source_supports_scrambling(struct intel_encoder *encoder)
+{
+ /*
+ * Gen 10+ sources support HDMI 2.0: the max TMDS clock is 594MHz and
+ * scrambling is supported.
+ * But there seem to be cases where certain platforms that support
+ * HDMI 2.0 have an HDMI 1.4 retimer chip, and the max TMDS clock is
+ * capped by the VBT to less than 340MHz.
+ *
+ * In such cases, when an HDMI 2.0 sink is connected, it creates a
+ * problem: the platform and the sink both support scrambling but the
+ * HDMI 1.4 retimer chip doesn't.
+ *
+ * So decide on scrambling based on the max TMDS clock, taking into
+ * account the restrictions coming from the VBT.
+ */
+ return intel_hdmi_source_max_tmds_clock(encoder) > 340000;
+}
+
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -2271,7 +2273,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pixel_multiplier = 2;
pipe_config->has_audio =
- intel_hdmi_has_audio(encoder, pipe_config, conn_state);
+ intel_hdmi_has_audio(encoder, pipe_config, conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
/*
* Try to respect downstream TMDS clock limits first, if
@@ -2302,7 +2305,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
- if (scdc->scrambling.supported && DISPLAY_VER(dev_priv) >= 10) {
+ if (scdc->scrambling.supported && source_supports_scrambling(encoder)) {
if (scdc->scrambling.low_rates)
pipe_config->hdmi_scrambling = true;
@@ -2360,7 +2363,7 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
- kfree(to_intel_connector(connector)->detect_edid);
+ drm_edid_free(to_intel_connector(connector)->detect_edid);
to_intel_connector(connector)->detect_edid = NULL;
}
@@ -2421,7 +2424,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
intel_wakeref_t wakeref;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
bool connected = false;
struct i2c_adapter *i2c;
@@ -2429,17 +2433,23 @@ intel_hdmi_set_edid(struct drm_connector *connector)
i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
- edid = drm_get_edid(connector, i2c);
+ drm_edid = drm_edid_read_ddc(connector, i2c);
- if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ if (!drm_edid && !intel_gmbus_is_forced_bit(i2c)) {
drm_dbg_kms(&dev_priv->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
- edid = drm_get_edid(connector, i2c);
+ drm_edid = drm_edid_read_ddc(connector, i2c);
intel_gmbus_force_bit(i2c, false);
}
- to_intel_connector(connector)->detect_edid = edid;
+ /* Below we depend on display info having been updated */
+ drm_edid_connector_update(connector, drm_edid);
+
+ to_intel_connector(connector)->detect_edid = drm_edid;
+
+ /* FIXME: Get rid of drm_edid_raw() */
+ edid = drm_edid_raw(drm_edid);
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
@@ -2515,13 +2525,8 @@ intel_hdmi_force(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct edid *edid;
-
- edid = to_intel_connector(connector)->detect_edid;
- if (edid == NULL)
- return 0;
-
- return intel_connector_update_modes(connector, edid);
+ /* drm_edid_connector_update() done in ->detect() or ->force() */
+ return drm_edid_connector_add_modes(connector);
}
static struct i2c_adapter *
@@ -2850,11 +2855,12 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
enum port port = encoder->port;
u8 ddc_pin;
- ddc_pin = intel_bios_alternate_ddc_pin(encoder);
+ ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata);
if (ddc_pin) {
drm_dbg_kms(&dev_priv->drm,
- "Using DDC pin 0x%x for port %c (VBT)\n",
- ddc_pin, port_name(port));
+ "[ENCODER:%d:%s] Using DDC pin 0x%x (VBT)\n",
+ encoder->base.base.id, encoder->base.name,
+ ddc_pin);
return ddc_pin;
}
@@ -2880,8 +2886,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
drm_dbg_kms(&dev_priv->drm,
- "Using DDC pin 0x%x for port %c (platform default)\n",
- ddc_pin, port_name(port));
+ "[ENCODER:%d:%s] Using DDC pin 0x%x (platform default)\n",
+ encoder->base.base.id, encoder->base.name,
+ ddc_pin);
return ddc_pin;
}
@@ -2902,7 +2909,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->set_infoframes = g4x_set_infoframes;
dig_port->infoframes_enabled = g4x_infoframes_enabled;
} else if (HAS_DDI(dev_priv)) {
- if (intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) {
+ if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) {
dig_port->write_infoframe = lspcon_write_infoframe;
dig_port->read_infoframe = lspcon_read_infoframe;
dig_port->set_infoframes = lspcon_set_infoframes;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 8aaaef4d7856..5863763de530 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -315,7 +315,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
* intel_lpe_audio_notify() - notify lpe audio event
* audio driver and i915
* @dev_priv: the i915 drm device private data
- * @pipe: pipe
+ * @cpu_transcoder: CPU transcoder
* @port: port
* @eld : ELD data
* @ls_clock: Link symbol clock in kHz
@@ -324,7 +324,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
* Notify lpe audio driver of eld change.
*/
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
+ enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output)
{
unsigned long irqflags;
@@ -344,7 +344,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
if (eld != NULL) {
memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
- ppdata->pipe = pipe;
+ ppdata->pipe = cpu_transcoder;
ppdata->ls_clock = ls_clock;
ppdata->dp_output = dp_output;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
index f848c5038714..0beecac267ae 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
@@ -8,15 +8,15 @@
#include <linux/types.h>
-enum pipe;
enum port;
+enum transcoder;
struct drm_i915_private;
int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
+ enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output);
#endif /* __INTEL_LPE_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 9ff1c0b223ad..bb3b5355a0d9 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -689,7 +689,7 @@ void lspcon_resume(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dev);
enum drm_lspcon_mode expected_mode;
- if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return;
if (!lspcon->active) {
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index aecec992cd0d..a504b3a7fbd5 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -49,6 +49,7 @@
#include "intel_fdi.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
/* Private structure for the integrated LVDS support */
@@ -84,18 +85,18 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base);
}
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct drm_i915_private *i915,
i915_reg_t lvds_reg, enum pipe *pipe)
{
u32 val;
- val = intel_de_read(dev_priv, lvds_reg);
+ val = intel_de_read(i915, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
- *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
+ if (HAS_PCH_CPT(i915))
+ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
else
- *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
return val & LVDS_PORT_EN;
}
@@ -103,31 +104,30 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain);
+ wakeref = intel_display_power_get_if_enabled(i915, encoder->power_domain);
if (!wakeref)
return false;
- ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
+ ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(i915, encoder->power_domain, wakeref);
return ret;
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
u32 tmp, flags = 0;
- pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS);
tmp = intel_de_read(dev_priv, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
@@ -139,20 +139,20 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_PVSYNC;
- pipe_config->hw.adjusted_mode.flags |= flags;
+ crtc_state->hw.adjusted_mode.flags |= flags;
if (DISPLAY_VER(dev_priv) < 5)
- pipe_config->gmch_pfit.lvds_border_bits =
+ crtc_state->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
if (DISPLAY_VER(dev_priv) < 4) {
tmp = intel_de_read(dev_priv, PFIT_CONTROL);
- pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ crtc_state->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
- pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
@@ -216,41 +216,44 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, PP_CONTROL(0), val);
intel_de_write(dev_priv, PP_ON_DELAYS(0),
- REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+ REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
intel_de_write(dev_priv, PP_OFF_DELAYS(0),
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
intel_de_write(dev_priv, PP_DIVISOR(0),
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
}
static void intel_pre_enable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
u32 temp;
- if (HAS_PCH_SPLIT(dev_priv)) {
- assert_fdi_rx_pll_disabled(dev_priv, pipe);
- assert_shared_dpll_disabled(dev_priv,
- pipe_config->shared_dpll);
+ if (HAS_PCH_SPLIT(i915)) {
+ assert_fdi_rx_pll_disabled(i915, pipe);
+ assert_shared_dpll_disabled(i915, crtc_state->shared_dpll);
} else {
- assert_pll_disabled(dev_priv, pipe);
+ assert_pll_disabled(i915, pipe);
}
- intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps);
+ intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
temp = lvds_encoder->init_lvds_val;
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(i915)) {
temp &= ~LVDS_PIPE_SEL_MASK_CPT;
temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
@@ -260,7 +263,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= pipe_config->gmch_pfit.lvds_border_bits;
+ temp |= crtc_state->gmch_pfit.lvds_border_bits;
/*
* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -283,14 +286,14 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
/*
* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
- * only controlled through the PIPECONF reg.
+ * only controlled through the TRANSCONF reg.
*/
- if (DISPLAY_VER(dev_priv) == 4) {
+ if (DISPLAY_VER(i915) == 4) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
*/
- if (pipe_config->dither && pipe_config->pipe_bpp == 18)
+ if (crtc_state->dither && crtc_state->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -301,7 +304,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- intel_de_write(dev_priv, lvds_encoder->reg, temp);
+ intel_de_write(i915, lvds_encoder->reg, temp);
}
/*
@@ -309,25 +312,22 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
*/
static void intel_enable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_write(dev_priv, lvds_encoder->reg,
- intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN);
+ intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN);
- intel_de_write(dev_priv, PP_CONTROL(0),
- intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON);
+ intel_de_rmw(dev_priv, PP_CONTROL(0), 0, PANEL_POWER_ON);
intel_de_posting_read(dev_priv, lvds_encoder->reg);
if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
drm_err(&dev_priv->drm,
"timed out waiting for panel to power on\n");
- intel_backlight_enable(pipe_config, conn_state);
+ intel_backlight_enable(crtc_state, conn_state);
}
static void intel_disable_lvds(struct intel_atomic_state *state,
@@ -338,14 +338,12 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_write(dev_priv, PP_CONTROL(0),
- intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
+ intel_de_rmw(dev_priv, PP_CONTROL(0), PANEL_POWER_ON, 0);
if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
drm_err(&dev_priv->drm,
"timed out waiting for panel to power off\n");
- intel_de_write(dev_priv, lvds_encoder->reg,
- intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
+ intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0);
intel_de_posting_read(dev_priv, lvds_encoder->reg);
}
@@ -386,19 +384,19 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder)
}
static enum drm_mode_status
-intel_lvds_mode_valid(struct drm_connector *connector,
+intel_lvds_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
const struct drm_display_mode *fixed_mode =
- intel_panel_fixed_mode(intel_connector, mode);
- int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ intel_panel_fixed_mode(connector, mode);
+ int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
enum drm_mode_status status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- status = intel_panel_mode_valid(intel_connector, mode);
+ status = intel_panel_mode_valid(connector, mode);
if (status != MODE_OK)
return status;
@@ -408,23 +406,21 @@ intel_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config,
+static int intel_lvds_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_lvds_encoder *lvds_encoder =
- to_lvds_encoder(intel_encoder);
- struct intel_connector *intel_connector =
- lvds_encoder->attached_connector;
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+ struct intel_connector *connector = lvds_encoder->attached_connector;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
unsigned int lvds_bpp;
int ret;
/* Should never happen!! */
- if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) {
- drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
+ if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) {
+ drm_err(&i915->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -433,14 +429,14 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
else
lvds_bpp = 6*3;
- if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
- drm_dbg_kms(&dev_priv->drm,
+ if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) {
+ drm_dbg_kms(&i915->drm,
"forcing display bpp (was %d) to LVDS (%d)\n",
- pipe_config->pipe_bpp, lvds_bpp);
- pipe_config->pipe_bpp = lvds_bpp;
+ crtc_state->pipe_bpp, lvds_bpp);
+ crtc_state->pipe_bpp = lvds_bpp;
}
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/*
* We have timings from the BIOS for the panel, put them in
@@ -448,17 +444,17 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ ret = intel_panel_compute_config(connector, adjusted_mode);
if (ret)
return ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (HAS_PCH_SPLIT(dev_priv))
- pipe_config->has_pch_encoder = true;
+ if (HAS_PCH_SPLIT(i915))
+ crtc_state->has_pch_encoder = true;
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(crtc_state, conn_state);
if (ret)
return ret;
@@ -474,15 +470,19 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
-static int intel_lvds_get_modes(struct drm_connector *connector)
+static int intel_lvds_get_modes(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ const struct drm_edid *fixed_edid = connector->panel.fixed_edid;
+
+ /* Use panel fixed edid if we have one */
+ if (!IS_ERR_OR_NULL(fixed_edid)) {
+ drm_edid_connector_update(&connector->base, fixed_edid);
- /* use cached edid if we have one */
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- return drm_add_edid_modes(connector, intel_connector->edid);
+ return drm_edid_connector_add_modes(&connector->base);
+ }
- return intel_panel_get_modes(intel_connector);
+ return intel_panel_get_modes(connector);
}
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -581,12 +581,12 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "AOpen i45GMx-I",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
- DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
- },
- },
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
@@ -603,14 +603,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
},
{
- .callback = intel_no_lvds_dmi_callback,
- .ident = "Clientron E830",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
- DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
- },
- },
- {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron E830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+ },
+ },
+ {
.callback = intel_no_lvds_dmi_callback,
.ident = "Asus EeeBox PC EB1007",
.matches = {
@@ -760,11 +760,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(&i915->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS)
return encoder;
}
@@ -772,24 +772,24 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+bool intel_is_dual_link_lvds(struct drm_i915_private *i915)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(i915);
return encoder && to_lvds_encoder(encoder)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
- struct drm_i915_private *dev_priv = to_i915(lvds_encoder->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev);
struct intel_connector *connector = lvds_encoder->attached_connector;
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
unsigned int val;
/* use the module option value if specified */
- if (dev_priv->params.lvds_channel_mode > 0)
- return dev_priv->params.lvds_channel_mode == 2;
+ if (i915->params.lvds_channel_mode > 0)
+ return i915->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
@@ -804,8 +804,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = intel_de_read(dev_priv, lvds_encoder->reg);
- if (HAS_PCH_CPT(dev_priv))
+ val = intel_de_read(i915, lvds_encoder->reg);
+ if (HAS_PCH_CPT(i915))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
@@ -822,56 +822,54 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @dev_priv: i915 device
+ * @i915: i915 device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_i915_private *dev_priv)
+void intel_lvds_init(struct drm_i915_private *i915)
{
struct intel_lvds_encoder *lvds_encoder;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct edid *edid;
+ struct intel_connector *connector;
+ const struct drm_edid *drm_edid;
+ struct intel_encoder *encoder;
i915_reg_t lvds_reg;
u32 lvds;
u8 pin;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(&dev_priv->drm, !dev_priv->display.vbt.int_lvds_support,
+ drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!dev_priv->display.vbt.int_lvds_support) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!i915->display.vbt.int_lvds_support) {
+ drm_dbg_kms(&i915->drm,
"Internal LVDS support disabled by VBT\n");
return;
}
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(i915))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- lvds = intel_de_read(dev_priv, lvds_reg);
+ lvds = intel_de_read(i915, lvds_reg);
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(i915)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
}
pin = GMBUS_PIN_PANEL;
- if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
+ if (!intel_bios_is_lvds_present(i915, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"LVDS is not present in VBT\n");
return;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"LVDS is not present in VBT, but enabled anyway\n");
}
@@ -879,57 +877,55 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (!lvds_encoder)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(lvds_encoder);
return;
}
- lvds_encoder->attached_connector = intel_connector;
+ lvds_encoder->attached_connector = connector;
+ encoder = &lvds_encoder->base;
- intel_encoder = &lvds_encoder->base;
- encoder = &intel_encoder->base;
- connector = &intel_connector->base;
- drm_connector_init(&dev_priv->drm, &intel_connector->base, &intel_lvds_connector_funcs,
+ drm_connector_init(&i915->drm, &connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_lvds_enc_funcs,
+ drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
- intel_encoder->enable = intel_enable_lvds;
- intel_encoder->pre_enable = intel_pre_enable_lvds;
- intel_encoder->compute_config = intel_lvds_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
- intel_encoder->disable = pch_disable_lvds;
- intel_encoder->post_disable = pch_post_disable_lvds;
+ encoder->enable = intel_enable_lvds;
+ encoder->pre_enable = intel_pre_enable_lvds;
+ encoder->compute_config = intel_lvds_compute_config;
+ if (HAS_PCH_SPLIT(i915)) {
+ encoder->disable = pch_disable_lvds;
+ encoder->post_disable = pch_post_disable_lvds;
} else {
- intel_encoder->disable = gmch_disable_lvds;
+ encoder->disable = gmch_disable_lvds;
}
- intel_encoder->get_hw_state = intel_lvds_get_hw_state;
- intel_encoder->get_config = intel_lvds_get_config;
- intel_encoder->update_pipe = intel_backlight_update;
- intel_encoder->shutdown = intel_lvds_shutdown;
- intel_connector->get_hw_state = intel_connector_get_hw_state;
-
- intel_connector_attach_encoder(intel_connector, intel_encoder);
-
- intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
- intel_encoder->port = PORT_NONE;
- intel_encoder->cloneable = 0;
- if (DISPLAY_VER(dev_priv) < 4)
- intel_encoder->pipe_mask = BIT(PIPE_B);
+ encoder->get_hw_state = intel_lvds_get_hw_state;
+ encoder->get_config = intel_lvds_get_config;
+ encoder->update_pipe = intel_backlight_update;
+ encoder->shutdown = intel_lvds_shutdown;
+ connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_connector_attach_encoder(connector, encoder);
+
+ encoder->type = INTEL_OUTPUT_LVDS;
+ encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
+ encoder->port = PORT_NONE;
+ encoder->cloneable = 0;
+ if (DISPLAY_VER(i915) < 4)
+ encoder->pipe_mask = BIT(PIPE_B);
else
- intel_encoder->pipe_mask = ~0;
+ encoder->pipe_mask = ~0;
- drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ drm_connector_helper_add(&connector->base, &intel_lvds_connector_helper_funcs);
+ connector->base.display_info.subpixel_order = SubPixelHorizontalRGB;
lvds_encoder->reg = lvds_reg;
- intel_lvds_add_properties(connector);
+ intel_lvds_add_properties(&connector->base);
- intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
+ intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;
/*
@@ -944,56 +940,63 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
- edid = drm_get_edid_switcheroo(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
- else
- edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
- if (edid) {
- if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector,
- edid);
- } else {
+ mutex_lock(&i915->drm.mode_config.mutex);
+ if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) {
+ const struct edid *edid;
+
+ /* FIXME: Make drm_get_edid_switcheroo() return drm_edid */
+ edid = drm_get_edid_switcheroo(&connector->base,
+ intel_gmbus_get_adapter(i915, pin));
+ if (edid) {
+ drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH);
kfree(edid);
- edid = ERR_PTR(-EINVAL);
+ } else {
+ drm_edid = NULL;
}
} else {
- edid = ERR_PTR(-ENOENT);
+ drm_edid = drm_edid_read_ddc(&connector->base,
+ intel_gmbus_get_adapter(i915, pin));
}
- intel_connector->edid = edid;
-
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL,
- IS_ERR(edid) ? NULL : edid);
+ if (drm_edid) {
+ if (drm_edid_connector_update(&connector->base, drm_edid) ||
+ !drm_edid_connector_add_modes(&connector->base)) {
+ drm_edid_connector_update(&connector->base, NULL);
+ drm_edid_free(drm_edid);
+ drm_edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ drm_edid = ERR_PTR(-ENOENT);
+ }
+ intel_bios_init_panel_late(i915, &connector->panel, NULL,
+ IS_ERR(drm_edid) ? NULL : drm_edid);
/* Try EDID first */
- intel_panel_add_edid_fixed_modes(intel_connector, true);
+ intel_panel_add_edid_fixed_modes(connector, true);
/* Failed to get EDID, what about VBT? */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_vbt_lfp_fixed_mode(connector);
/*
* If we didn't get a fixed mode from EDID or VBT, try checking
* if the panel is already turned on. If so, assume that
* whatever is currently programmed is the correct mode.
*/
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_encoder_fixed_mode(intel_connector, intel_encoder);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_encoder_fixed_mode(connector, encoder);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&i915->drm.mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
+ if (!intel_panel_preferred_fixed_mode(connector))
goto failed;
- intel_panel_init(intel_connector);
+ intel_panel_init(connector, drm_edid);
- intel_backlight_setup(intel_connector, INVALID_PIPE);
+ intel_backlight_setup(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+ drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -1001,10 +1004,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
return;
failed:
- drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
- drm_connector_cleanup(connector);
- drm_encoder_cleanup(encoder);
+ drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n");
+ drm_connector_cleanup(&connector->base);
+ drm_encoder_cleanup(&encoder->base);
kfree(lvds_encoder);
- intel_connector_free(intel_connector);
+ intel_connector_free(connector);
return;
}
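
Editor's note: the bulk of the intel_lvds.c changes above fold open-coded read/modify/write sequences into intel_de_rmw(i915, reg, clear, set). The user-space sketch below only restates that helper's semantics with made-up names (fake_mmio_*, demo_rmw); it is illustrative, not the i915 implementation.

/*
 * Minimal user-space sketch of the read-modify-write pattern that
 * intel_de_rmw() encapsulates: read the register, clear the bits in
 * 'clear', set the bits in 'set', write the result back. All names
 * here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[1];           /* stand-in for MMIO space */

static uint32_t fake_mmio_read(unsigned int reg)
{
        return fake_regs[reg];
}

static void fake_mmio_write(unsigned int reg, uint32_t val)
{
        fake_regs[reg] = val;
}

/* demo_rmw(reg, clear, set) mirrors intel_de_rmw()'s argument order */
static uint32_t demo_rmw(unsigned int reg, uint32_t clear, uint32_t set)
{
        uint32_t old = fake_mmio_read(reg);
        uint32_t new = (old & ~clear) | set;

        fake_mmio_write(reg, new);
        return old;
}

#define LVDS_PORT_EN   (1u << 31)
#define PANEL_POWER_ON (1u << 0)

int main(void)
{
        fake_regs[0] = PANEL_POWER_ON;        /* pretend panel power is on */

        demo_rmw(0, 0, LVDS_PORT_EN);         /* enable: clear nothing, set the bit */
        demo_rmw(0, PANEL_POWER_ON, 0);       /* disable: clear the bit, set nothing */

        printf("reg = 0x%08x\n", (unsigned int)fake_regs[0]);   /* 0x80000000 */
        return 0;
}
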
diff --git a/drivers/gpu/drm/i915/display/intel_lvds_regs.h b/drivers/gpu/drm/i915/display/intel_lvds_regs.h
new file mode 100644
index 000000000000..47c1832819ee
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lvds_regs.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_LVDS_REGS_H__
+#define __INTEL_LVDS_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* LVDS port control */
+#define LVDS _MMIO(0x61180)
+/*
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN REG_BIT(31)
+/* Selects pipe B for LVDS data. Must be set on pre-965. */
+#define LVDS_PIPE_SEL_MASK REG_BIT(30)
+#define LVDS_PIPE_SEL(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK, (pipe))
+#define LVDS_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29)
+#define LVDS_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, (pipe))
+/* LVDS dithering flag on 965/g4x platform */
+#define LVDS_ENABLE_DITHER REG_BIT(25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define LVDS_VSYNC_POLARITY REG_BIT(21)
+#define LVDS_HSYNC_POLARITY REG_BIT(20)
+
+/* Enable border for unscaled (or aspect-scaled) display */
+#define LVDS_BORDER_ENABLE REG_BIT(15)
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK REG_GENMASK(9, 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 0)
+#define LVDS_A0A2_CLKA_POWER_UP REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 3)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK REG_GENMASK(7, 6)
+#define LVDS_A3_POWER_DOWN REG_FIELD_PREP(LVDS_A3_POWER_MASK, 0)
+#define LVDS_A3_POWER_UP REG_FIELD_PREP(LVDS_A3_POWER_MASK, 3)
+/*
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK REG_GENMASK(5, 4)
+#define LVDS_CLKB_POWER_DOWN REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 0)
+#define LVDS_CLKB_POWER_UP REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 3)
+/*
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK REG_GENMASK(3, 2)
+#define LVDS_B0B3_POWER_DOWN REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 0)
+#define LVDS_B0B3_POWER_UP REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 3)
+
+#define PCH_LVDS _MMIO(0xe1180)
+#define LVDS_DETECTED REG_BIT(1)
+
+#endif /* __INTEL_LVDS_REGS_H__ */
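
Editor's note: the new header expresses every field with REG_BIT()/REG_GENMASK() plus REG_FIELD_PREP()/REG_FIELD_GET(), which is what lets intel_lvds_port_enabled() above drop the explicit shift constants. A stripped-down sketch of the arithmetic follows; the DEMO_* names are illustrative, and the real i915 macros additionally carry compile-time mask checks.

/*
 * Mask-based field packing/extraction, as used by the new register
 * definitions. Requires a GCC/Clang builtin for the shift count.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_GENMASK(h, l)      (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define DEMO_FIELD_PREP(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define DEMO_FIELD_GET(mask, reg) \
        (((reg) & (mask)) >> __builtin_ctz(mask))

#define LVDS_PIPE_SEL_MASK_CPT  DEMO_GENMASK(30, 29)

int main(void)
{
        uint32_t val = DEMO_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, 2);  /* pipe C */

        /* REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val) in the patch does this: */
        assert(DEMO_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val) == 2);
        return 0;
}
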
diff --git a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
index 0e8248bce52d..0306ade2bc30 100644
--- a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
@@ -142,7 +142,9 @@
#define FIA1_BASE 0x163000
#define FIA2_BASE 0x16E000
#define FIA3_BASE 0x16F000
-#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE)
+#define _FIA(fia) _PICK_EVEN_2RANGES((fia), 1, \
+ FIA1_BASE, FIA1_BASE,\
+ FIA2_BASE, FIA3_BASE)
#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
/* ICL PHY DFLEX registers */
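
Editor's note: the _FIA() change swaps an explicit _PICK() list for _PICK_EVEN_2RANGES(), treating the FIA instances as two evenly spaced address ranges split after the first entry. The sketch below restates that addressing math in user space; it is based on a reading of the i915 register helpers, not a copy of them.

#include <assert.h>
#include <stdint.h>

#define FIA1_BASE 0x163000
#define FIA2_BASE 0x16E000
#define FIA3_BASE 0x16F000

/* evenly spaced instances: a, b, b + (b - a), ... */
static uint32_t pick_even(unsigned int idx, uint32_t a, uint32_t b)
{
        return a + idx * (b - a);
}

/* indices below 'split' use the first range, the rest use the second */
static uint32_t pick_even_2ranges(unsigned int idx, unsigned int split,
                                  uint32_t a, uint32_t b,
                                  uint32_t c, uint32_t d)
{
        return idx < split ? pick_even(idx, a, b)
                           : pick_even(idx - split, c, d);
}

int main(void)
{
        /* _FIA(fia) above: split after one entry, then a 0x1000 stride */
        assert(pick_even_2ranges(0, 1, FIA1_BASE, FIA1_BASE, FIA2_BASE, FIA3_BASE) == FIA1_BASE);
        assert(pick_even_2ranges(1, 1, FIA1_BASE, FIA1_BASE, FIA2_BASE, FIA3_BASE) == FIA2_BASE);
        assert(pick_even_2ranges(2, 1, FIA1_BASE, FIA1_BASE, FIA2_BASE, FIA3_BASE) == FIA3_BASE);
        return 0;
}
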
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 96395bfbd41d..1d0c9e247c42 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_color.h"
@@ -21,9 +22,11 @@
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_fifo_underrun.h"
#include "intel_modeset_setup.h"
#include "intel_pch_display.h"
-#include "intel_pm.h"
+#include "intel_wm.h"
#include "skl_watermark.h"
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
@@ -234,12 +237,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!crtc_state->hw.active && !HAS_GMCH(i915))
- return;
-
/*
- * We start out with underrun reporting disabled to avoid races.
- * For correct bookkeeping mark this on active crtcs.
+ * We start out with underrun reporting disabled on active
+ * pipes to avoid races.
*
* Also on gmch platforms we don't have any hardware bits to
* disable the underrun reporting, which means we need to start
@@ -250,19 +250,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
* No protection against concurrent access is required - at
* worst a fifo underrun happens which also sets this to false.
*/
- crtc->cpu_fifo_underrun_disabled = true;
-
- /*
- * We track the PCH trancoder underrun reporting state
- * within the crtc. With crtc for pipe A housing the underrun
- * reporting state for PCH transcoder A, crtc for pipe B housing
- * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
- * and marking underrun reporting as disabled for the non-existing
- * PCH transcoders B and C would prevent enabling the south
- * error interrupt (see cpt_can_enable_serr_int()).
- */
- if (intel_has_pch_trancoder(i915, crtc->pipe))
- crtc->pch_fifo_underrun_disabled = true;
+ intel_init_fifo_underrun_reporting(i915, crtc,
+ !crtc_state->hw.active &&
+ !HAS_GMCH(i915));
}
static void intel_sanitize_crtc(struct intel_crtc *crtc,
@@ -647,17 +637,14 @@ static void intel_early_display_was(struct drm_i915_private *i915)
* Also known as Wa_14010480278.
*/
if (IS_DISPLAY_VER(i915, 10, 12))
- intel_de_write(i915, GEN9_CLKGATE_DIS_0,
- intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
- if (IS_HASWELL(i915)) {
- /*
- * WaRsPkgCStateDisplayPMReq:hsw
- * System hang if this isn't done before disabling all planes!
- */
- intel_de_write(i915, CHICKEN_PAR1_1,
- intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- }
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ if (IS_HASWELL(i915))
+ intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
/* Display WA #1142:kbl,cfl,cml */
@@ -698,8 +685,10 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
drm_crtc_vblank_reset(&crtc->base);
- if (crtc_state->hw.active)
+ if (crtc_state->hw.active) {
+ intel_dmc_enable_pipe(i915, crtc->pipe);
intel_crtc_vblank_on(crtc_state);
+ }
}
intel_fbc_sanitize(i915);
@@ -721,18 +710,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_dpll_sanitize_state(i915);
- if (IS_G4X(i915)) {
- g4x_wm_get_hw_state(i915);
- g4x_wm_sanitize(i915);
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- vlv_wm_get_hw_state(i915);
- vlv_wm_sanitize(i915);
- } else if (DISPLAY_VER(i915) >= 9) {
- skl_wm_get_hw_state(i915);
- skl_wm_sanitize(i915);
- } else if (HAS_PCH_SPLIT(i915)) {
- ilk_wm_get_hw_state(i915);
- }
+ intel_wm_get_hw_state(i915);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
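
Editor's note: intel_modeset_setup.c now calls a single intel_wm_get_hw_state() instead of the per-platform if/else ladder for watermark readout. The sketch below only shows the general shape of such a consolidation behind a per-device function table; the structure and names (demo_wm_funcs, demo_device) are hypothetical stand-ins, not the driver's actual types.

#include <stdio.h>

struct demo_device;

struct demo_wm_funcs {
        void (*get_hw_state)(struct demo_device *dev);
};

struct demo_device {
        const struct demo_wm_funcs *wm;
};

static void g4x_get_hw_state(struct demo_device *dev)  { puts("g4x wm readout"); }
static void skl_get_hw_state(struct demo_device *dev)  { puts("skl wm readout"); }

/* one call site instead of an if/else ladder at every caller */
static void demo_wm_get_hw_state(struct demo_device *dev)
{
        if (dev->wm && dev->wm->get_hw_state)
                dev->wm->get_hw_state(dev);
}

int main(void)
{
        const struct demo_wm_funcs skl_funcs = { .get_hw_state = skl_get_hw_state };
        struct demo_device dev = { .wm = &skl_funcs };

        demo_wm_get_hw_state(&dev);     /* picked at init time, not at call time */
        (void)g4x_get_hw_state;
        return 0;
}
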
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index e0184745632c..b8dce0576512 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -1101,41 +1101,34 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
* The EDID in the OpRegion, or NULL if there is none or it's invalid.
*
*/
-struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
+const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_opregion *opregion = &i915->display.opregion;
- const void *in_edid;
- const struct edid *edid;
- struct edid *new_edid;
+ const struct drm_edid *drm_edid;
+ const void *edid;
int len;
if (!opregion->asle_ext)
return NULL;
- in_edid = opregion->asle_ext->bddc;
+ edid = opregion->asle_ext->bddc;
/* Validity corresponds to number of 128-byte blocks */
len = (opregion->asle_ext->phed & ASLE_PHED_EDID_VALID_MASK) * 128;
- if (!len || !memchr_inv(in_edid, 0, len))
+ if (!len || !memchr_inv(edid, 0, len))
return NULL;
- edid = in_edid;
+ drm_edid = drm_edid_alloc(edid, len);
- if (len < EDID_LENGTH * (1 + edid->extensions)) {
- drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5): too short\n");
- return NULL;
- }
- new_edid = drm_edid_duplicate(edid);
- if (!new_edid)
- return NULL;
- if (!drm_edid_is_valid(new_edid)) {
- kfree(new_edid);
+ if (!drm_edid_valid(drm_edid)) {
drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n");
- return NULL;
+ drm_edid_free(drm_edid);
+ drm_edid = NULL;
}
- return new_edid;
+
+ return drm_edid;
}
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
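
Editor's note: the OpRegion path above now derives the EDID length from the PHED valid-block count, rejects an empty buffer, and only then builds a drm_edid via drm_edid_alloc() and drm_edid_valid(). Below is a user-space sketch of just the length/emptiness check; the constants and helper names are illustrative, not the driver's.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_EDID_BLOCK_SIZE    128
#define DEMO_PHED_VALID_MASK    0xf     /* low bits: count of valid 128-byte blocks */

/* stand-in for the !memchr_inv(buf, 0, len) check in the driver */
static bool demo_any_nonzero(const uint8_t *buf, size_t len)
{
        for (size_t i = 0; i < len; i++)
                if (buf[i])
                        return true;
        return false;
}

/* Returns the usable EDID length in bytes, or 0 if there is none. */
static size_t demo_opregion_edid_len(uint32_t phed, const uint8_t *bddc)
{
        size_t len = (phed & DEMO_PHED_VALID_MASK) * DEMO_EDID_BLOCK_SIZE;

        if (!len || !demo_any_nonzero(bddc, len))
                return 0;
        return len;     /* caller would pass (bddc, len) to drm_edid_alloc() */
}

int main(void)
{
        uint8_t bddc[2 * DEMO_EDID_BLOCK_SIZE] = { [8] = 0xff };

        return demo_opregion_edid_len(0x2, bddc) == 256 ? 0 : 1;
}
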
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 2f261f985400..d02e6696a050 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -74,7 +74,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
pci_power_t state);
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
-struct edid *intel_opregion_get_edid(struct intel_connector *connector);
+const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
bool intel_opregion_headless_sku(struct drm_i915_private *i915);
@@ -123,7 +123,7 @@ static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
return -ENODEV;
}
-static inline struct edid *
+static inline const struct drm_edid *
intel_opregion_get_edid(struct intel_connector *connector)
{
return NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 3b1004b019a8..ce2a34a25211 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -31,12 +31,15 @@
#include <linux/kernel.h>
#include <linux/pwm.h>
+#include <drm/drm_edid.h>
+
#include "i915_reg.h"
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_quirks.h"
@@ -670,10 +673,13 @@ void intel_panel_init_alloc(struct intel_connector *connector)
INIT_LIST_HEAD(&panel->fixed_modes);
}
-int intel_panel_init(struct intel_connector *connector)
+int intel_panel_init(struct intel_connector *connector,
+ const struct drm_edid *fixed_edid)
{
struct intel_panel *panel = &connector->panel;
+ panel->fixed_edid = fixed_edid;
+
intel_backlight_init_funcs(panel);
if (!has_drrs_modes(connector))
@@ -692,6 +698,9 @@ void intel_panel_fini(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
struct drm_display_mode *fixed_mode, *next;
+ if (!IS_ERR_OR_NULL(panel->fixed_edid))
+ drm_edid_free(panel->fixed_edid);
+
intel_backlight_destroy(panel);
intel_bios_fini_panel(panel);
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 4b51e1c51da6..15a8c897b33f 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -13,13 +13,15 @@ enum drrs_type;
struct drm_connector;
struct drm_connector_state;
struct drm_display_mode;
+struct drm_edid;
struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
void intel_panel_init_alloc(struct intel_connector *connector);
-int intel_panel_init(struct intel_connector *connector);
+int intel_panel_init(struct intel_connector *connector,
+ const struct drm_edid *fixed_edid);
void intel_panel_fini(struct intel_connector *connector);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index cecc0d007cf3..22507da0b5f0 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -10,6 +10,7 @@
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pps.h"
@@ -219,20 +220,20 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder)));
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
@@ -266,7 +267,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
reg = PCH_TRANSCONF(pipe);
val = intel_de_read(dev_priv, reg);
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
+ pipeconf_val = intel_de_read(dev_priv, TRANSCONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
@@ -278,15 +279,15 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
* that in pipeconf reg. For HDMI we must use 8bpc
* here for both 8bpc and 12bpc.
*/
- val &= ~PIPECONF_BPC_MASK;
+ val &= ~TRANSCONF_BPC_MASK;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- val |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
else
- val |= pipeconf_val & PIPECONF_BPC_MASK;
+ val |= pipeconf_val & TRANSCONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) {
+ if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) {
if (HAS_PCH_IBX(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX;
@@ -307,7 +308,6 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
- u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -317,21 +317,16 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
assert_pch_ports_disabled(dev_priv, pipe);
reg = PCH_TRANSCONF(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(dev_priv))
/* Workaround: Clear the timing override chicken bit again. */
- reg = TRANS_CHICKEN2(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, reg, val);
- }
+ intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
+ TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void ilk_pch_pre_enable(struct intel_atomic_state *state,
@@ -414,7 +409,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ u32 bpc = (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
@@ -456,21 +451,14 @@ void ilk_pch_post_disable(struct intel_atomic_state *state,
ilk_disable_pch_transcoder(crtc);
if (HAS_PCH_CPT(dev_priv)) {
- i915_reg_t reg;
- u32 temp;
-
/* disable TRANS_DP_CTL */
- reg = TRANS_DP_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_PORT_SEL_MASK);
- temp |= TRANS_DP_PORT_SEL_NONE;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe),
+ TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK,
+ TRANS_DP_PORT_SEL_NONE);
/* disable DPLL_SEL */
- temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
- temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
- intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
+ intel_de_rmw(dev_priv, PCH_DPLL_SEL,
+ TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0);
}
ilk_fdi_pll_disable(crtc);
@@ -565,9 +553,9 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
+ pipeconf_val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK)
+ if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK)
val |= TRANS_INTERLACE_INTERLACED;
else
val |= TRANS_INTERLACE_PROGRESSIVE;
@@ -580,20 +568,14 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
- u32 val;
-
- val = intel_de_read(dev_priv, LPT_TRANSCONF);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
+ intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void lpt_pch_enable(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 08a94365b7d1..f4c09cc37a5e 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -12,19 +12,13 @@
static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+ intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+ intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
@@ -467,24 +461,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
- dev_priv->pch_ssc_use = 0;
+ dev_priv->display.dpll.pch_ssc_use = 0;
if (spll_uses_pch_ssc(dev_priv)) {
drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
+ dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
+ dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
+ dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (dev_priv->pch_ssc_use)
+ if (dev_priv->display.dpll.pch_ssc_use)
return;
if (has_fdi) {
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index e9774670e3f6..8d3ea8d7b737 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -72,14 +72,13 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source)
+static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source)
{
struct intel_encoder *encoder;
struct intel_crtc *crtc;
struct intel_digital_port *dig_port;
- int ret = 0;
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
@@ -121,8 +120,6 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
}
}
drm_modeset_unlock_all(&dev_priv->drm);
-
- return ret;
}
static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -132,11 +129,8 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
{
bool need_stable_symbols = false;
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
- if (ret)
- return ret;
- }
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
@@ -200,11 +194,8 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source *source,
u32 *val)
{
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
- if (ret)
- return ret;
- }
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 76be796df255..bb6ea7de5c61 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -107,7 +107,7 @@ initial_plane_vma(struct drm_i915_private *i915,
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
- size * 2 > i915->stolen_usable_size)
+ size * 2 > i915->dsm.usable_size)
return NULL;
obj = i915_gem_object_create_region_at(mem, phys_base, size, 0);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 7b21438edd9b..24b5b12f7732 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -13,6 +13,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_pps.h"
#include "intel_quirks.h"
@@ -1534,17 +1535,13 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
- if (i915_mmio_reg_valid(regs.pp_div)) {
+ if (i915_mmio_reg_valid(regs.pp_div))
intel_de_write(dev_priv, regs.pp_div,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
- } else {
- u32 pp_ctl;
-
- pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
- pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- }
+ else
+ intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
+ REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(seq->t11_t12, 1000)));
drm_dbg_kms(&dev_priv->drm,
"panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index d0d774219cc5..44610b20cd29 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -24,14 +24,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
-#include "display/intel_dp.h"
-
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
@@ -153,7 +152,7 @@ static void psr_irq_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t imr_reg;
- u32 mask, val;
+ u32 mask;
if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
@@ -165,10 +164,7 @@ static void psr_irq_control(struct intel_dp *intel_dp)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
psr_irq_pre_entry_bit_get(intel_dp);
- val = intel_de_read(dev_priv, imr_reg);
- val &= ~psr_irq_mask_get(intel_dp);
- val |= ~mask;
- intel_de_write(dev_priv, imr_reg, val);
+ intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
}
static void psr_event_print(struct drm_i915_private *i915,
@@ -246,8 +242,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
}
if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
- u32 val;
-
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
@@ -261,9 +255,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* again, so we don't care about unmasking the interrupt
* or unsetting irq_aux_error.
*/
- val = intel_de_read(dev_priv, imr_reg);
- val |= psr_irq_psr_error_bit_get(intel_dp);
- intel_de_write(dev_priv, imr_reg, val);
+ intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
schedule_work(&intel_dp->psr.work);
}
@@ -543,6 +535,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
val |= intel_psr2_get_tp_time(intel_dp);
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_dp->psr.io_wake_lines < 9 &&
+ intel_dp->psr.fast_wake_lines < 9)
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ else
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
+ }
+
/* Wa_22012278275:adl-p */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
static const u8 map[] = {
@@ -559,31 +559,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
* comments below for more information
*/
- u32 tmp, lines = 7;
-
- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ u32 tmp;
- tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
val |= tmp;
- tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
val |= tmp;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- /*
- * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
- * values from BSpec. In order to setting an optimal power
- * consumption, lower than 4k resolution mode needs to decrease
- * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
- * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
- */
- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
- val |= TGL_EDP_PSR2_FAST_WAKE(7);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
} else if (DISPLAY_VER(dev_priv) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(7);
- val |= EDP_PSR2_FAST_WAKE(7);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -592,12 +582,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
- /* Wa_1408330847 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK);
-
tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
@@ -638,13 +622,10 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 val;
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
- val &= ~EDP_PSR2_IDLE_FRAME_MASK;
- val |= idle_frames;
- intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
+ intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder),
+ EDP_PSR2_IDLE_FRAME_MASK, idle_frames);
}
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
@@ -709,6 +690,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
{
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u32 exit_scanlines;
/*
@@ -725,7 +707,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
if (crtc_state->enable_psr2_sel_fetch)
return;
- if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
@@ -766,13 +748,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
- /* Wa_14010254185 Wa_14010103792 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
- return false;
- }
-
return crtc_state->enable_psr2_sel_fetch = true;
}
@@ -843,6 +818,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
+static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+ u8 max_wake_lines;
+
+ if (DISPLAY_VER(i915) >= 12) {
+ io_wake_time = 42;
+ /*
+ * According to Bspec it's 42us, but based on testing
+ * it is not enough -> use 45 us.
+ */
+ fast_wake_time = 45;
+ max_wake_lines = 12;
+ } else {
+ io_wake_time = 50;
+ fast_wake_time = 32;
+ max_wake_lines = 8;
+ }
+
+ io_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->uapi.adjusted_mode, io_wake_time);
+ fast_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->uapi.adjusted_mode, fast_wake_time);
+
+ if (io_wake_lines > max_wake_lines ||
+ fast_wake_lines > max_wake_lines)
+ return false;
+
+ if (i915->params.psr_safest_params)
+ io_wake_lines = fast_wake_lines = max_wake_lines;
+
+ /* According to Bspec lower limit should be set as 7 lines. */
+ intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
+ intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+
+ return true;
+}
+
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -937,6 +952,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, Unable to use long enough wake times\n");
+ return false;
+ }
+
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -946,13 +967,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
}
- /* Wa_2209313811 */
- if (!crtc_state->enable_psr2_sel_fetch &&
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
- goto unsupported;
- }
-
if (!psr2_granularity_check(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
goto unsupported;
@@ -1072,7 +1086,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
}
if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
+ val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder));
val &= EXITLINE_MASK;
pipe_config->dc3co_exitline = val;
}
@@ -1112,6 +1126,8 @@ static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
return LATENCY_REPORTING_REMOVED_PIPE_B;
case PIPE_C:
return LATENCY_REPORTING_REMOVED_PIPE_C;
+ case PIPE_D:
+ return LATENCY_REPORTING_REMOVED_PIPE_D;
default:
MISSING_CASE(intel_dp->psr.pipe);
return 0;
@@ -1144,25 +1160,31 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
psr_irq_control(intel_dp);
- if (intel_dp->psr.dc3co_exitline) {
- u32 val;
-
- /*
- * TODO: if future platforms supports DC3CO in more than one
- * transcoder, EXITLINE will need to be unset when disabling PSR
- */
- val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
- val &= ~EXITLINE_MASK;
- val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
- val |= EXITLINE_ENABLE;
- intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
- }
+ /*
+ * TODO: if future platforms support DC3CO in more than one
+ * transcoder, EXITLINE will need to be unset when disabling PSR
+ */
+ if (intel_dp->psr.dc3co_exitline)
+ intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
+ intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
+ /*
+ * Wa_16013835468
+ * Wa_14015648006
+ */
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+ IS_DISPLAY_VER(dev_priv, 12, 13)) {
+ if (crtc_state->hw.adjusted_mode.crtc_vblank_start !=
+ crtc_state->hw.adjusted_mode.crtc_vdisplay)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
+ wa_16013835468_bit_get(intel_dp));
+ }
+
if (intel_dp->psr.psr2_enabled) {
if (DISPLAY_VER(dev_priv) == 9)
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
@@ -1181,13 +1203,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
ADLP_1_BASED_X_GRANULARITY);
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK,
- TRANS_SET_CONTEXT_LATENCY_VALUE(1));
-
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
@@ -1196,20 +1211,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
-
- /* Wa_16013835468:tgl[b0+], dg1 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
- IS_DG1(dev_priv)) {
- u16 vtotal, vblank;
-
- vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
- crtc_state->uapi.adjusted_mode.crtc_vdisplay;
- vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
- crtc_state->uapi.adjusted_mode.crtc_vblank_start;
- if (vblank > vtotal)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
- wa_16013835468_bit_get(intel_dp));
- }
}
}
@@ -1356,19 +1357,16 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
- /* Wa_1408330847 */
- if (intel_dp->psr.psr2_sel_fetch_enabled &&
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
+ /*
+ * Wa_16013835468
+ * Wa_14015648006
+ */
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+ IS_DISPLAY_VER(dev_priv, 12, 13))
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ wa_16013835468_bit_get(intel_dp), 0);
if (intel_dp->psr.psr2_enabled) {
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK, 0);
-
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
@@ -1377,12 +1375,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
-
- /* Wa_16013835468:tgl[b0+], dg1 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
- IS_DG1(dev_priv))
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
- wa_16013835468_bit_get(intel_dp), 0);
}
intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
@@ -1540,8 +1532,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
}
-void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1552,10 +1544,28 @@ void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}
-void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
+void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (plane->id == PLANE_CURSOR)
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ plane_state->ctl);
+ else
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ PLANE_SEL_FETCH_CTL_ENABLE);
+}
+
+void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1566,11 +1576,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
if (!crtc_state->enable_psr2_sel_fetch)
return;
- if (plane->id == PLANE_CURSOR) {
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- plane_state->ctl);
+ if (plane->id == PLANE_CURSOR)
return;
- }
clip = &plane_state->psr2_sel_fetch_area;
@@ -1598,9 +1605,6 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
-
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- PLANE_SEL_FETCH_CTL_ENABLE);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
@@ -1835,6 +1839,12 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (full_update)
goto skip_sel_fetch_set_loop;
+ /* Wa_14014971492 */
+ if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+ IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
+ crtc_state->splitter.enable)
+ pipe_clip.y1 = 0;
+
ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 2ac3a46cccc5..7a38a9e7fa5b 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -46,12 +46,16 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
-void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane);
-void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
+void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane);
+void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
+void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 6e48d3bcdfec..a280448df771 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -199,6 +199,8 @@ static struct intel_quirk intel_quirks[] = {
/* ECS Liva Q2 */
{ 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ /* HP Notebook - 14-r206nv */
+ { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};
void intel_init_quirks(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 21805c15d5eb..e12ba458636c 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -39,6 +39,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
+#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -1068,7 +1069,8 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1))
return -ENXIO;
- if (tx_rate == SDVO_HBUF_TX_DISABLED)
+ /* TX_DISABLED doesn't mean disabled for ELD */
+ if (if_index != SDVO_HBUF_INDEX_ELD && tx_rate == SDVO_HBUF_TX_DISABLED)
return 0;
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
@@ -1185,6 +1187,28 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
}
+static void intel_sdvo_get_eld(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ ssize_t len;
+ u8 val;
+
+ if (!crtc_state->has_audio)
+ return;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, &val, 1))
+ return;
+
+ if ((val & SDVO_AUDIO_ELD_VALID) == 0)
+ return;
+
+ len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
+ crtc_state->eld, sizeof(crtc_state->eld));
+ if (len < 0)
+ drm_dbg_kms(&i915->drm, "failed to read ELD\n");
+}
+
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
@@ -1378,7 +1402,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state);
- pipe_config->has_audio = intel_sdvo_has_audio(encoder, pipe_config, conn_state);
+ pipe_config->has_audio =
+ intel_sdvo_has_audio(encoder, pipe_config, conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
pipe_config->limited_color_range =
intel_sdvo_limited_color_range(encoder, pipe_config,
@@ -1729,9 +1755,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
&val, 1)) {
- u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
-
- if ((val & mask) == mask)
+ if (val & SDVO_AUDIO_PRESENCE_DETECT)
pipe_config->has_audio = true;
}
@@ -1742,6 +1766,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
+
+ intel_sdvo_get_eld(intel_sdvo, pipe_config);
}
static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
@@ -1753,12 +1779,7 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
- u8 *eld = connector->eld;
-
- eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+ const u8 *eld = crtc_state->eld;
intel_sdvo_set_audio_state(intel_sdvo, 0);
@@ -2903,7 +2924,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
mutex_unlock(&i915->drm.mode_config.mutex);
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, NULL);
if (!intel_panel_preferred_fixed_mode(intel_connector))
goto err;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 9494cfd45519..1cfb94b5cedb 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -40,7 +40,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
*/
if (intel_de_wait_for_clear(i915, DG2_PHY_MISC(phy),
DG2_PHY_DP_TX_ACK_MASK, 25))
- i915->snps_phy_failed_calibration |= BIT(phy);
+ i915->display.snps.phy_failed_calibration |= BIT(phy);
}
}
@@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+static const struct intel_mpllb_state dg2_hdmi_267300 = {
+ .clock = 267300,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_268500 = {
.clock = 268500,
.ref_control =
@@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+static const struct intel_mpllb_state dg2_hdmi_319890 = {
+ .clock = 319890,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_497750 = {
.clock = 497750,
.ref_control =
@@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_209800,
&dg2_hdmi_241500,
&dg2_hdmi_262750,
+ &dg2_hdmi_267300,
&dg2_hdmi_268500,
&dg2_hdmi_296703,
+ &dg2_hdmi_319890,
&dg2_hdmi_497750,
&dg2_hdmi_592000,
&dg2_hdmi_593407,
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index e6b4d24b9cd0..a16e56a60c30 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -1217,7 +1217,8 @@ g4x_sprite_update_arm(struct intel_plane *plane,
}
intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
- intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DVSTILEOFF(pipe),
+ DVS_OFFSET_Y(y) | DVS_OFFSET_X(x));
/*
* The control register self-arms if the plane was previously
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index b986bf075889..3b5ff84dc615 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -930,8 +930,7 @@ intel_enable_tv(struct intel_atomic_state *state,
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
- intel_de_write(dev_priv, TV_CTL,
- intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
+ intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE);
}
static void
@@ -943,8 +942,7 @@ intel_disable_tv(struct intel_atomic_state *state,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- intel_de_write(dev_priv, TV_CTL,
- intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE);
+ intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0);
}
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
new file mode 100644
index 000000000000..571f5dda1e66
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022-2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_vblank.h"
+
+/*
+ * This timing diagram depicts the video signal in and
+ * around the vertical blanking period.
+ *
+ * Assumptions about the fictitious mode used in this example:
+ * vblank_start >= 3
+ * vsync_start = vblank_start + 1
+ * vsync_end = vblank_start + 2
+ * vtotal = vblank_start + 3
+ *
+ * start of vblank:
+ * latch double buffered registers
+ * increment frame counter (ctg+)
+ * generate start of vblank interrupt (gen4+)
+ * |
+ * | frame start:
+ * | generate frame start interrupt (aka. vblank interrupt) (gmch)
+ * | may be shifted forward 1-3 extra lines via TRANSCONF
+ * | |
+ * | | start of vsync:
+ * | | generate vsync interrupt
+ * | | |
+ * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
+ * . \hs/ . \hs/ \hs/ \hs/ . \hs/
+ * ----va---> <-----------------vb--------------------> <--------va-------------
+ * | | <----vs-----> |
+ * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
+ * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
+ * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
+ * | | |
+ * last visible pixel first visible pixel
+ * | increment frame counter (gen3/4)
+ * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
+ *
+ * x = horizontal active
+ * _ = horizontal blanking
+ * hs = horizontal sync
+ * va = vertical active
+ * vb = vertical blanking
+ * vs = vertical sync
+ * vbs = vblank_start (number)
+ *
+ * Summary:
+ * - most events happen at the start of horizontal sync
+ * - frame start happens at the start of horizontal blank, 1-4 lines
+ * (depending on TRANSCONF settings) after the start of vblank
+ * - gen3/4 pixel and frame counter are synchronized with the start
+ * of horizontal active on the first line of vertical active
+ */
+
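As a worked example of the pixel-count arithmetic used by the gen3/4 vblank counter below (the timings are hypothetical, not taken from the patch):

	/* Hypothetical 1920x1080 mode: htotal = 2200, hsync_start = 2008, vblank_start = 1080 */
	u32 htotal = 2200, hsync_start = 2008, vbl_start = 1080;

	vbl_start *= htotal;			/* 1080 * 2200 = 2376000 pixels */
	vbl_start -= htotal - hsync_start;	/* event fires at start of hsync: 2376000 - 192 = 2375808 */

A pixel counter reading at or above 2375808 therefore means the start-of-vblank event has already occurred on the current frame, which is why i915_get_vblank_counter() adds (pixel >= vbl_start) to the hardware frame counter.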
+/*
+ * Called from drm generic code, passed a 'crtc', which we use as a pipe index.
+ */
+u32 i915_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ u32 pixel, vbl_start, hsync_start, htotal;
+ u64 frame;
+
+ /*
+ * On i965gm TV output the frame counter only works up to
+ * the point when we enable the TV encoder. After that the
+ * frame counter ceases to work and reads zero. We need a
+ * vblank wait before enabling the TV encoder and so we
+ * have to enable vblank interrupts while the frame counter
+ * is still in a working state. However the core vblank code
+ * does not like us returning non-zero frame counter values
+ * when we've told it that we don't have a working frame
+ * counter. Thus we must stop non-zero values leaking out.
+ */
+ if (!vblank->max_vblank_count)
+ return 0;
+
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vbl_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+
+ /* Convert to pixel count */
+ vbl_start *= htotal;
+
+ /* Start of vblank event occurs at start of hsync */
+ vbl_start -= htotal - hsync_start;
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ frame = intel_de_read64_2x32(dev_priv, PIPEFRAMEPIXEL(pipe), PIPEFRAME(pipe));
+
+ pixel = frame & PIPE_PIXEL_MASK;
+ frame = (frame >> PIPE_FRAME_LOW_SHIFT) & 0xffffff;
+
+ /*
+ * The frame counter increments at beginning of active.
+ * Cook up a vblank counter by also checking the pixel
+ * counter against vblank start.
+ */
+ return (frame + (pixel >= vbl_start)) & 0xffffff;
+}
+
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ if (!vblank->max_vblank_count)
+ return 0;
+
+ return intel_de_read(dev_priv, PIPE_FRMCOUNT_G4X(pipe));
+}
+
+static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 htotal = mode->crtc_htotal;
+ u32 clock = mode->crtc_clock;
+ u32 scan_prev_time, scan_curr_time, scan_post_time;
+
+ /*
+ * To avoid the race condition where we might cross into the
+ * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
+ * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
+ * during the same frame.
+ */
+ do {
+ /*
+ * This field provides read back of the display
+ * pipe frame time stamp. The time stamp value
+ * is sampled at every start of vertical blank.
+ */
+ scan_prev_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
+
+ /*
+ * The TIMESTAMP_CTR register has the current
+ * time stamp value.
+ */
+ scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
+
+ scan_post_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
+ } while (scan_post_time != scan_prev_time);
+
+ return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
+ clock), 1000 * htotal);
+}
+
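Assuming the PIPE_FRMTMSTMP and IVB_TIMESTAMP_CTR counters tick in microseconds and crtc_clock is in kHz (an assumption, not spelled out in this hunk), the final division above converts a time delta into scanlines. A rough sketch with hypothetical numbers:

	/* 1000 us since the last frame timestamp, 148500 kHz pixel clock, htotal = 2200 */
	u32 delta = 1000, clock = 148500, htotal = 2200;

	/* pixels elapsed = delta * clock / 1000; scanlines = pixels / htotal -> 67 */
	u32 scanlines = div_u64(mul_u32_u32(delta, clock), 1000 * htotal);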
+/*
+ * On certain encoders on certain platforms, the pipe
+ * scanline register will not work to get the scanline,
+ * either because the timings are driven from the PORT or
+ * because of issues with scanline register updates.
+ * This function uses the frame timestamp and current
+ * timestamp registers to calculate the scanline instead.
+ */
+static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 vblank_start = mode->crtc_vblank_start;
+ u32 vtotal = mode->crtc_vtotal;
+ u32 scanline;
+
+ scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
+ scanline = min(scanline, vtotal - 1);
+ scanline = (scanline + vblank_start) % vtotal;
+
+ return scanline;
+}
+
+/*
+ * Use intel_de_read_fw() only for fast reads of the display block;
+ * no need for forcewake etc.
+ */
+static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct drm_display_mode *mode;
+ struct drm_vblank_crtc *vblank;
+ enum pipe pipe = crtc->pipe;
+ int position, vtotal;
+
+ if (!crtc->active)
+ return 0;
+
+ vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ mode = &vblank->hwmode;
+
+ if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
+ return __intel_get_crtc_scanline_from_timestamp(crtc);
+
+ vtotal = mode->crtc_vtotal;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
+
+ /*
+ * On HSW, the DSL reg (0x70000) appears to return 0 if we
+ * read it just before the start of vblank. So try it again
+ * so we don't accidentally end up spanning a vblank frame
+ * increment, causing the pipe_update_end() code to squawk at us.
+ *
+ * The nature of this problem means we can't simply check the ISR
+ * bit and return the vblank start value; nor can we use the scanline
+ * debug register in the transcoder as it appears to have the same
+ * problem. We may need to extend this to include other platforms,
+ * but so far testing only shows the problem on HSW.
+ */
+ if (HAS_DDI(dev_priv) && !position) {
+ int i, temp;
+
+ for (i = 0; i < 100; i++) {
+ udelay(1);
+ temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
+ if (temp != position) {
+ position = temp;
+ break;
+ }
+ }
+ }
+
+ /*
+ * See update_scanline_offset() for the details on the
+ * scanline_offset adjustment.
+ */
+ return (position + crtc->scanline_offset) % vtotal;
+}
+
+static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = _crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ enum pipe pipe = crtc->pipe;
+ int position;
+ int vbl_start, vbl_end, hsync_start, htotal, vtotal;
+ unsigned long irqflags;
+ bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
+ IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
+ crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
+
+ if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
+ drm_dbg(&dev_priv->drm,
+ "trying to get scanoutpos for disabled pipe %c\n",
+ pipe_name(pipe));
+ return false;
+ }
+
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vtotal = mode->crtc_vtotal;
+ vbl_start = mode->crtc_vblank_start;
+ vbl_end = mode->crtc_vblank_end;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+ vbl_end /= 2;
+ vtotal /= 2;
+ }
+
+ /*
+ * Lock uncore.lock, as we will do multiple timing critical raw
+ * register reads, potentially with preemption disabled, so the
+ * following code must not block on uncore.lock.
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+ *stime = ktime_get();
+
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
+ int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
+
+ position = __intel_get_crtc_scanline(crtc);
+
+ /*
+ * Already exiting vblank? If so, shift our position
+ * so it looks like we're already approaching the full
+ * vblank end. This should make the generated timestamp
+ * more or less match when the active portion will start.
+ */
+ if (position >= vbl_start && scanlines < position)
+ position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
+ } else if (use_scanline_counter) {
+ /* No obvious pixelcount register. Only query vertical
+ * scanout position from Display scan line register.
+ */
+ position = __intel_get_crtc_scanline(crtc);
+ } else {
+ /*
+ * Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ /* convert to pixel counts */
+ vbl_start *= htotal;
+ vbl_end *= htotal;
+ vtotal *= htotal;
+
+ /*
+ * In interlaced modes, the pixel counter counts all pixels,
+ * so one field will have htotal more pixels. To keep the
+ * reported position from jumping backwards when the pixel
+ * counter is beyond the length of the shorter field, just
+ * clamp the position to the length of the shorter field. This
+ * matches how the scanline counter based position works since
+ * the scanline counter doesn't count the two half lines.
+ */
+ if (position >= vtotal)
+ position = vtotal - 1;
+
+ /*
+ * Start of vblank interrupt is triggered at start of hsync,
+ * just prior to the first active line of vblank. However we
+ * consider lines to start at the leading edge of horizontal
+ * active. So, should we get here before we've crossed into
+ * the horizontal active of the first line in vblank, we would
+ * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
+ * always add htotal-hsync_start to the current pixel position.
+ */
+ position = (position + htotal - hsync_start) % vtotal;
+ }
+
+ /* Get optional system timestamp after query. */
+ if (etime)
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ /*
+ * While in vblank, position will be negative
+ * counting up towards 0 at vbl_end. And outside
+ * vblank, position will be positive counting
+ * up since vbl_end.
+ */
+ if (position >= vbl_start)
+ position -= vbl_end;
+ else
+ position += vtotal - vbl_end;
+
+ if (use_scanline_counter) {
+ *vpos = position;
+ *hpos = 0;
+ } else {
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ return true;
+}
+
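To make the sign convention used in i915_get_crtc_scanoutpos() concrete (hypothetical timings, not from the patch):

	/*
	 * vbl_start = 1080, vbl_end = vtotal = 1125 lines.
	 * A raw position of 1082 (inside vblank) becomes 1082 - 1125 = -43,
	 * counting up towards 0 at the end of vblank; a raw position of 10
	 * (in the active area) becomes 10 + 1125 - 1125 = 10 lines since
	 * the end of vblank.
	 */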
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ i915_get_crtc_scanoutpos);
+}
+
+int intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ unsigned long irqflags;
+ int position;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ position = __intel_get_crtc_scanline(crtc);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ return position;
+}
+
+static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ i915_reg_t reg = PIPEDSL(pipe);
+ u32 line1, line2;
+
+ line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+ msleep(5);
+ line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+
+ return line1 != line2;
+}
+
+static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /* Wait for the display line to settle/start moving */
+ if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
+ drm_err(&dev_priv->drm,
+ "pipe %c scanline %s wait timed out\n",
+ pipe_name(pipe), str_on_off(state));
+}
+
+void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
+{
+ wait_for_pipe_scanline_moving(crtc, false);
+}
+
+void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
+{
+ wait_for_pipe_scanline_moving(crtc, true);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
new file mode 100644
index 000000000000..c9fea2c2a990
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022-2023 Intel Corporation
+ */
+
+#ifndef __INTEL_VBLANK_H__
+#define __INTEL_VBLANK_H__
+
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+struct drm_crtc;
+struct intel_crtc;
+
+u32 i915_get_vblank_counter(struct drm_crtc *crtc);
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq);
+int intel_get_crtc_scanline(struct intel_crtc *crtc);
+void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc);
+void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc);
+
+#endif /* __INTEL_VBLANK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 207b2a648d32..09b32ffdc552 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -17,6 +17,7 @@
#include "intel_dsi.h"
#include "intel_qp_tables.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
enum ROW_INDEX_BPP {
ROW_INDEX_6BPP = 0,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
new file mode 100644
index 000000000000..4fd883463752
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_VDSC_REGS_H__
+#define __INTEL_VDSC_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* Display Stream Splitter Control */
+#define DSS_CTL1 _MMIO(0x67400)
+#define SPLITTER_ENABLE (1 << 31)
+#define JOINER_ENABLE (1 << 30)
+#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
+#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
+#define OVERLAP_PIXELS_MASK (0xf << 16)
+#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
+#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
+
+#define DSS_CTL2 _MMIO(0x67404)
+#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
+#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
+#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+
+#define _ICL_PIPE_DSS_CTL1_PB 0x78200
+#define _ICL_PIPE_DSS_CTL1_PC 0x78400
+#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL1_PB, \
+ _ICL_PIPE_DSS_CTL1_PC)
+#define BIG_JOINER_ENABLE (1 << 29)
+#define MASTER_BIG_JOINER_ENABLE (1 << 28)
+#define VGA_CENTERING_ENABLE (1 << 27)
+#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
+#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
+#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
+#define UNCOMPRESSED_JOINER_MASTER (1 << 21)
+#define UNCOMPRESSED_JOINER_SLAVE (1 << 20)
+
+#define _ICL_PIPE_DSS_CTL2_PB 0x78204
+#define _ICL_PIPE_DSS_CTL2_PC 0x78404
+#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL2_PB, \
+ _ICL_PIPE_DSS_CTL2_PC)
+
+/* Icelake Display Stream Compression Registers */
+#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200)
+#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
+#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define DSC_ALT_ICH_SEL (1 << 20)
+#define DSC_VBR_ENABLE (1 << 19)
+#define DSC_422_ENABLE (1 << 18)
+#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
+#define DSC_BLOCK_PREDICTION (1 << 16)
+#define DSC_LINE_BUF_DEPTH_SHIFT 12
+#define DSC_BPC_SHIFT 8
+#define DSC_VER_MIN_SHIFT 4
+#define DSC_VER_MAJ (0x1 << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204)
+#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
+#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
+#define DSC_BPP(bpp) ((bpp) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208)
+#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
+#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
+#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
+#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C)
+#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
+#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
+#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210)
+#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
+#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
+#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
+#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214)
+#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
+#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
+#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16)
+#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218)
+#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
+#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
+#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
+#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
+#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
+#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C)
+#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
+#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
+#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220)
+#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
+#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
+#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
+#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224)
+#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
+#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
+#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
+#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228)
+#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
+#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
+#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
+#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
+#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
+#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C)
+#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260)
+#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264)
+#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
+#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268)
+#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
+#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C)
+#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC
+#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270)
+#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
+#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
+#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
+
+/* Icelake Rate Control Buffer Threshold Registers */
+#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
+#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
+#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30)
+#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4)
+#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PC)
+#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
+
+#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238)
+#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4)
+#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38)
+#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4)
+#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PC)
+#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
+
+/* Icelake DSC Rate Control Range Parameter Registers */
+#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
+#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
+#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define RC_BPG_OFFSET_SHIFT 10
+#define RC_MAX_QP_SHIFT 5
+#define RC_MIN_QP_SHIFT 0
+
+#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
+#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
+#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
+#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
+#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
+#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
+#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
+
+#endif /* __INTEL_VDSC_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index a69bfcac9a94..286a0bdd28c6 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -6,9 +6,10 @@
#include <linux/pci.h>
#include <linux/vgaarb.h>
-#include <drm/i915_drm.h>
#include <video/vga.h>
+#include "soc/intel_gmch.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
@@ -98,39 +99,12 @@ void intel_vga_reset_io_mem(struct drm_i915_private *i915)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
}
-static int
-intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
-{
- unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
- u16 gmch_ctrl;
-
- if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
- drm_err(&i915->drm, "failed to read control word\n");
- return -EIO;
- }
-
- if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
- return 0;
-
- if (enable_decode)
- gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
- else
- gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-
- if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
- drm_err(&i915->drm, "failed to write control word\n");
- return -EIO;
- }
-
- return 0;
-}
-
static unsigned int
intel_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
- intel_vga_set_state(i915, enable_decode);
+ intel_gmch_vga_set_state(i915, enable_decode);
if (enable_decode)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5ff6aed9575e..4228f26b4c11 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -144,17 +144,11 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
* is deprecated.
*/
if (DISPLAY_VER(i915) >= 13) {
- /*
- * FIXME: Subtract Window2 delay from below value.
- *
- * Window2 specifies time required to program DSB (Window2) in
- * number of scan lines. Assuming 0 for no DSB.
- */
crtc_state->vrr.guardband =
- crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vdisplay;
+ crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start;
} else {
crtc_state->vrr.pipeline_full =
- min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay -
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start -
crtc_state->framestart_delay - 1);
}
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
new file mode 100644
index 000000000000..bb99179cd5fd
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i9xx_wm.h"
+#include "intel_display_types.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ * @i915: i915 device
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_i915_private *i915)
+{
+ if (i915->display.funcs.wm->update_wm)
+ i915->display.funcs.wm->update_wm(i915);
+}
+
+int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->compute_pipe_wm)
+ return i915->display.funcs.wm->compute_pipe_wm(state, crtc);
+
+ return 0;
+}
+
+int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (!i915->display.funcs.wm->compute_intermediate_wm)
+ return 0;
+
+ if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm))
+ return 0;
+
+ return i915->display.funcs.wm->compute_intermediate_wm(state, crtc);
+}
+
+bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->initial_watermarks) {
+ i915->display.funcs.wm->initial_watermarks(state, crtc);
+ return true;
+ }
+
+ return false;
+}
+
+void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->atomic_update_watermarks)
+ i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
+}
+
+void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->optimize_watermarks)
+ i915->display.funcs.wm->optimize_watermarks(state, crtc);
+}
+
+int intel_compute_global_watermarks(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->compute_global_watermarks)
+ return i915->display.funcs.wm->compute_global_watermarks(state);
+
+ return 0;
+}
+
+void intel_wm_get_hw_state(struct drm_i915_private *i915)
+{
+ if (i915->display.funcs.wm->get_hw_state)
+ return i915->display.funcs.wm->get_hw_state(i915);
+}
+
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ /* FIXME check the 'enable' instead */
+ if (!crtc_state->hw.active)
+ return false;
+
+ /*
+ * Treat cursor with fb as always visible since cursor updates
+ * can happen faster than the vrefresh rate, and the current
+ * watermark code doesn't handle that correctly. Cursor updates
+ * which set/clear the fb or change the cursor size are going
+ * to get throttled by intel_legacy_cursor_update() to work
+ * around this problem with the watermark code.
+ */
+ if (plane->id == PLANE_CURSOR)
+ return plane_state->hw.fb != NULL;
+ else
+ return plane_state->uapi.visible;
+}
+
+void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ const char *name, const u16 wm[])
+{
+ int level;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ if (latency == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency not provided\n",
+ name, level);
+ continue;
+ }
+
+ /*
+ * - latencies are in us on gen9+.
+ * - before then, WM1+ latency values are in 0.5us units.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency %u (%u.%u usec)\n", name, level,
+ wm[level], latency / 10, latency % 10);
+ }
+}
+
+void intel_wm_init(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 9)
+ skl_wm_init(i915);
+ else
+ i9xx_wm_init(i915);
+}
+
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
+{
+ struct drm_i915_private *dev_priv = m->private;
+ int level;
+
+ drm_modeset_lock_all(&dev_priv->drm);
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ /*
+ * - WM1+ latency values in 0.5us units
+ * - latencies are in us on gen9/vlv/chv/g4x
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 ||
+ IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_G4X(dev_priv))
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level], latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(&dev_priv->drm);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.pri_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.spr_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.cur_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev_priv);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev_priv);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev_priv);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, u16 wm[8])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 new[8] = { 0 };
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
+ if (ret != dev_priv->display.wm.num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(&dev_priv->drm);
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(&dev_priv->drm);
+
+ return len;
+}
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.cur_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
+void intel_wm_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_pri_wm_latency_fops);
+
+ debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_spr_wm_latency_fops);
+
+ debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_cur_wm_latency_fops);
+
+ skl_watermark_debugfs_register(i915);
+}
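The intel_update_watermarks() kernel-doc above states the normal and self-refresh formulas only in prose. Below is a minimal sketch of that arithmetic with hypothetical numbers chosen purely for illustration; the real driver derives dotclock, htotal and latency from the current mode and the per-platform latency tables, converts the result to FIFO entries, rounds up and adds the 2 extra entries for clock crossings.

#include <stdio.h>

/*
 * Illustrative only: mirrors the formulas quoted in the kernel-doc,
 * not the driver's actual watermark code. Results are in bytes.
 */
static unsigned int normal_wm_bytes(unsigned int dotclock_khz, unsigned int cpp,
                                    unsigned int latency_us)
{
	/* watermark = dotclock * bytes per pixel * latency */
	return (dotclock_khz / 1000) * cpp * latency_us;
}

static unsigned int sr_wm_bytes(unsigned int dotclock_khz, unsigned int htotal,
                                unsigned int width, unsigned int cpp,
                                unsigned int latency_us)
{
	/* line time = htotal / dotclock */
	unsigned int line_time_us = htotal * 1000 / dotclock_khz;

	/* watermark = (trunc(latency / line time) + 1) * surface width * cpp */
	return (latency_us / line_time_us + 1) * width * cpp;
}

int main(void)
{
	/* hypothetical 148.5 MHz mode, htotal 2200, 1920 wide, 4 bpp, 12 us latency */
	printf("normal: %u bytes\n", normal_wm_bytes(148500, 4, 12));
	printf("SR:     %u bytes\n", sr_wm_bytes(148500, 2200, 1920, 4, 12));
	return 0;
}

The latency tables fed into these formulas are what the debugfs files further down expose: pre-gen9 WM1+ entries are stored in 0.5 us units, which is why the show/print helpers multiply by 5 (or by 10 on platforms that store full microseconds) and then print latency / 10 and latency % 10 as a one-decimal microsecond value.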
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
new file mode 100644
index 000000000000..48429ac140d2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_WM_H__
+#define __INTEL_WM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+void intel_update_watermarks(struct drm_i915_private *i915);
+int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_compute_global_watermarks(struct intel_atomic_state *state);
+void intel_wm_get_hw_state(struct drm_i915_private *i915);
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_print_wm_latency(struct drm_i915_private *i915,
+ const char *name, const u16 wm[]);
+void intel_wm_init(struct drm_i915_private *i915);
+void intel_wm_debugfs_register(struct drm_i915_private *i915);
+
+#endif /* __INTEL_WM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/display/intel_wm_types.h
index 211632f58751..628b7c0ce484 100644
--- a/drivers/gpu/drm/i915/intel_pm_types.h
+++ b/drivers/gpu/drm/i915/display/intel_wm_types.h
@@ -3,12 +3,12 @@
* Copyright © 2021 Intel Corporation
*/
-#ifndef __INTEL_PM_TYPES_H__
-#define __INTEL_PM_TYPES_H__
+#ifndef __INTEL_WM_TYPES_H__
+#define __INTEL_WM_TYPES_H__
#include <linux/types.h>
-#include "display/intel_display.h"
+#include "intel_display_limits.h"
enum intel_ddb_partitioning {
INTEL_DDB_PART_1_2,
@@ -73,4 +73,4 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
return false;
}
-#endif /* __INTEL_PM_TYPES_H__ */
+#endif /* __INTEL_WM_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 01e881293612..473d53610b92 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -87,6 +87,10 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
+#define TGL_MAX_SRC_W 5120
+#define TGL_MAX_SRC_H 8192
+#define TGL_MAX_DST_W 8192
+#define TGL_MAX_DST_H 8192
#define MTL_MAX_SRC_W 4096
#define MTL_MAX_SRC_H 8192
#define MTL_MAX_DST_W 8192
@@ -173,11 +177,16 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
max_src_h = SKL_MAX_SRC_H;
max_dst_w = SKL_MAX_DST_W;
max_dst_h = SKL_MAX_DST_H;
- } else if (DISPLAY_VER(dev_priv) < 14) {
+ } else if (DISPLAY_VER(dev_priv) < 12) {
max_src_w = ICL_MAX_SRC_W;
max_src_h = ICL_MAX_SRC_H;
max_dst_w = ICL_MAX_DST_W;
max_dst_h = ICL_MAX_DST_H;
+ } else if (DISPLAY_VER(dev_priv) < 14) {
+ max_src_w = TGL_MAX_SRC_W;
+ max_src_h = TGL_MAX_SRC_H;
+ max_dst_w = TGL_MAX_DST_W;
+ max_dst_h = TGL_MAX_DST_H;
} else {
max_src_w = MTL_MAX_SRC_W;
max_src_h = MTL_MAX_SRC_H;
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 4b79c2d2d617..ce55b8f09301 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -642,7 +642,7 @@ icl_plane_disable_arm(struct intel_plane *plane,
skl_write_plane_wm(plane, crtc_state);
- intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
+ intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
}
@@ -1260,7 +1260,7 @@ icl_plane_update_noarm(struct intel_plane *plane,
if (plane_state->force_black)
icl_plane_csc_load_black(plane);
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+ intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
}
static void
@@ -1287,6 +1287,8 @@ icl_plane_update_arm(struct intel_plane *plane,
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
+ intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state);
+
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
@@ -1627,7 +1629,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
u32 offset;
int ret;
- if (w > max_width || w < min_width || h > max_height) {
+ if (w > max_width || w < min_width || h > max_height || h < 1) {
drm_dbg_kms(&dev_priv->drm,
"requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
w, h, min_width, max_width, max_height);
@@ -2180,7 +2182,7 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
if (DISPLAY_VER(i915) < 12)
return false;
- /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
+ /* Wa_14010477008 */
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
return false;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index ae4e9e680c2e..f0af997d2a23 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -5,6 +5,10 @@
#include <drm/drm_blend.h>
+#include "i915_drv.h"
+#include "i915_fixed.h"
+#include "i915_reg.h"
+#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
@@ -13,13 +17,9 @@
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_fb.h"
-#include "skl_watermark.h"
-
-#include "i915_drv.h"
-#include "i915_fixed.h"
-#include "i915_reg.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
static void skl_sagv_disable(struct drm_i915_private *i915);
@@ -64,7 +64,7 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
static bool
intel_has_sagv(struct drm_i915_private *i915)
{
- return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
+ return HAS_SAGV(i915) &&
i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
}
@@ -92,7 +92,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
return val;
} else if (DISPLAY_VER(i915) == 11) {
return 10;
- } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
+ } else if (HAS_SAGV(i915)) {
return 30;
} else {
return 0;
@@ -101,7 +101,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
static void intel_sagv_init(struct drm_i915_private *i915)
{
- if (!intel_has_sagv(i915))
+ if (!HAS_SAGV(i915))
i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
/*
@@ -359,7 +359,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(i915);
+ for (level = i915->display.wm.num_levels - 1;
!wm->wm[level].enable; --level)
{ }
@@ -710,10 +710,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(i915);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
+ int level;
ret = skl_compute_wm_params(crtc_state, 256,
drm_format_info(DRM_FORMAT_ARGB8888),
@@ -722,7 +722,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
crtc_state->pixel_rate, &wp, 0);
drm_WARN_ON(&i915->drm, ret);
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
unsigned int latency = i915->display.wm.skl_latency[level];
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
@@ -1407,16 +1407,22 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
-static bool icl_need_wm1_wa(struct drm_i915_private *i915,
- enum plane_id plane_id)
+static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
+ const struct skl_plane_wm *wm)
{
/*
* Wa_1408961008:icl, ehl
* Wa_14012656716:tgl, adl
- * Underruns with WM1+ disabled
+ * Wa_14017887344:icl
+ * Wa_14017868169:adl, tgl
+ * Due to some power saving optimizations, other subsystems (such as
+ * PSR) might still consult even the disabled wm level registers as a
+ * "reference", so let's keep those values sane. Given the number of
+ * workarounds requiring the same treatment, it was decided to simply
+ * do this on all platforms; since the affected wm levels are disabled
+ * anyway, this does no harm.
*/
- return DISPLAY_VER(i915) == 11 ||
- (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
+ return level > 0 && !wm->wm[level].enable;
}
struct skl_plane_ddb_iter {
@@ -1492,7 +1498,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* Find the highest watermark level for which we can satisfy the block
* requirement of active planes.
*/
- for (level = ilk_wm_max_level(i915); level >= 0; level--) {
+ for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
@@ -1568,7 +1574,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* all levels as "enabled." Go back now and disable the ones
* that aren't actually possible.
*/
- for (level++; level <= ilk_wm_max_level(i915); level++) {
+ for (level++; level < i915->display.wm.num_levels; level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1585,11 +1591,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
else
skl_check_wm_level(&wm->wm[level], ddb);
- if (icl_need_wm1_wa(i915, plane_id) &&
- level == 1 && wm->wm[0].enable) {
- wm->wm[level].blocks = wm->wm[0].blocks;
- wm->wm[level].lines = wm->wm[0].lines;
- wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ if (skl_need_wm_copy_wa(i915, level, wm)) {
+ wm->wm[level].blocks = wm->wm[level - 1].blocks;
+ wm->wm[level].lines = wm->wm[level - 1].lines;
+ wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
}
}
}
@@ -1966,10 +1971,10 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
struct skl_wm_level *levels)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(i915);
struct skl_wm_level *result_prev = &levels[0];
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
struct skl_wm_level *result = &levels[level];
unsigned int latency = i915->display.wm.skl_latency[level];
@@ -2247,7 +2252,6 @@ void skl_write_plane_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -2255,8 +2259,9 @@ void skl_write_plane_wm(struct intel_plane *plane,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ int level;
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < i915->display.wm.num_levels; level++)
skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
skl_plane_wm_level(pipe_wm, plane_id, level));
@@ -2284,14 +2289,14 @@ void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
+ int level;
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < i915->display.wm.num_levels; level++)
skl_write_wm_level(i915, CUR_WM(pipe, level),
skl_plane_wm_level(pipe_wm, plane_id, level));
@@ -2323,9 +2328,9 @@ static bool skl_plane_wm_equals(struct drm_i915_private *i915,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
- int level, max_level = ilk_wm_max_level(i915);
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2397,6 +2402,8 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane_id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
}
return 0;
@@ -2673,9 +2680,9 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
const struct skl_pipe_wm *new_pipe_wm)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2754,6 +2761,8 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane_id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
}
return 0;
@@ -2809,16 +2818,14 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- int level, max_level;
enum plane_id plane_id;
+ int level;
u32 val;
- max_level = ilk_wm_max_level(i915);
-
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
if (plane_id != PLANE_CURSOR)
val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
else
@@ -2855,7 +2862,7 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
}
}
-void skl_wm_get_hw_state(struct drm_i915_private *i915)
+static void skl_wm_get_hw_state(struct drm_i915_private *i915)
{
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->display.dbuf.obj.state);
@@ -2955,7 +2962,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
return false;
}
-void skl_wm_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct drm_i915_private *i915)
{
struct intel_crtc *crtc;
@@ -2991,6 +2998,12 @@ void skl_wm_sanitize(struct drm_i915_private *i915)
}
}
+static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ skl_wm_get_hw_state(i915);
+ skl_wm_sanitize(i915);
+}
+
void intel_wm_state_verify(struct intel_crtc *crtc,
struct intel_crtc_state *new_crtc_state)
{
@@ -3001,9 +3014,9 @@ void intel_wm_state_verify(struct intel_crtc *crtc,
struct skl_pipe_wm wm;
} *hw;
const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- int level, max_level = ilk_wm_max_level(i915);
struct intel_plane *plane;
u8 hw_enabled_slices;
+ int level;
if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
return;
@@ -3030,7 +3043,7 @@ void intel_wm_state_verify(struct intel_crtc *crtc,
const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
hw_wm_level = &hw->wm.planes[plane->id].wm[level];
sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
@@ -3152,7 +3165,7 @@ void skl_watermark_ipc_init(struct drm_i915_private *i915)
static void
adjust_wm_latency(struct drm_i915_private *i915,
- u16 wm[], int max_level, int read_latency)
+ u16 wm[], int num_levels, int read_latency)
{
bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
int i, level;
@@ -3162,12 +3175,12 @@ adjust_wm_latency(struct drm_i915_private *i915,
* need to be disabled. We make sure to sanitize the values out
* of the punit to satisfy this requirement.
*/
- for (level = 1; level <= max_level; level++) {
+ for (level = 1; level < num_levels; level++) {
if (wm[level] == 0) {
- for (i = level + 1; i <= max_level; i++)
+ for (i = level + 1; i < num_levels; i++)
wm[i] = 0;
- max_level = level - 1;
+ num_levels = level;
break;
}
}
@@ -3180,7 +3193,7 @@ adjust_wm_latency(struct drm_i915_private *i915,
* from the punit when level 0 response data is 0us.
*/
if (wm[0] == 0) {
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < num_levels; level++)
wm[level] += read_latency;
}
@@ -3196,7 +3209,7 @@ adjust_wm_latency(struct drm_i915_private *i915,
static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- int max_level = ilk_wm_max_level(i915);
+ int num_levels = i915->display.wm.num_levels;
u32 val;
val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
@@ -3211,12 +3224,12 @@ static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- adjust_wm_latency(i915, wm, max_level, 6);
+ adjust_wm_latency(i915, wm, num_levels, 6);
}
static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- int max_level = ilk_wm_max_level(i915);
+ int num_levels = i915->display.wm.num_levels;
int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
int mult = IS_DG2(i915) ? 2 : 1;
u32 val;
@@ -3248,11 +3261,16 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
- adjust_wm_latency(i915, wm, max_level, read_latency);
+ adjust_wm_latency(i915, wm, num_levels, read_latency);
}
static void skl_setup_wm_latency(struct drm_i915_private *i915)
{
+ if (HAS_HW_SAGV_WM(i915))
+ i915->display.wm.num_levels = 6;
+ else
+ i915->display.wm.num_levels = 8;
+
if (DISPLAY_VER(i915) >= 14)
mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
else
@@ -3263,6 +3281,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *i915)
static const struct intel_wm_funcs skl_wm_funcs = {
.compute_global_watermarks = skl_compute_wm,
+ .get_hw_state = skl_wm_get_hw_state_and_sanitize,
};
void skl_wm_init(struct drm_i915_private *i915)
@@ -3540,13 +3559,34 @@ static const struct file_operations skl_watermark_ipc_status_fops = {
.write = skl_watermark_ipc_status_write
};
-void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
+static int intel_sagv_status_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ static const char * const sagv_status[] = {
+ [I915_SAGV_UNKNOWN] = "unknown",
+ [I915_SAGV_DISABLED] = "disabled",
+ [I915_SAGV_ENABLED] = "enabled",
+ [I915_SAGV_NOT_CONTROLLED] = "not controlled",
+ };
+
+ seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
+ seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
+ seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
+
+void skl_watermark_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
- if (!HAS_IPC(i915))
- return;
+ if (HAS_IPC(i915))
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
+ &skl_watermark_ipc_status_fops);
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
- &skl_watermark_ipc_status_fops);
+ if (HAS_SAGV(i915))
+ debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
+ &intel_sagv_status_fops);
}
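Unlike the wm latency files in intel_wm.c above, which hand-roll their open callbacks and file_operations, the new i915_sagv_status entry relies on DEFINE_SHOW_ATTRIBUTE(intel_sagv_status) to generate them from intel_sagv_status_show(). A simplified sketch of what that macro produces (paraphrased from include/linux/seq_file.h, shown only to explain why no explicit _open/_fops definitions appear in this hunk):

/* simplified sketch of DEFINE_SHOW_ATTRIBUTE(intel_sagv_status) */
static int intel_sagv_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, intel_sagv_status_show, inode->i_private);
}

static const struct file_operations intel_sagv_status_fops = {
	.owner		= THIS_MODULE,
	.open		= intel_sagv_status_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};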
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 7a5a4e67cd73..f91a3d4ddc07 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -8,9 +8,9 @@
#include <linux/types.h>
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_global_state.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_i915_private;
struct intel_atomic_state;
@@ -38,16 +38,13 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx);
-void skl_wm_get_hw_state(struct drm_i915_private *i915);
-void skl_wm_sanitize(struct drm_i915_private *i915);
-
void intel_wm_state_verify(struct intel_crtc *crtc,
struct intel_crtc_state *new_crtc_state);
void skl_watermark_ipc_init(struct drm_i915_private *i915);
void skl_watermark_ipc_update(struct drm_i915_private *i915);
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
-void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915);
+void skl_watermark_debugfs_register(struct drm_i915_private *i915);
void skl_wm_init(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 662bdb656aa3..8d2e6e151ba0 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -331,32 +331,23 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
bool cold_boot = false;
/* Set the MIPI mode
* If MIPI_Mode is off, then writing to LP_Wake bit is not reflecting.
* Power ON MIPI IO first and then write into IO reset and LP wake bits
*/
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- intel_de_write(dev_priv, MIPI_CTRL(port),
- tmp | GLK_MIPIIO_ENABLE);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE);
/* Put the IO into reset */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
- tmp &= ~GLK_LP_WAKE;
- else
- tmp |= GLK_LP_WAKE;
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0);
}
/* Wait for Pwr ACK */
@@ -380,7 +371,6 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -390,24 +380,18 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
}
/* Get IO out of reset */
- val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
- val | GLK_MIPIIO_RESET_RELEASED);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of Low power state*/
for_each_dsi_port(port, intel_dsi->ports) {
if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= DEVICE_READY;
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, DEVICE_READY);
usleep_range(10, 15);
} else {
/* Enter ULPS */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_ENTER | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for ULPS active */
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
@@ -415,20 +399,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
drm_err(&dev_priv->drm, "ULPS not active\n");
/* Exit ULPS */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_EXIT | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY);
/* Enter Normal Mode */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
-
- val = intel_de_read(dev_priv, MIPI_CTRL(port));
- val &= ~GLK_LP_WAKE;
- intel_de_write(dev_priv, MIPI_CTRL(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK,
+ ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
+
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0);
}
}
@@ -460,9 +439,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
- intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port),
- val | LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
@@ -482,7 +459,6 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -505,9 +481,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
* Common bit for both MIPI Port A & MIPI Port C
* No similar bit in MIPI Port C reg
*/
- val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A));
- intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A),
- val | LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
@@ -537,15 +511,11 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
/* Enter ULPS */
- for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_ENTER | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -568,12 +538,9 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
/* Put the IO into reset */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -583,11 +550,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
}
/* Clear MIPI mode */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- tmp &= ~GLK_MIPIIO_ENABLE;
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0);
}
static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -607,7 +571,6 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
- u32 val;
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
DEVICE_READY | ULPS_STATE_ENTER);
@@ -631,8 +594,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
drm_err(&dev_priv->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
- val = intel_de_read(dev_priv, port_ctrl);
- intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0);
usleep_range(1000, 1500);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
@@ -649,23 +611,17 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
enum port port;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
- u32 temp;
+ u32 temp = intel_dsi->pixel_overlap;
+
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- temp = intel_de_read(dev_priv,
- MIPI_CTRL(port));
- temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
- intel_dsi->pixel_overlap <<
- BXT_PIXEL_OVERLAP_CNT_SHIFT;
- intel_de_write(dev_priv, MIPI_CTRL(port),
- temp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ BXT_PIXEL_OVERLAP_CNT_MASK,
+ temp << BXT_PIXEL_OVERLAP_CNT_SHIFT);
} else {
- temp = intel_de_read(dev_priv, VLV_CHICKEN_3);
- temp &= ~PIXEL_OVERLAP_CNT_MASK |
- intel_dsi->pixel_overlap <<
- PIXEL_OVERLAP_CNT_SHIFT;
- intel_de_write(dev_priv, VLV_CHICKEN_3, temp);
+ intel_de_rmw(dev_priv, VLV_CHICKEN_3,
+ PIXEL_OVERLAP_CNT_MASK,
+ temp << PIXEL_OVERLAP_CNT_SHIFT);
}
}
@@ -709,11 +665,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- u32 temp;
/* de-assert ip_tg_enable signal */
- temp = intel_de_read(dev_priv, port_ctrl);
- intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE);
+ intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);
intel_de_posting_read(dev_priv, port_ctrl);
}
}
@@ -787,7 +741,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
- u32 val;
bool glk_cold_boot = false;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -810,9 +763,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
- val | MIPIO_RST_CTRL);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
/* Power up DSI regulator */
intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
@@ -820,12 +771,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- u32 val;
-
/* Disable DPOunit clock gating, can stall pipe */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val |= DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ 0, DPOUNIT_CLOCK_GATE_DISABLE);
}
if (!IS_GEMINILAKE(dev_priv))
@@ -949,7 +897,6 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -987,21 +934,16 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
- val & ~MIPIO_RST_CTRL);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
bxt_dsi_pll_disable(encoder);
} else {
- u32 val;
-
vlv_dsi_pll_disable(encoder);
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
/* Assert reset */
@@ -1058,7 +1000,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
*/
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
- enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
@@ -1432,11 +1374,8 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
enum pipe pipe = crtc->pipe;
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- tmp &= ~BXT_PIPE_SELECT_MASK;
-
- tmp |= BXT_PIPE_SELECT(pipe);
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe));
}
/* XXX: why here, why like this? handling in irq handler?! */
@@ -1605,7 +1544,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
if (IS_GEMINILAKE(dev_priv))
return;
@@ -1620,9 +1558,7 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
vlv_dsi_reset_clocks(encoder, port);
intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
- val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port));
- val &= ~VID_MODE_FORMAT_MASK;
- intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
}
@@ -1983,7 +1919,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
goto err_cleanup_connector;
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, NULL);
intel_backlight_setup(intel_connector, INVALID_PIPE);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index af7402127cd9..b697badbbe71 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -302,13 +302,10 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
- val &= ~BXT_DSI_PLL_DO_ENABLE;
- intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+ intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
/*
* PLL lock should deassert within 200us.
@@ -542,7 +539,6 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -559,9 +555,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
}
/* Enable DSI PLL */
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
- val |= BXT_DSI_PLL_DO_ENABLE;
- intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+ intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
@@ -589,13 +583,9 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
} else {
- tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1);
- tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp);
+ intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
- tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2);
- tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp);
+ intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
}
intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
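Most of the vlv_dsi and vlv_dsi_pll churn above is a mechanical conversion of open-coded read/modify/write sequences to intel_de_rmw(i915, reg, clear, set). As the removed lines show, the helper reads the register, clears the bits in 'clear', ORs in 'set' and writes the result back (returning the old value and, if memory serves, skipping the write when nothing changed). A standalone sketch of the same pattern, with hypothetical read_reg()/write_reg() accessors standing in for the driver's MMIO layer:

#include <stdint.h>

/* hypothetical accessors standing in for intel_de_read()/intel_de_write() */
extern uint32_t read_reg(uint32_t offset);
extern void write_reg(uint32_t offset, uint32_t val);

/* read/modify/write: clear the 'clear' bits, then set the 'set' bits */
static uint32_t rmw(uint32_t offset, uint32_t clear, uint32_t set)
{
	uint32_t old = read_reg(offset);
	uint32_t val = (old & ~clear) | set;

	if (val != old)
		write_reg(offset, val);

	return old;
}

With that shape in mind, the old "tmp = read(); tmp &= ~GLK_MIPIIO_RESET_RELEASED; write(tmp);" triple collapses to a single rmw(MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0) call, which is exactly the form of the replacements in the hunks above.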
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index b3b398fe689c..385ffc575b48 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -8,6 +8,7 @@
#include "display/intel_frontbuffer.h"
+#include "i915_config.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 005a7f842784..e76c9703680e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -5,6 +5,7 @@
#include <drm/drm_fourcc.h>
+#include "display/intel_display.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 9969e687ad85..497de40b8e68 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -4,6 +4,7 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 4f69bff63068..d3c1dee16af2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -697,7 +697,7 @@ insert:
GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
if (file)
- drm_vma_node_allow(&mmo->vma_node, file);
+ drm_vma_node_allow_once(&mmo->vma_node, file);
return mmo;
err:
@@ -979,7 +979,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
i915_gem_object_put(obj);
return -EINVAL;
}
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
}
anon = mmap_singleton(to_i915(dev));
@@ -988,7 +988,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return PTR_ERR(anon);
}
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
/*
* We keep the ref on mmo->obj, not vm_file, but we require
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 7ab9e98bcbd2..d8e06e783e30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -110,9 +110,7 @@ static int adjust_stolen(struct drm_i915_private *i915,
else
ggtt_start &= PGTBL_ADDRESS_LO_MASK;
- ggtt_res =
- (struct resource) DEFINE_RES_MEM(ggtt_start,
- ggtt_total_entries(ggtt) * 4);
+ ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);
if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
stolen[0].end = ggtt_res.start;
@@ -211,7 +209,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
IS_GM45(i915) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
- resource_size_t stolen_top = i915->dsm.end + 1;
+ resource_size_t stolen_top = i915->dsm.stolen.end + 1;
drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
IS_GM45(i915) ? "CTG" : "ELK", reg_val);
@@ -276,7 +274,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
resource_size_t *size)
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = i915->dsm.end + 1;
+ resource_size_t stolen_top = i915->dsm.stolen.end + 1;
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -365,7 +363,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
resource_size_t *size)
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = i915->dsm.end + 1;
+ resource_size_t stolen_top = i915->dsm.stolen.end + 1;
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -414,7 +412,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
/*
- * Initialize i915->dsm_reserved to contain the reserved space within the Data
+ * Initialize i915->dsm.reserved to contain the reserved space within the Data
* Stolen Memory. This is a range on the top of DSM that is reserved, not to
* be used by driver, so must be excluded from the region passed to the
* allocator later. In the spec this is also called as WOPCM.
@@ -430,7 +428,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
resource_size_t reserved_size;
int ret = 0;
- stolen_top = i915->dsm.end + 1;
+ stolen_top = i915->dsm.stolen.end + 1;
reserved_base = stolen_top;
reserved_size = 0;
@@ -471,13 +469,12 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
goto bail_out;
}
- i915->dsm_reserved =
- (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
+ i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);
- if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
+ if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
drm_err(&i915->drm,
"Stolen reserved area %pR outside stolen memory %pR\n",
- &i915->dsm_reserved, &i915->dsm);
+ &i915->dsm.reserved, &i915->dsm.stolen);
ret = -EINVAL;
goto bail_out;
}
@@ -485,8 +482,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
return 0;
bail_out:
- i915->dsm_reserved =
- (struct resource)DEFINE_RES_MEM(reserved_base, 0);
+ i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);
return ret;
}
@@ -517,27 +513,27 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
if (request_smem_stolen(i915, &mem->region))
return -ENOSPC;
- i915->dsm = mem->region;
+ i915->dsm.stolen = mem->region;
if (init_reserved_stolen(i915))
return -ENOSPC;
/* Exclude the reserved region from driver use */
- mem->region.end = i915->dsm_reserved.start - 1;
+ mem->region.end = i915->dsm.reserved.start - 1;
mem->io_size = min(mem->io_size, resource_size(&mem->region));
- i915->stolen_usable_size = resource_size(&mem->region);
+ i915->dsm.usable_size = resource_size(&mem->region);
drm_dbg(&i915->drm,
"Memory reserved for graphics device: %lluK, usable: %lluK\n",
- (u64)resource_size(&i915->dsm) >> 10,
- (u64)i915->stolen_usable_size >> 10);
+ (u64)resource_size(&i915->dsm.stolen) >> 10,
+ (u64)i915->dsm.usable_size >> 10);
- if (i915->stolen_usable_size == 0)
+ if (i915->dsm.usable_size == 0)
return -ENOSPC;
/* Basic memrange allocator for stolen space. */
- drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
+ drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);
return 0;
}
@@ -587,7 +583,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
+ GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -607,7 +603,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
sg->offset = 0;
sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
+ sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
sg_dma_len(sg) = size;
return st;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 8cfed1bef629..341b94672abc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -274,8 +274,6 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
bdev);
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
unsigned long ccs_pages = 0;
enum ttm_caching caching;
@@ -289,8 +287,8 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
if (!i915_tt)
return NULL;
- if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
- man->use_tt)
+ if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
+ ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
caching = i915_ttm_select_tt_caching(obj);
@@ -474,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
struct ttm_placement place = {};
int ret;
- if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+ if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
return 0;
GEM_BUG_ON(!i915_tt->is_shmem);
@@ -513,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
+ /*
+	 * This gets called twice by ttm; as long as we have a ttm resource or
+	 * ttm_tt we can still safely call this. Due to pipeline-gutting, we
+	 * may have a NULL bo->resource, but in that case we should always
+	 * have a ttm_tt alive (such as when the pages are swapped out).
+ */
+ if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
__i915_gem_object_pages_fini(obj);
i915_ttm_free_cached_io_rsgt(obj);
}
@@ -1058,7 +1062,27 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- if (!i915_ttm_resource_mappable(bo->resource)) {
+ /*
+ * This must be swapped out with shmem ttm_tt (pipeline-gutting).
+ * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
+	 * far as doing a ttm_bo_move_null(), which should skip all the
+ * other junk.
+ */
+ if (!bo->resource) {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true, /* should be idle already */
+ };
+ int err;
+
+ GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+
+ err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+ if (err) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
+ } else if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 2a94a99ef76b..f8f6bed1b297 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -98,7 +98,7 @@ static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem)
static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
{
/* Once / if we support GGTT, this is also false for cached ttm_tts */
- return mem->mem_type != I915_PL_SYSTEM;
+ return mem && mem->mem_type != I915_PL_SYSTEM;
}
bool i915_ttm_resource_mappable(struct ttm_resource *res);
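
With the added NULL check, a pipeline-gutted object (bo->resource == NULL) now reads as "not iomem", matching the SYSTEM fallback used elsewhere in this series. A trivial, hypothetical caller illustrating the pattern the later hunks rely on:

/* Hypothetical caller, for illustration only: both a real system
 * resource and a NULL resource take the struct-page path.
 */
static inline bool example_uses_struct_page(struct ttm_buffer_object *bo)
{
	return !i915_ttm_cpu_maps_iomem(bo->resource);
}
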
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 2ebaaf4d663c..d030182ca176 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -103,7 +103,27 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
unsigned int cache_level;
+ unsigned int mem_flags;
unsigned int i;
+ int mem_type;
+
+ /*
+ * We might have been purged (or swapped out) if the resource is NULL,
+ * in which case the SYSTEM placement is the closest match to describe
+ * the current domain. If the object is ever used in this state then we
+ * will require moving it again.
+ */
+ if (!bo->resource) {
+ mem_flags = I915_BO_FLAG_STRUCT_PAGE;
+ mem_type = I915_PL_SYSTEM;
+ cache_level = I915_CACHE_NONE;
+ } else {
+ mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+ I915_BO_FLAG_STRUCT_PAGE;
+ mem_type = bo->resource->mem_type;
+ cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+ bo->ttm);
+ }
/*
* If object was moved to an allowable region, update the object
@@ -111,11 +131,11 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
* in an allowable region, it's evicted and we don't update the
* object region.
*/
- if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+ if (intel_region_to_ttm_type(obj->mm.region) != mem_type) {
for (i = 0; i < obj->mm.n_placements; ++i) {
struct intel_memory_region *mr = obj->mm.placements[i];
- if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+ if (intel_region_to_ttm_type(mr) == mem_type &&
mr != obj->mm.region) {
i915_gem_object_release_memory_region(obj);
i915_gem_object_init_memory_region(obj, mr);
@@ -125,12 +145,8 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
}
obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+ obj->mem_flags |= mem_flags;
- obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
- I915_BO_FLAG_STRUCT_PAGE;
-
- cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
- bo->ttm);
i915_gem_object_set_cache_coherency(obj, cache_level);
}
@@ -565,6 +581,32 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
return 0;
}
+ if (!bo->resource) {
+ if (dst_mem->mem_type != TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_SYSTEM;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+
+ /*
+ * This is only reached when first creating the object, or if
+ * the object was purged or swapped out (pipeline-gutting). For
+ * the former we can safely skip all of the below since we are
+ * only using a dummy SYSTEM placement here. And with the latter
+ * we will always re-enter here with bo->resource set correctly
+ * (as per the above), since this is part of a multi-hop
+ * sequence, where at the end we can do the move for real.
+ *
+ * The special case here is when the dst_mem is TTM_PL_SYSTEM,
+ * which doens't require any kind of move, so it should be safe
+ * to skip all the below and call ttm_bo_move_null() here, where
+ * the caller in __i915_ttm_get_pages() will take care of the
+ * rest, since we should have a valid ttm_tt.
+ */
+ ttm_bo_move_null(bo, dst_mem);
+ return 0;
+ }
+
ret = i915_ttm_move_notify(bo);
if (ret)
return ret;
@@ -669,6 +711,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
assert_object_held(dst);
assert_object_held(src);
+
+ if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
+ return -EINVAL;
+
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 7e67742bc65e..dfe39c8e74d8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
unsigned int flags;
int err = 0;
- if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
+ if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
return 0;
if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
@@ -187,7 +187,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
return err;
/* Content may have been swapped. */
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ if (!backup_bo->resource)
+ err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
+ if (!err)
+ err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
if (!err) {
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 3c573d41d404..1bbe6708d0a7 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -4150,17 +4150,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
-static unsigned long list_count(struct list_head *list)
-{
- struct list_head *pos;
- unsigned long count = 0;
-
- list_for_each(pos, list)
- count++;
-
- return count;
-}
-
void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m)
@@ -4171,8 +4160,8 @@ void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
- drm_printf(m, "\tOn hold?: %lu\n",
- list_count(&engine->sched_engine->hold));
+ drm_printf(m, "\tOn hold?: %zu\n",
+ list_count_nodes(&engine->sched_engine->hold));
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
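
For context, list_count_nodes() from <linux/list.h> performs the same walk-and-count as the removed local helper, but returns size_t (hence the %lu to %zu format change above). A minimal sketch mirroring the deleted function:

static inline size_t list_count_nodes_sketch(struct list_head *head)
{
	struct list_head *pos;
	size_t count = 0;

	list_for_each(pos, head)
		count++;

	return count;
}
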
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 0c7fe360f873..842e69c7b21e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -12,6 +12,7 @@
#include <drm/i915_drm.h>
#include <drm/intel-gtt.h>
+#include "display/intel_display.h"
#include "gem/i915_gem_lmem.h"
#include "intel_ggtt_gmch.h"
@@ -885,8 +886,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
- return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar));
+ return DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
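
The cast removals in this series appear to rely on DEFINE_RES_MEM() itself now expanding to a struct resource compound literal, making the extra (struct resource) cast redundant. A minimal userspace-style sketch of that pattern (illustrative only, not the real <linux/ioport.h> definition):

#include <stdio.h>

struct res_example { unsigned long start, end; };

/* compound literal: the macro already yields a struct value, no cast needed */
#define EXAMPLE_RES_MEM(s, n) \
	((struct res_example){ .start = (s), .end = (s) + (n) - 1 })

int main(void)
{
	struct res_example r = EXAMPLE_RES_MEM(0x1000, 0x100);

	printf("%#lx-%#lx\n", r.start, r.end); /* 0x1000-0x10ff */
	return 0;
}
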
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 7ac8ed13e1fe..37d0b0fe791d 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -5,6 +5,7 @@
#include <linux/highmem.h>
+#include "display/intel_display.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
index 0e3630103693..77c793812eb4 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -80,7 +80,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
phys_addr_t gmadr_base;
int ret;
- ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
+ ret = intel_gmch_probe(i915->gmch.pdev, to_pci_dev(i915->drm.dev), NULL);
if (!ret) {
drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
@@ -88,8 +88,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
- ggtt->gmadr =
- (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+ ggtt->gmadr = DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 89ccb95e146c..256cecf99dd1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -28,7 +28,6 @@
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pci_config.h"
-#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 56b993f6e7dc..e02cb90723ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -17,7 +17,6 @@
#include "intel_gt_print.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
-#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 97a60941eee5..4aecb5a7b631 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -9,8 +9,6 @@
#include "i915_reg_defs.h"
#include "display/intel_display_reg_defs.h" /* VLV_DISPLAY_BASE */
-#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) })
-
/*
* The perf control registers are technically multicast registers, but the
* driver never needs to read/write them directly; we only use them to build
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 6184fcc16987..f4150f61f39c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -301,7 +301,7 @@ static int chv_rc6_init(struct intel_rc6 *rc6)
pcbr = intel_uncore_read(uncore, VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
- paddr = i915->dsm.end + 1 - pctx_size;
+ paddr = i915->dsm.stolen.end + 1 - pctx_size;
GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & ~4095);
@@ -325,7 +325,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
/* BIOS set it up already, grab the pre-alloc'd space */
resource_size_t pcbr_offset;
- pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
+ pcbr_offset = (pcbr & ~4095) - i915->dsm.stolen.start;
pctx = i915_gem_object_create_region_at(i915->mm.stolen_region,
pcbr_offset,
pctx_size,
@@ -354,10 +354,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
}
GEM_BUG_ON(range_overflows_end_t(u64,
- i915->dsm.start,
+ i915->dsm.stolen.start,
pctx->stolen->start,
U32_MAX));
- pctx_paddr = i915->dsm.start + pctx->stolen->start;
+ pctx_paddr = i915->dsm.stolen.start + pctx->stolen->start;
intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
out:
@@ -448,8 +448,8 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
*/
rc6_ctx_base =
intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
- rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
+ if (!(rc6_ctx_base >= i915->dsm.reserved.start &&
+ rc6_ctx_base + PAGE_SIZE < i915->dsm.reserved.end)) {
drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
enable_rc6 = false;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index f3ad93db0b21..89fdfc67f8d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -158,7 +158,7 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = {
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
u64 *start, u32 *size)
{
- if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
+ if (!IS_DG1(uncore->i915))
return false;
*start = 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 9ad3bc7201cb..4d0dc9de23f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -7,6 +7,7 @@
#include <drm/i915_drm.h>
+#include "display/intel_display.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
@@ -1676,7 +1677,6 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- u32 val;
vlv_iosf_sb_get(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1685,21 +1685,6 @@ static void vlv_rps_init(struct intel_rps *rps)
vlv_init_gpll_ref_freq(rps);
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- switch ((val >> 6) & 3) {
- case 0:
- case 1:
- i915->mem_freq = 800;
- break;
- case 2:
- i915->mem_freq = 1066;
- break;
- case 3:
- i915->mem_freq = 1333;
- break;
- }
- drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
-
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -1726,7 +1711,6 @@ static void vlv_rps_init(struct intel_rps *rps)
static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- u32 val;
vlv_iosf_sb_get(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1735,18 +1719,6 @@ static void chv_rps_init(struct intel_rps *rps)
vlv_init_gpll_ref_freq(rps);
- val = vlv_cck_read(i915, CCK_FUSE_REG);
-
- switch ((val >> 2) & 0x7) {
- case 3:
- i915->mem_freq = 2000;
- break;
- default:
- i915->mem_freq = 1600;
- break;
- }
- drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
-
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 9e1cad9ba0e9..c622962c6bef 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -12,6 +12,9 @@
struct i915_request;
struct drm_printer;
+#define GT_FREQUENCY_MULTIPLIER 50
+#define GEN9_FREQ_SCALER 3
+
void intel_rps_init_early(struct intel_rps *rps);
void intel_rps_init(struct intel_rps *rps);
void intel_rps_sanitize(struct intel_rps *rps);
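
These constants convert between punit frequency opcodes and MHz; roughly, gen9+ big-core parts encode frequency in 50/3 MHz units while earlier non-VLV parts use plain 50 MHz steps (the real conversion lives in intel_gpu_freq()/intel_freq_opcode() in intel_rps.c). A hedged, kernel-style sketch of that relationship, with a hypothetical helper name and DIV_ROUND_CLOSEST() from <linux/math.h>:

/* Hypothetical helper, for illustration only. */
static inline int example_opcode_to_mhz(int opcode, bool gen9_plus)
{
	if (gen9_plus)
		return DIV_ROUND_CLOSEST(opcode * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);

	return opcode * GT_FREQUENCY_MULTIPLIER;
}
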
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 60e9cf273c5e..e7ee24bcad89 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1474,43 +1474,10 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- gen12_gt_workarounds_init(gt, wal);
-
- /* Wa_1409420604:tgl */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_mcr_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE2,
- CPSSUNIT_CLKGATE_DIS);
-
- /* Wa_1607087056:tgl also know as BUG:1409180338 */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal,
- GEN11_SLICE_UNIT_LEVEL_CLKGATE,
- L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
-
- /* Wa_1408615072:tgl[a0] */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
-}
-
-static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = gt->i915;
-
gen12_gt_workarounds_init(gt, wal);
- /* Wa_1607087056:dg1 */
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal,
- GEN11_SLICE_UNIT_LEVEL_CLKGATE,
- L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
-
/* Wa_1409420604:dg1 */
wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
@@ -1814,8 +1781,6 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
dg1_gt_workarounds_init(gt, wal);
- else if (IS_TIGERLAKE(i915))
- tgl_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 12)
gen12_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 11)
@@ -2232,20 +2197,6 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
}
}
-static void dg1_whitelist_build(struct intel_engine_cs *engine)
-{
- struct i915_wa_list *w = &engine->whitelist;
-
- tgl_whitelist_build(engine);
-
- /* GEN:BUG:1409280441:dg1 */
- if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) &&
- (engine->class == RENDER_CLASS ||
- engine->class == COPY_ENGINE_CLASS))
- whitelist_reg_ext(w, RING_ID(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
-}
-
static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
@@ -2334,8 +2285,6 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
dg2_whitelist_build(engine);
else if (IS_XEHPSDV(i915))
; /* none needed */
- else if (IS_DG1(i915))
- dg1_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 11)
@@ -2526,27 +2475,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
true);
}
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
- IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
- /*
- * Wa_1607138336:tgl[a0],dg1[a0]
- * Wa_1607063988:tgl[a0],dg1[a0]
- */
- wa_write_or(wal,
- GEN9_CTX_PREEMPT_REG,
- GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
- }
-
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
- /*
- * Wa_1606679103:tgl
- * (see also Wa_1606682166:icl)
- */
- wa_write_or(wal,
- GEN7_SARCHKMD,
- GEN7_DISABLE_SAMPLER_PREFETCH);
- }
-
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
@@ -2576,30 +2504,22 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
- IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
- /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */
+ /* Wa_1409804808 */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
- /*
- * Wa_1409085225:tgl
- * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p
- */
+ /* Wa_14010229206 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
}
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
- IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
+ if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
/*
- * Wa_1607030317:tgl
- * Wa_1607186500:tgl
- * Wa_1607297627:tgl,rkl,dg1[a0],adlp
+ * Wa_1607297627
*
* On TGL and RKL there are multiple entries for this WA in the
* BSpec; some indicate this is an A0-only WA, others indicate
* it applies to all steppings so we trust the "all steppings."
- * For DG1 this only applies to A0.
*/
wa_masked_en(wal,
RING_PSMI_CTL(RENDER_RING_BASE),
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index cfd736d88939..779fadcec7c4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -3,7 +3,6 @@
* Copyright © 2019 Intel Corporation
*/
-#include "intel_pm.h" /* intel_gpu_freq() */
#include "selftest_llc.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 37c38bdd5f47..a9e0a91bc0e0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -20,7 +20,7 @@ __igt_reset_stolen(struct intel_gt *gt,
const char *msg)
{
struct i915_ggtt *ggtt = gt->ggtt;
- const struct resource *dsm = &gt->i915->dsm;
+ const struct resource *dsm = &gt->i915->dsm.stolen;
resource_size_t num_pages, page;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 0ebf5fbf0e39..3c4ae1da0d41 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -49,6 +49,7 @@
#include "i915_pvinfo.h"
#include "trace.h"
+#include "display/intel_display.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 9f1c209d9251..baccbf1761b7 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -147,9 +147,25 @@ vgpu_scan_nonprivbb_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
- vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
- "0x%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
+ vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
+ "0x%llx\n");
+
+static int vgpu_status_get(void *data, u64 *val)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
+ *val = 0;
+
+ if (test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ *val |= (1 << INTEL_VGPU_STATUS_ATTACHED);
+ if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
+ *val |= (1 << INTEL_VGPU_STATUS_ACTIVE);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
/**
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
@@ -162,11 +178,12 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
- debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
&vgpu_mmio_diff_fops);
- debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
- &vgpu_scan_nonprivbb_fops);
+ debugfs_create_file_unsafe("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
+ &vgpu_scan_nonprivbb_fops);
+ debugfs_create_file_unsafe("status", 0644, vgpu->debugfs, vgpu,
+ &vgpu_status_fops);
}
/**
@@ -175,8 +192,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
{
- debugfs_remove_recursive(vgpu->debugfs);
- vgpu->debugfs = NULL;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+ if (minor->debugfs_root && gvt->debugfs_root) {
+ debugfs_remove_recursive(vgpu->debugfs);
+ vgpu->debugfs = NULL;
+ }
}
/**
@@ -199,6 +221,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
*/
void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
{
- debugfs_remove_recursive(gvt->debugfs_root);
- gvt->debugfs_root = NULL;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+ if (minor->debugfs_root) {
+ debugfs_remove_recursive(gvt->debugfs_root);
+ gvt->debugfs_root = NULL;
+ }
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index c033249e73f4..e0c5dfb788eb 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -36,6 +36,7 @@
#include "i915_reg.h"
#include "gvt.h"
+#include "display/intel_display.h"
#include "display/intel_dpio_phy.h"
static int get_edp_pipe(struct intel_vgpu *vgpu)
@@ -62,7 +63,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ if (!(vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_EDP)) & TRANSCONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -78,7 +79,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
- if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ if (vgpu_vreg_t(vgpu, TRANSCONF(pipe)) & TRANSCONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
@@ -186,8 +187,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C));
for_each_pipe(dev_priv, pipe) {
- vgpu_vreg_t(vgpu, PIPECONF(pipe)) &=
- ~(PIPECONF_ENABLE | PIPECONF_STATE_ENABLE);
+ vgpu_vreg_t(vgpu, TRANSCONF(pipe)) &=
+ ~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE);
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK;
@@ -247,8 +248,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* TRANSCODER_A can be enabled. PORT_x depends on the input of
* setup_virtual_dp_monitor.
*/
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_STATE_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;
/*
* Golden M/N are calculated based on:
@@ -505,7 +506,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
}
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -583,7 +584,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
* @turnon: Turn ON/OFF vblank_timer
*
* This function is used to turn on/off or update the per-vGPU vblank_timer
- * when PIPECONF is enabled or disabled. vblank_timer period is also updated
+ * when TRANSCONF is enabled or disabled. vblank_timer period is also updated
* if guest changed the refresh rate.
*
*/
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 7af09eb24ac0..6834f9fe40cf 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -136,7 +136,8 @@ static void dmabuf_gem_object_free(struct kref *kref)
struct list_head *pos;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
- if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+ if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
+ !list_empty(&vgpu->dmabuf_obj_list_head)) {
list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 0daa3931aef7..4eff44194439 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -38,7 +38,7 @@
#include <linux/types.h>
-#include "display/intel_display.h"
+#include "display/intel_display_limits.h"
struct intel_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index a683c22d5b64..4dd52ac2043e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -45,7 +45,7 @@ struct gvt_firmware_header {
u64 cfg_space_offset; /* offset in the file */
u64 mmio_size;
u64 mmio_offset; /* offset in the file */
- unsigned char data[1];
+ unsigned char data[];
};
#define dev_to_drm_minor(d) dev_get_drvdata((d))
@@ -77,7 +77,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
unsigned long size, crc32_start;
int ret;
- size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
+ size = offsetof(struct gvt_firmware_header, data) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
if (!firmware)
return -ENOMEM;
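
With the flexible array member, sizeof(struct gvt_firmware_header) no longer includes a dummy data byte, so the allocation size is computed from the offset of data[] plus the payload, as above (struct_size() from <linux/overflow.h> is the usual kernel helper for this kind of sizing). A standalone sketch of the pattern, with illustrative names:

#include <stddef.h>

struct blob_example {
	unsigned long payload_size;
	unsigned char data[];	/* flexible array member, contributes 0 to sizeof */
};

/* allocation: header bytes up to data[], then the payload itself */
static size_t blob_alloc_size(size_t payload)
{
	return offsetof(struct blob_example, data) + payload;
}
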
@@ -171,7 +171,7 @@ static int verify_firmware(struct intel_gvt *gvt,
mem = (fw->data + h->cfg_space_offset);
id = *(u16 *)(mem + PCI_VENDOR_ID);
- VERIFY("vender id", id, pdev->vendor);
+ VERIFY("vendor id", id, pdev->vendor);
id = *(u16 *)(mem + PCI_DEVICE_ID);
VERIFY("device id", id, pdev->device);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 51e5e8fb505b..4ec85308379a 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -55,7 +55,7 @@ static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
int idx;
bool ret;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return false;
idx = srcu_read_lock(&kvm->srcu);
@@ -1178,7 +1178,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -EINVAL;
pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
if (is_error_noslot_pfn(pfn))
@@ -1209,10 +1209,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
PAGE_SIZE, &dma_addr);
- if (ret) {
- ppgtt_invalidate_spt(spt);
- return ret;
- }
+ if (ret)
+ goto err;
sub_se.val64 = se->val64;
/* Copy the PAT field from PDE. */
@@ -1231,6 +1229,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_pfn(se, sub_spt->shadow_page.mfn);
ppgtt_set_shadow_entry(spt, se, index);
return 0;
+err:
+	/* Cancel the existing address mappings of the DMA addr. */
+ for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+ gvt_vdbg_mm("invalidate 4K entry\n");
+ ppgtt_invalidate_pte(sub_spt, &sub_se);
+ }
+	/* Release the newly allocated spt. */
+ trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+ sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+ ppgtt_free_spt(sub_spt);
+ return ret;
}
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 62823c0e13ab..2d65800d8e93 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -172,13 +172,18 @@ struct intel_vgpu_submission {
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
+enum {
+ INTEL_VGPU_STATUS_ATTACHED = 0,
+ INTEL_VGPU_STATUS_ACTIVE,
+ INTEL_VGPU_STATUS_NR_BITS,
+};
+
struct intel_vgpu {
struct vfio_device vfio_device;
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
- bool active;
- bool attached;
+ DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS);
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
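
The two bools become bits in a single status bitmap, so attach/activate state can be flipped with atomic set_bit()/clear_bit() and queried with test_bit(), as the rest of this series does at each call site. A minimal kernel-style sketch against the declarations above (helper names are hypothetical):

/* Hypothetical wrappers; the series open-codes test_bit() at each call site. */
static inline bool example_vgpu_attached(const struct intel_vgpu *vgpu)
{
	return test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
}

static inline void example_vgpu_set_active(struct intel_vgpu *vgpu)
{
	set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
}
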
@@ -467,7 +472,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
- for_each_if(vgpu->active)
+ for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
u32 offset, u32 val, bool low)
@@ -725,7 +730,7 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
void *buf, unsigned long len)
{
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
}
@@ -743,7 +748,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 735fc83e7026..3c8e0d198c4f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -666,8 +666,8 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
/* Get H/V total from transcoder timing */
- htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
- vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
+ htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
+ vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
if (dp_br && link_n && htotal && vtotal) {
u64 pixel_clk = 0;
@@ -697,12 +697,12 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (data & PIPECONF_ENABLE) {
- vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE;
+ if (data & TRANSCONF_ENABLE) {
+ vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE;
vgpu_update_refresh_rate(vgpu);
vgpu_update_vblank_emulation(vgpu, true);
} else {
- vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE;
+ vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE;
vgpu_update_vblank_emulation(vgpu, false);
}
return 0;
@@ -2262,10 +2262,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index a6b2021b665f..68eca023bbc6 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -433,7 +433,7 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
* enabled by guest. so if msi_trigger is null, success is still
* returned and don't inject interrupt into guest.
*/
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index f5451adcd489..de675d799c7d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -638,7 +638,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
mutex_lock(&vgpu->gvt->lock);
for_each_active_vgpu(vgpu->gvt, itr, id) {
- if (!itr->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
continue;
if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
@@ -655,9 +655,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- if (vgpu->attached)
- return -EEXIST;
-
if (!vgpu->vfio_device.kvm ||
vgpu->vfio_device.kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
@@ -667,14 +664,14 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
if (__kvmgt_vgpu_exist(vgpu))
return -EEXIST;
- vgpu->attached = true;
-
vgpu->track_node.track_write = kvmgt_page_track_write;
vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_get_kvm(vgpu->vfio_device.kvm);
kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
+ set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
&vgpu->nr_cache_entries);
@@ -698,12 +695,11 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- if (!vgpu->attached)
- return;
-
intel_gvt_release_vgpu(vgpu);
- debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
+ clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
+ debugfs_lookup_and_remove(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs);
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
@@ -718,8 +714,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
vgpu->dma_addr_cache = RB_ROOT;
intel_vgpu_release_msi_eventfd_ctx(vgpu);
-
- vgpu->attached = false;
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1512,9 +1506,6 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
- if (WARN_ON_ONCE(vgpu->attached))
- return;
-
vfio_unregister_group_dev(&vgpu->vfio_device);
vfio_put_device(&vgpu->vfio_device);
}
@@ -1559,7 +1550,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
struct kvm_memory_slot *slot;
int idx;
- if (!info->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
return -ESRCH;
idx = srcu_read_lock(&kvm->srcu);
@@ -1589,8 +1580,8 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
struct kvm_memory_slot *slot;
int idx;
- if (!info->attached)
- return 0;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
+ return -ESRCH;
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
@@ -1668,7 +1659,7 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
struct gvt_dma *entry;
int ret;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -EINVAL;
mutex_lock(&vgpu->cache_lock);
@@ -1714,8 +1705,8 @@ int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
struct gvt_dma *entry;
int ret = 0;
- if (!vgpu->attached)
- return -ENODEV;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ return -EINVAL;
mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
@@ -1742,7 +1733,7 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
{
struct gvt_dma *entry;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return;
mutex_lock(&vgpu->cache_lock);
@@ -1778,7 +1769,7 @@ static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
(void *)&gvt->service_request)) {
- if (vgpu->active)
+ if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
intel_vgpu_emulate_vblank(vgpu);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9cd8fcbf7cad..f4055804aad1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -695,6 +695,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
!workload->shadow_mm->ppgtt_mm.shadowed) {
+ intel_vgpu_unpin_mm(workload->shadow_mm);
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
return -EINVAL;
}
@@ -865,7 +866,8 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
goto out;
}
- if (!scheduler->current_vgpu->active ||
+ if (!test_bit(INTEL_VGPU_STATUS_ACTIVE,
+ scheduler->current_vgpu->status) ||
list_empty(workload_q_head(scheduler->current_vgpu, engine)))
goto out;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 3c529c2705dd..08ad1bd651f1 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -166,9 +166,7 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
*/
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->vgpu_lock);
- vgpu->active = true;
- mutex_unlock(&vgpu->vgpu_lock);
+ set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
}
/**
@@ -183,7 +181,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
mutex_lock(&vgpu->vgpu_lock);
- vgpu->active = false;
+ clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
if (atomic_read(&vgpu->submission.running_workload_num)) {
mutex_unlock(&vgpu->vgpu_lock);
@@ -228,7 +226,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
- drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
+ drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status),
+ "vGPU is still active!\n");
/*
* remove idr first so later clean can judge if need to stop
@@ -285,8 +284,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
if (ret)
goto out_free_vgpu;
- vgpu->active = false;
-
+ clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
return vgpu;
out_free_vgpu:
@@ -325,7 +323,7 @@ int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
- goto out_unlock;;
+ goto out_unlock;
vgpu->id = ret;
vgpu->sched_ctl.weight = conf->weight;
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
index afb828dab53b..24e5bb8a670e 100644
--- a/drivers/gpu/drm/i915/i915_config.c
+++ b/drivers/gpu/drm/i915/i915_config.c
@@ -3,7 +3,10 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_drv.h"
+#include <linux/kernel.h>
+
+#include "i915_config.h"
+#include "i915_utils.h"
unsigned long
i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
diff --git a/drivers/gpu/drm/i915/i915_config.h b/drivers/gpu/drm/i915/i915_config.h
new file mode 100644
index 000000000000..10e18b036489
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_config.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I915_CONFIG_H__
+#define __I915_CONFIG_H__
+
+#include <linux/types.h>
+#include <linux/limits.h>
+
+struct drm_i915_private;
+
+unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
+ u64 context);
+
+static inline unsigned long
+i915_fence_timeout(const struct drm_i915_private *i915)
+{
+ return i915_fence_context_timeout(i915, U64_MAX);
+}
+
+#endif /* __I915_CONFIG_H__ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a356ca490159..16011c0286ad 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -52,7 +52,6 @@
#include "i915_irq.h"
#include "i915_scheduler.h"
#include "intel_mchbar_regs.h"
-#include "intel_pm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -648,13 +647,14 @@ i915_drop_caches_get(void *data, u64 *val)
return 0;
}
+
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
int ret;
if (val & DROP_RESET_ACTIVE &&
- wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
+ wait_for(intel_engines_are_idle(gt), 200))
intel_gt_set_wedged(gt);
if (val & DROP_RETIRE)
@@ -762,7 +762,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_sseu_status", i915_sseu_status, 0},
{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
-#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
const char *name;
@@ -795,6 +794,6 @@ void i915_debugfs_register(struct drm_i915_private *dev_priv)
}
drm_debugfs_create_files(i915_debugfs_list,
- I915_DEBUGFS_ENTRIES,
+ ARRAY_SIZE(i915_debugfs_list),
minor->debugfs_root, minor);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 783c8676eee2..614bde321589 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -230,27 +230,16 @@ i915_debugfs_create_charp(const char *name, umode_t mode,
&i915_param_charp_fops);
}
-static __always_inline void
-_i915_param_create_file(struct dentry *parent, const char *name,
- const char *type, int mode, void *value)
-{
- if (!mode)
- return;
-
- if (!__builtin_strcmp(type, "bool"))
- debugfs_create_bool(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "int"))
- i915_debugfs_create_int(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "unsigned int"))
- i915_debugfs_create_uint(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "unsigned long"))
- debugfs_create_ulong(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "char *"))
- i915_debugfs_create_charp(name, mode, parent, value);
- else
- WARN(1, "no debugfs fops defined for param type %s (i915.%s)\n",
- type, name);
-}
+#define _i915_param_create_file(parent, name, mode, valp) \
+ do { \
+ if (mode) \
+ _Generic(valp, \
+ bool *: debugfs_create_bool, \
+ int *: i915_debugfs_create_int, \
+ unsigned int *: i915_debugfs_create_uint, \
+ unsigned long *: debugfs_create_ulong, \
+ char **: i915_debugfs_create_charp)(name, mode, parent, valp); \
+ } while(0)
/* add a subdirectory with files for each i915 param */
struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
@@ -269,7 +258,7 @@ struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
* just let the generic create file fail silently with -EEXIST.
*/
-#define REGISTER(T, x, unused, mode, ...) _i915_param_create_file(dir, #x, #T, mode, &params->x);
+#define REGISTER(T, x, unused, mode, ...) _i915_param_create_file(dir, #x, mode, &params->x);
I915_PARAMS_FOR_EACH(REGISTER);
#undef REGISTER
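
The _Generic selection above is resolved entirely at compile time from the static type of valp, so each module parameter picks the matching debugfs creator without any string comparison. A standalone userspace illustration of the same dispatch (not part of the patch):

#include <stdbool.h>
#include <stdio.h>

#define kind_of(p) _Generic((p),		\
	bool *: "bool",				\
	int *: "int",				\
	unsigned int *: "unsigned int",		\
	char **: "char *",			\
	default: "unsupported")

int main(void)
{
	bool b;
	unsigned int u;
	char *s;

	/* prints: bool unsigned int char * */
	printf("%s %s %s\n", kind_of(&b), kind_of(&u), kind_of(&s));
	return 0;
}
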
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 19983b77118d..a53fd339e2cc 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -34,7 +34,6 @@
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
@@ -78,6 +77,7 @@
#include "pxp/intel_pxp_pm.h"
#include "soc/intel_dram.h"
+#include "soc/intel_gmch.h"
#include "i915_file_private.h"
#include "i915_debugfs.h"
@@ -107,141 +107,6 @@
static const struct drm_driver i915_drm_driver;
-static void i915_release_bridge_dev(struct drm_device *dev,
- void *bridge)
-{
- pci_dev_put(bridge);
-}
-
-static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
-{
- int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
-
- dev_priv->bridge_dev =
- pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
- if (!dev_priv->bridge_dev) {
- drm_err(&dev_priv->drm, "bridge device not found\n");
- return -EIO;
- }
-
- return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev,
- dev_priv->bridge_dev);
-}
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
-{
- int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp_lo, temp_hi = 0;
- u64 mchbar_addr;
- int ret;
-
- if (GRAPHICS_VER(dev_priv) >= 4)
- pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
- pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
- mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
- /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
- if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
- return 0;
-#endif
-
- /* Get some space for it */
- dev_priv->mch_res.name = "i915 MCHBAR";
- dev_priv->mch_res.flags = IORESOURCE_MEM;
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
- &dev_priv->mch_res,
- MCHBAR_SIZE, MCHBAR_SIZE,
- PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
- dev_priv->bridge_dev);
- if (ret) {
- drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
- dev_priv->mch_res.start = 0;
- return ret;
- }
-
- if (GRAPHICS_VER(dev_priv) >= 4)
- pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
- upper_32_bits(dev_priv->mch_res.start));
-
- pci_write_config_dword(dev_priv->bridge_dev, reg,
- lower_32_bits(dev_priv->mch_res.start));
- return 0;
-}
-
-/* Setup MCHBAR if possible, return true if we should disable it again */
-static void
-intel_setup_mchbar(struct drm_i915_private *dev_priv)
-{
- int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
- bool enabled;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return;
-
- dev_priv->mchbar_need_disable = false;
-
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
- enabled = !!(temp & DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- enabled = temp & 1;
- }
-
- /* If it's already enabled, don't have to do anything */
- if (enabled)
- return;
-
- if (intel_alloc_mchbar_resource(dev_priv))
- return;
-
- dev_priv->mchbar_need_disable = true;
-
- /* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
- temp | DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
- }
-}
-
-static void
-intel_teardown_mchbar(struct drm_i915_private *dev_priv)
-{
- int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-
- if (dev_priv->mchbar_need_disable) {
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- u32 deven_val;
-
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
- &deven_val);
- deven_val &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
- deven_val);
- } else {
- u32 mchbar_val;
-
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
- &mchbar_val);
- mchbar_val &= ~1;
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
- mchbar_val);
- }
- }
-
- if (dev_priv->mch_res.start)
- release_resource(&dev_priv->mch_res);
-}
-
static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
/*
@@ -302,6 +167,8 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
+ pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
+ pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
if (pre) {
drm_err(&dev_priv->drm, "This is a pre-production stepping. "
@@ -383,10 +250,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
- intel_pm_setup(dev_priv);
- ret = intel_power_domains_init(dev_priv);
- if (ret < 0)
- goto err_gem;
intel_irq_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
@@ -395,10 +258,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
return 0;
-err_gem:
- i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release_all(dev_priv);
- i915_drm_clients_fini(&dev_priv->clients);
err_rootgt:
intel_region_ttm_device_fini(dev_priv);
err_ttm:
@@ -447,7 +306,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- ret = i915_get_bridge_dev(dev_priv);
+ ret = intel_gmch_bridge_setup(dev_priv);
if (ret < 0)
return ret;
@@ -464,7 +323,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
}
/* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev_priv);
+ intel_gmch_bar_setup(dev_priv);
intel_device_info_runtime_init(dev_priv);
for_each_gt(gt, dev_priv, i) {
@@ -479,7 +338,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
return 0;
err_uncore:
- intel_teardown_mchbar(dev_priv);
+ intel_gmch_bar_teardown(dev_priv);
return ret;
}
@@ -490,7 +349,7 @@ err_uncore:
*/
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
- intel_teardown_mchbar(dev_priv);
+ intel_gmch_bar_teardown(dev_priv);
}
/**
@@ -1077,10 +936,9 @@ static void i915_driver_lastclose(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
- intel_fbdev_restore_mode(dev);
+ intel_fbdev_restore_mode(i915);
- if (HAS_DISPLAY(i915))
- vga_switcheroo_process_delayed_switch();
+ vga_switcheroo_process_delayed_switch();
}
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -1144,7 +1002,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
- intel_dmc_ucode_suspend(i915);
+ intel_dmc_suspend(i915);
i915_gem_suspend(i915);
@@ -1174,6 +1032,13 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
return false;
}
+static void i915_drm_complete(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ intel_pxp_resume_complete(i915->pxp);
+}
+
static int i915_drm_prepare(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
@@ -1214,8 +1079,6 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev_priv);
-
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(dev_priv);
i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
@@ -1229,7 +1092,7 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->suspend_count++;
- intel_dmc_ucode_suspend(dev_priv);
+ intel_dmc_suspend(dev_priv);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1350,7 +1213,7 @@ static int i915_drm_resume(struct drm_device *dev)
/* Must be called after GGTT is resumed. */
intel_dpt_resume(dev_priv);
- intel_dmc_ucode_resume(dev_priv);
+ intel_dmc_resume(dev_priv);
i915_restore_display(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
@@ -1374,8 +1237,6 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
- intel_pxp_resume(dev_priv->pxp);
-
intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
intel_hpd_init(dev_priv);
@@ -1567,6 +1428,16 @@ static int i915_pm_resume(struct device *kdev)
return i915_drm_resume(&i915->drm);
}
+static void i915_pm_complete(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return;
+
+ i915_drm_complete(&i915->drm);
+}
+
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
@@ -1787,6 +1658,7 @@ const struct dev_pm_ops i915_pm_ops = {
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
+ .complete = i915_pm_complete,
/*
* S4 event handlers
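
Note: the new .complete hook runs in the PM core's final resume phase, after every ->resume() callback in the system has returned, which is why intel_pxp_resume_complete() moves there from i915_drm_resume(). A minimal sketch of the same wiring, with illustrative names (the example_* symbols are not part of the patch):

	#include <linux/device.h>
	#include <linux/pm.h>

	static int example_suspend(struct device *dev)
	{
		/* quiesce the device */
		return 0;
	}

	static int example_resume(struct device *dev)
	{
		/* restore device state; other devices may still be resuming */
		return 0;
	}

	static void example_complete(struct device *dev)
	{
		/* all ->resume() callbacks have finished; safe for late work */
	}

	static const struct dev_pm_ops example_pm_ops = {
		.suspend  = example_suspend,
		.resume   = example_resume,
		.complete = example_complete,
	};
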
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eaf738698149..6254aa977398 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -36,7 +36,7 @@
#include <drm/ttm/ttm_device.h>
-#include "display/intel_display.h"
+#include "display/intel_display_limits.h"
#include "display/intel_display_core.h"
#include "gem/i915_gem_context_types.h"
@@ -65,26 +65,41 @@
#include "intel_uncore.h"
struct drm_i915_clock_gating_funcs;
-struct drm_i915_gem_object;
-struct drm_i915_private;
-struct intel_connector;
-struct intel_dp;
-struct intel_encoder;
-struct intel_limit;
-struct intel_overlay_error_state;
struct vlv_s0ix_state;
struct intel_pxp;
-#define I915_GEM_GPU_DOMAINS \
- (I915_GEM_DOMAIN_RENDER | \
- I915_GEM_DOMAIN_SAMPLER | \
- I915_GEM_DOMAIN_COMMAND | \
- I915_GEM_DOMAIN_INSTRUCTION | \
- I915_GEM_DOMAIN_VERTEX)
+#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
-#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
+/* Data Stolen Memory (DSM) aka "i915 stolen memory" */
+struct i915_dsm {
+ /*
+ * The start and end of DSM which we can optionally use to create GEM
+ * objects backed by stolen memory.
+ *
+ * Note that usable_size tells us exactly how much of this we are
+ * actually allowed to use, given that some portion of it is in fact
+ * reserved for use by hardware functions.
+ */
+ struct resource stolen;
-#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
+ /*
+ * Reserved portion of DSM.
+ */
+ struct resource reserved;
+
+ /*
+ * Total size minus reserved ranges.
+ *
+ * DSM is segmented in hardware with different portions offlimits to
+ * certain functions.
+ *
+ * The drm_mm is initialised to the total accessible range, as found
+ * from the PCI config. On Broadwell+, this is further restricted to
+ * avoid the first page! The upper end of DSM is reserved for hardware
+ * functions and similarly removed from the accessible range.
+ */
+ resource_size_t usable_size;
+};
struct i915_suspend_saved_registers {
u32 saveDSPARB;
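
Note: the new struct i915_dsm groups the stolen-memory bookkeeping that used to sit loose in drm_i915_private (dsm, dsm_reserved, stolen_usable_size). A sketch of how the three fields relate; the exact carve-out rules (first-page skip on Broadwell+, top-of-DSM reservation) stay in the stolen-memory code, and example_dsm_usable_size() is illustrative only:

	#include <linux/ioport.h>

	static resource_size_t example_dsm_usable_size(const struct i915_dsm *dsm)
	{
		resource_size_t total = resource_size(&dsm->stolen);
		resource_size_t reserved = resource_size(&dsm->reserved);

		/* usable_size is roughly "everything not reserved for hardware" */
		return total > reserved ? total - reserved : 0;
	}
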
@@ -163,19 +178,6 @@ struct i915_gem_mm {
u32 shrink_count;
};
-#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
-
-unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
- u64 context);
-
-static inline unsigned long
-i915_fence_timeout(const struct drm_i915_private *i915)
-{
- return i915_fence_context_timeout(i915, U64_MAX);
-}
-
-#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
-
struct i915_virtual_gpu {
struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
@@ -205,29 +207,7 @@ struct drm_i915_private {
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
- /**
- * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
- * end of stolen which we can optionally use to create GEM objects
- * backed by stolen memory. Note that stolen_usable_size tells us
- * exactly how much of this we are actually allowed to use, given that
- * some portion of it is in fact reserved for use by hardware functions.
- */
- struct resource dsm;
- /**
- * Reseved portion of Data Stolen Memory
- */
- struct resource dsm_reserved;
-
- /*
- * Stolen memory is segmented in hardware with different portions
- * offlimits to certain functions.
- *
- * The drm_mm is initialised to the total accessible range, as found
- * from the PCI config. On Broadwell+, this is further restricted to
- * avoid the first page! The upper end of stolen memory is reserved for
- * hardware functions and similarly removed from the accessible range.
- */
- resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
+ struct i915_dsm dsm;
struct intel_uncore uncore;
struct intel_uncore_mmio_debug mmio_debug;
@@ -236,13 +216,15 @@ struct drm_i915_private {
struct intel_gvt *gvt;
- struct pci_dev *bridge_dev;
+ struct {
+ struct pci_dev *pdev;
+ struct resource mch_res;
+ bool mchbar_need_disable;
+ } gmch;
struct rb_root uabi_engines;
unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
- struct resource mch_res;
-
/* protects the irq masks */
spinlock_t irq_lock;
@@ -288,8 +270,6 @@ struct drm_i915_private {
struct i915_gem_mm mm;
- bool mchbar_need_disable;
-
struct intel_l3_parity l3_parity;
/*
@@ -300,14 +280,6 @@ struct drm_i915_private {
struct i915_gpu_error gpu_error;
- /*
- * Shadows for CHV DPLL_MD regs to keep the state
- * checker somewhat working in the presence hardware
- * crappiness (can't read out DPLL_MD for pipes B & C).
- */
- u32 chv_dpll_md[I915_MAX_PIPES];
- u32 bxt_phy_grc;
-
u32 suspend_count;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state *vlv_s0ix_state;
@@ -368,19 +340,11 @@ struct drm_i915_private {
struct intel_pxp *pxp;
- u8 pch_ssc_use;
-
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
bool irq_enabled;
- /*
- * DG2: Mask of PHYs that were not calibrated by the firmware
- * and should not be used.
- */
- u8 snps_phy_failed_calibration;
-
struct i915_pmu pmu;
struct i915_drm_clients clients;
@@ -469,9 +433,6 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
-#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
-#define HAS_DSC(__i915) (RUNTIME_INFO(__i915)->has_dsc)
-
#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
@@ -619,6 +580,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
#define IS_ADLP_RPLP(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
+#define IS_ADLP_RPLU(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -692,22 +655,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_TIGERLAKE(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
-#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
- (IS_TGL_UY(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
-#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
- (IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915)) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
#define IS_RKL_DISPLAY_STEP(p, since, until) \
(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
-#define IS_DG1_GRAPHICS_STEP(p, since, until) \
- (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_DG1_DISPLAY_STEP(p, since, until) \
- (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))
-
#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
(IS_ALDERLAKE_S(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
@@ -887,6 +837,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc)
+#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
+#define HAS_DSC(__i915) (RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
#define HAS_HECI_PXP(dev_priv) \
(INTEL_INFO(dev_priv)->has_heci_pxp)
@@ -912,7 +865,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
*/
#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
-#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_SAGV(dev_priv) (DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv))
#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
@@ -944,9 +898,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2 : HAS_L3_DPF(dev_priv))
-#define GT_FREQUENCY_MULTIPLIER 50
-#define GEN9_FREQ_SCALER 3
-
#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index a5cdf6662d01..82e9d289398c 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -39,6 +39,13 @@ struct i915_gem_ww_ctx;
struct i915_gtt_view;
struct i915_vma;
+#define I915_GEM_GPU_DOMAINS \
+ (I915_GEM_DOMAIN_RENDER | \
+ I915_GEM_DOMAIN_SAMPLER | \
+ I915_GEM_DOMAIN_COMMAND | \
+ I915_GEM_DOMAIN_INSTRUCTION | \
+ I915_GEM_DOMAIN_VERTEX)
+
void i915_gem_init_early(struct drm_i915_private *i915);
void i915_gem_cleanup_early(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 243419783052..3d77679bf211 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -18,6 +18,8 @@ struct drm_i915_gem_object;
struct i915_address_space;
struct i915_gem_ww_ctx;
+#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
+
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 1225bc432f0d..596dd2c07010 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -99,20 +99,6 @@ hwm_field_read_and_scale(struct hwm_drvdata *ddat, i915_reg_t rgadr,
return mul_u64_u32_shr(reg_value, scale_factor, nshift);
}
-static void
-hwm_field_scale_and_write(struct hwm_drvdata *ddat, i915_reg_t rgadr,
- int nshift, unsigned int scale_factor, long lval)
-{
- u32 nval;
-
- /* Computation in 64-bits to avoid overflow. Round to nearest. */
- nval = DIV_ROUND_CLOSEST_ULL((u64)lval << nshift, scale_factor);
-
- hwm_locked_with_pm_intel_uncore_rmw(ddat, rgadr,
- PKG_PWR_LIM_1,
- REG_FIELD_PREP(PKG_PWR_LIM_1, nval));
-}
-
/*
* hwm_energy - Obtain energy value
*
@@ -232,11 +218,15 @@ hwm_power1_max_interval_store(struct device *dev,
/* val in hw units */
val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
/* Convert to 1.x * power(2,y) */
- if (!val)
- return -EINVAL;
- y = ilog2(val);
- /* x = (val - (1 << y)) >> (y - 2); */
- x = (val - (1ul << y)) << x_w >> y;
+ if (!val) {
+ /* Avoid ilog2(0) */
+ y = 0;
+ x = 0;
+ } else {
+ y = ilog2(val);
+ /* x = (val - (1 << y)) >> (y - 2); */
+ x = (val - (1ul << y)) << x_w >> y;
+ }
rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
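
Note: the conversion above packs the interval into a 1.x * 2^y floating-point-style field; the patch only adds the val == 0 special case so ilog2(0) is never evaluated. A worked sketch of the encoding (example_encode_1xy() is illustrative; x_w stands for the mantissa field width used by the driver):

	#include <linux/log2.h>
	#include <linux/types.h>

	static void example_encode_1xy(u64 val, unsigned int x_w, u32 *x, u32 *y)
	{
		if (!val) {
			/* matches the new special case: encode 0 as x = y = 0 */
			*x = 0;
			*y = 0;
			return;
		}

		*y = ilog2(val);			/* exponent */
		*x = (val - (1ull << *y)) << x_w >> *y;	/* top x_w mantissa bits */
	}

	/* e.g. val = 6 (0b110), x_w = 2: y = 2, x = 0b10, i.e. 1.10b * 2^2 = 6 */
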
@@ -392,6 +382,22 @@ hwm_power_max_read(struct hwm_drvdata *ddat, long *val)
}
static int
+hwm_power_max_write(struct hwm_drvdata *ddat, long val)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+ u32 nval;
+
+ /* Computation in 64-bits to avoid overflow. Round to nearest. */
+ nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER);
+ nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
+
+ hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
+ PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1,
+ nval);
+ return 0;
+}
+
+static int
hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
{
struct i915_hwmon *hwmon = ddat->hwmon;
@@ -425,16 +431,11 @@ hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
static int
hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
{
- struct i915_hwmon *hwmon = ddat->hwmon;
u32 uval;
switch (attr) {
case hwmon_power_max:
- hwm_field_scale_and_write(ddat,
- hwmon->rg.pkg_rapl_limit,
- hwmon->scl_shift_power,
- SF_POWER, val);
- return 0;
+ return hwm_power_max_write(ddat, val);
case hwmon_power_crit:
uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
return hwm_pcode_write_i1(ddat->uncore->i915, uval);
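
Note: hwm_power_max_write() folds the old scale-and-write helper into one place. hwmon supplies the limit in microwatts, the register wants hardware units of 1/2^scl_shift_power watt, and PKG_PWR_LIM_1_EN is set in the same rmw. A standalone sketch of the unit conversion; EXAMPLE_SF_POWER and the shift value used in the comment are assumptions for illustration:

	#include <linux/math.h>
	#include <linux/types.h>

	#define EXAMPLE_SF_POWER 1000000	/* microwatts per watt (assumed) */

	static u32 example_power_to_hw_units(long uwatts, int scl_shift_power)
	{
		/* e.g. 15 W = 15000000 uW with a shift of 3 -> 120 (15 * 8) */
		return DIV_ROUND_CLOSEST_ULL((u64)uwatts << scl_shift_power,
					     EXAMPLE_SF_POWER);
	}
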
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 54ea28cf8a1a..31271c30a8cf 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -52,7 +52,6 @@
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "intel_pm.h"
/**
* DOC: interrupt handling
@@ -81,8 +80,7 @@ static inline void pmu_irq_stats(struct drm_i915_private *i915,
}
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
-typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
- enum hpd_pin pin);
+typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder);
static const u32 hpd_ilk[HPD_NUM_PINS] = {
[HPD_PORT_A] = DE_DP_A_HOTPLUG,
@@ -199,6 +197,8 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
hpd->hpd = hpd_gen11;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
hpd->hpd = hpd_bxt;
+ else if (DISPLAY_VER(dev_priv) == 9)
+ hpd->hpd = NULL; /* no north HPD on SKL */
else if (DISPLAY_VER(dev_priv) >= 8)
hpd->hpd = hpd_bdw;
else if (DISPLAY_VER(dev_priv) >= 7)
@@ -614,414 +614,6 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-/*
- * This timing diagram depicts the video signal in and
- * around the vertical blanking period.
- *
- * Assumptions about the fictitious mode used in this example:
- * vblank_start >= 3
- * vsync_start = vblank_start + 1
- * vsync_end = vblank_start + 2
- * vtotal = vblank_start + 3
- *
- * start of vblank:
- * latch double buffered registers
- * increment frame counter (ctg+)
- * generate start of vblank interrupt (gen4+)
- * |
- * | frame start:
- * | generate frame start interrupt (aka. vblank interrupt) (gmch)
- * | may be shifted forward 1-3 extra lines via PIPECONF
- * | |
- * | | start of vsync:
- * | | generate vsync interrupt
- * | | |
- * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
- * . \hs/ . \hs/ \hs/ \hs/ . \hs/
- * ----va---> <-----------------vb--------------------> <--------va-------------
- * | | <----vs-----> |
- * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
- * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
- * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
- * | | |
- * last visible pixel first visible pixel
- * | increment frame counter (gen3/4)
- * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
- *
- * x = horizontal active
- * _ = horizontal blanking
- * hs = horizontal sync
- * va = vertical active
- * vb = vertical blanking
- * vs = vertical sync
- * vbs = vblank_start (number)
- *
- * Summary:
- * - most events happen at the start of horizontal sync
- * - frame start happens at the start of horizontal blank, 1-4 lines
- * (depending on PIPECONF settings) after the start of vblank
- * - gen3/4 pixel and frame counter are synchronized with the start
- * of horizontal active on the first line of vertical active
- */
-
-/* Called from drm generic code, passed a 'crtc', which
- * we use as a pipe index
- */
-u32 i915_get_vblank_counter(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
- const struct drm_display_mode *mode = &vblank->hwmode;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- i915_reg_t high_frame, low_frame;
- u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
- unsigned long irqflags;
-
- /*
- * On i965gm TV output the frame counter only works up to
- * the point when we enable the TV encoder. After that the
- * frame counter ceases to work and reads zero. We need a
- * vblank wait before enabling the TV encoder and so we
- * have to enable vblank interrupts while the frame counter
- * is still in a working state. However the core vblank code
- * does not like us returning non-zero frame counter values
- * when we've told it that we don't have a working frame
- * counter. Thus we must stop non-zero values leaking out.
- */
- if (!vblank->max_vblank_count)
- return 0;
-
- htotal = mode->crtc_htotal;
- hsync_start = mode->crtc_hsync_start;
- vbl_start = mode->crtc_vblank_start;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vbl_start = DIV_ROUND_UP(vbl_start, 2);
-
- /* Convert to pixel count */
- vbl_start *= htotal;
-
- /* Start of vblank event occurs at start of hsync */
- vbl_start -= htotal - hsync_start;
-
- high_frame = PIPEFRAME(pipe);
- low_frame = PIPEFRAMEPIXEL(pipe);
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /*
- * High & low register fields aren't synchronized, so make sure
- * we get a low value that's stable across two reads of the high
- * register.
- */
- do {
- high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
- low = intel_de_read_fw(dev_priv, low_frame);
- high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
- } while (high1 != high2);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
- high1 >>= PIPE_FRAME_HIGH_SHIFT;
- pixel = low & PIPE_PIXEL_MASK;
- low >>= PIPE_FRAME_LOW_SHIFT;
-
- /*
- * The frame counter increments at beginning of active.
- * Cook up a vblank counter by also checking the pixel
- * counter against vblank start.
- */
- return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
-}
-
-u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- if (!vblank->max_vblank_count)
- return 0;
-
- return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
-}
-
-static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_vblank_crtc *vblank =
- &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
- const struct drm_display_mode *mode = &vblank->hwmode;
- u32 htotal = mode->crtc_htotal;
- u32 clock = mode->crtc_clock;
- u32 scan_prev_time, scan_curr_time, scan_post_time;
-
- /*
- * To avoid the race condition where we might cross into the
- * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
- * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
- * during the same frame.
- */
- do {
- /*
- * This field provides read back of the display
- * pipe frame time stamp. The time stamp value
- * is sampled at every start of vertical blank.
- */
- scan_prev_time = intel_de_read_fw(dev_priv,
- PIPE_FRMTMSTMP(crtc->pipe));
-
- /*
- * The TIMESTAMP_CTR register has the current
- * time stamp value.
- */
- scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
-
- scan_post_time = intel_de_read_fw(dev_priv,
- PIPE_FRMTMSTMP(crtc->pipe));
- } while (scan_post_time != scan_prev_time);
-
- return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
- clock), 1000 * htotal);
-}
-
-/*
- * On certain encoders on certain platforms, pipe
- * scanline register will not work to get the scanline,
- * since the timings are driven from the PORT or issues
- * with scanline register updates.
- * This function will use Framestamp and current
- * timestamp registers to calculate the scanline.
- */
-static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
-{
- struct drm_vblank_crtc *vblank =
- &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
- const struct drm_display_mode *mode = &vblank->hwmode;
- u32 vblank_start = mode->crtc_vblank_start;
- u32 vtotal = mode->crtc_vtotal;
- u32 scanline;
-
- scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
- scanline = min(scanline, vtotal - 1);
- scanline = (scanline + vblank_start) % vtotal;
-
- return scanline;
-}
-
-/*
- * intel_de_read_fw(), only for fast reads of display block, no need for
- * forcewake etc.
- */
-static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct drm_display_mode *mode;
- struct drm_vblank_crtc *vblank;
- enum pipe pipe = crtc->pipe;
- int position, vtotal;
-
- if (!crtc->active)
- return 0;
-
- vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
- mode = &vblank->hwmode;
-
- if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
- return __intel_get_crtc_scanline_from_timestamp(crtc);
-
- vtotal = mode->crtc_vtotal;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vtotal /= 2;
-
- position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
-
- /*
- * On HSW, the DSL reg (0x70000) appears to return 0 if we
- * read it just before the start of vblank. So try it again
- * so we don't accidentally end up spanning a vblank frame
- * increment, causing the pipe_update_end() code to squak at us.
- *
- * The nature of this problem means we can't simply check the ISR
- * bit and return the vblank start value; nor can we use the scanline
- * debug register in the transcoder as it appears to have the same
- * problem. We may need to extend this to include other platforms,
- * but so far testing only shows the problem on HSW.
- */
- if (HAS_DDI(dev_priv) && !position) {
- int i, temp;
-
- for (i = 0; i < 100; i++) {
- udelay(1);
- temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
- if (temp != position) {
- position = temp;
- break;
- }
- }
- }
-
- /*
- * See update_scanline_offset() for the details on the
- * scanline_offset adjustment.
- */
- return (position + crtc->scanline_offset) % vtotal;
-}
-
-static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
- bool in_vblank_irq,
- int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- struct drm_device *dev = _crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(_crtc);
- enum pipe pipe = crtc->pipe;
- int position;
- int vbl_start, vbl_end, hsync_start, htotal, vtotal;
- unsigned long irqflags;
- bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
- IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
- crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
-
- if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
- drm_dbg(&dev_priv->drm,
- "trying to get scanoutpos for disabled "
- "pipe %c\n", pipe_name(pipe));
- return false;
- }
-
- htotal = mode->crtc_htotal;
- hsync_start = mode->crtc_hsync_start;
- vtotal = mode->crtc_vtotal;
- vbl_start = mode->crtc_vblank_start;
- vbl_end = mode->crtc_vblank_end;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vbl_start = DIV_ROUND_UP(vbl_start, 2);
- vbl_end /= 2;
- vtotal /= 2;
- }
-
- /*
- * Lock uncore.lock, as we will do multiple timing critical raw
- * register reads, potentially with preemption disabled, so the
- * following code must not block on uncore.lock.
- */
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
-
- /* Get optional system timestamp before query. */
- if (stime)
- *stime = ktime_get();
-
- if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
- int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
-
- position = __intel_get_crtc_scanline(crtc);
-
- /*
- * Already exiting vblank? If so, shift our position
- * so it looks like we're already apporaching the full
- * vblank end. This should make the generated timestamp
- * more or less match when the active portion will start.
- */
- if (position >= vbl_start && scanlines < position)
- position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
- } else if (use_scanline_counter) {
- /* No obvious pixelcount register. Only query vertical
- * scanout position from Display scan line register.
- */
- position = __intel_get_crtc_scanline(crtc);
- } else {
- /* Have access to pixelcount since start of frame.
- * We can split this into vertical and horizontal
- * scanout position.
- */
- position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
-
- /* convert to pixel counts */
- vbl_start *= htotal;
- vbl_end *= htotal;
- vtotal *= htotal;
-
- /*
- * In interlaced modes, the pixel counter counts all pixels,
- * so one field will have htotal more pixels. In order to avoid
- * the reported position from jumping backwards when the pixel
- * counter is beyond the length of the shorter field, just
- * clamp the position the length of the shorter field. This
- * matches how the scanline counter based position works since
- * the scanline counter doesn't count the two half lines.
- */
- if (position >= vtotal)
- position = vtotal - 1;
-
- /*
- * Start of vblank interrupt is triggered at start of hsync,
- * just prior to the first active line of vblank. However we
- * consider lines to start at the leading edge of horizontal
- * active. So, should we get here before we've crossed into
- * the horizontal active of the first line in vblank, we would
- * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
- * always add htotal-hsync_start to the current pixel position.
- */
- position = (position + htotal - hsync_start) % vtotal;
- }
-
- /* Get optional system timestamp after query. */
- if (etime)
- *etime = ktime_get();
-
- /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
- /*
- * While in vblank, position will be negative
- * counting up towards 0 at vbl_end. And outside
- * vblank, position will be positive counting
- * up since vbl_end.
- */
- if (position >= vbl_start)
- position -= vbl_end;
- else
- position += vtotal - vbl_end;
-
- if (use_scanline_counter) {
- *vpos = position;
- *hpos = 0;
- } else {
- *vpos = position / htotal;
- *hpos = position - (*vpos * htotal);
- }
-
- return true;
-}
-
-bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
- ktime_t *vblank_time, bool in_vblank_irq)
-{
- return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
- crtc, max_error, vblank_time, in_vblank_irq,
- i915_get_crtc_scanoutpos);
-}
-
-int intel_get_crtc_scanline(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- unsigned long irqflags;
- int position;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- position = __intel_get_crtc_scanline(crtc);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
- return position;
-}
-
/**
* ivb_parity_work - Workqueue called when a parity error interrupt
* occurred.
@@ -1292,7 +884,7 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
u32 hotplug = 0;
for_each_intel_encoder(&i915->drm, encoder)
- hotplug |= hotplug_enables(i915, encoder->hpd_pin);
+ hotplug |= hotplug_enables(encoder);
return hotplug;
}
@@ -3243,10 +2835,11 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
/*
* When CPU and PCH are on the same package, port A
@@ -3298,31 +2891,29 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
-static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
case HPD_PORT_B:
case HPD_PORT_C:
case HPD_PORT_D:
- return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
+ return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin);
default:
return 0;
}
}
-static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_TC1:
case HPD_PORT_TC2:
case HPD_PORT_TC3:
case HPD_PORT_TC4:
case HPD_PORT_TC5:
case HPD_PORT_TC6:
- return ICP_TC_HPD_ENABLE(pin);
+ return ICP_TC_HPD_ENABLE(encoder->hpd_pin);
default:
return 0;
}
@@ -3366,17 +2957,16 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
icp_tc_hpd_detection_setup(dev_priv);
}
-static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_TC1:
case HPD_PORT_TC2:
case HPD_PORT_TC3:
case HPD_PORT_TC4:
case HPD_PORT_TC5:
case HPD_PORT_TC6:
- return GEN11_HOTPLUG_CTL_ENABLE(pin);
+ return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin);
default:
return 0;
}
@@ -3439,10 +3029,9 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
icp_hpd_irq_setup(dev_priv);
}
-static u32 spt_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 spt_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
return PORTA_HOTPLUG_ENABLE;
case HPD_PORT_B:
@@ -3456,10 +3045,9 @@ static u32 spt_hotplug_enables(struct drm_i915_private *i915,
}
}
-static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_E:
return PORTE_HOTPLUG_ENABLE;
default:
@@ -3502,10 +3090,9 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
spt_hpd_detection_setup(dev_priv);
}
-static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
return DIGITAL_PORTA_HOTPLUG_ENABLE |
DIGITAL_PORTA_PULSE_DURATION_2ms;
@@ -3543,25 +3130,24 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_irq_setup(dev_priv);
}
-static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
{
u32 hotplug;
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
hotplug = PORTA_HOTPLUG_ENABLE;
- if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
+ if (intel_bios_encoder_hpd_invert(encoder->devdata))
hotplug |= BXT_DDIA_HPD_INVERT;
return hotplug;
case HPD_PORT_B:
hotplug = PORTB_HOTPLUG_ENABLE;
- if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
+ if (intel_bios_encoder_hpd_invert(encoder->devdata))
hotplug |= BXT_DDIB_HPD_INVERT;
return hotplug;
case HPD_PORT_C:
hotplug = PORTC_HOTPLUG_ENABLE;
- if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
+ if (intel_bios_encoder_hpd_invert(encoder->devdata))
hotplug |= BXT_DDIC_HPD_INVERT;
return hotplug;
default:
@@ -3879,15 +3465,33 @@ static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~0u;
}
+static u32 i9xx_error_mask(struct drm_i915_private *i915)
+{
+ /*
+ * On gen2/3 FBC generates (seemingly spurious)
+ * display INVALID_GTT/INVALID_GTT_PTE table errors.
+ *
+ * Also gen3 bspec has this to say:
+ * "DISPA_INVALID_GTT_PTE
+ " [DevNapa] : Reserved. This bit does not reflect the page
+ " table error for the display plane A."
+ *
+ * Unfortunately we can't mask off individual PGTBL_ER bits,
+ * so we just have to mask off all page table errors via EMR.
+ */
+ if (HAS_FBC(i915))
+ return ~I915_ERROR_MEMORY_REFRESH;
+ else
+ return ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+}
+
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
- intel_uncore_write16(uncore,
- EMR,
- ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH));
+ intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -3918,9 +3522,7 @@ static void i8xx_error_irq_ack(struct drm_i915_private *i915,
u16 emr;
*eir = intel_uncore_read16(uncore, EIR);
-
- if (*eir)
- intel_uncore_write16(uncore, EIR, *eir);
+ intel_uncore_write16(uncore, EIR, *eir);
*eir_stuck = intel_uncore_read16(uncore, EIR);
if (*eir_stuck == 0)
@@ -3949,6 +3551,9 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
if (eir_stuck)
drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
eir_stuck);
+
+ drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
+ intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
@@ -3956,7 +3561,8 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
{
u32 emr;
- *eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0);
+ *eir = intel_uncore_read(&dev_priv->uncore, EIR);
+ intel_uncore_write(&dev_priv->uncore, EIR, *eir);
*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
if (*eir_stuck == 0)
@@ -3972,7 +3578,8 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
* (or by a GPU reset) so we mask any bit that
* remains set.
*/
- emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff);
+ emr = intel_uncore_read(&dev_priv->uncore, EMR);
+ intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
@@ -3984,6 +3591,9 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
if (eir_stuck)
drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
eir_stuck);
+
+ drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
+ intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
@@ -4053,8 +3663,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
- intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH));
+ intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -4157,26 +3766,31 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~0u;
}
-static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
+static u32 i965_error_mask(struct drm_i915_private *i915)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 enable_mask;
- u32 error_mask;
-
/*
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
+ *
+ * i965 FBC no longer generates spurious GTT errors,
+ * so we can always enable the page table errors.
*/
- if (IS_G4X(dev_priv)) {
- error_mask = ~(GM45_ERROR_PAGE_TABLE |
- GM45_ERROR_MEM_PRIV |
- GM45_ERROR_CP_PRIV |
- I915_ERROR_MEMORY_REFRESH);
- } else {
- error_mask = ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH);
- }
- intel_uncore_write(uncore, EMR, error_mask);
+ if (IS_G4X(i915))
+ return ~(GM45_ERROR_PAGE_TABLE |
+ GM45_ERROR_MEM_PRIV |
+ GM45_ERROR_CP_PRIV |
+ I915_ERROR_MEMORY_REFRESH);
+ else
+ return ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+}
+
+static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+ u32 enable_mask;
+
+ intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
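
Note: the ack paths above (both the 16-bit i8xx and 32-bit i9xx variants) switch from intel_uncore_rmw() to explicit read/write pairs, but the flow is unchanged: latch EIR, write the latched bits back to clear them, and whatever is still set afterwards is a stuck error that gets masked in EMR so it stops re-triggering. Consolidated sketch for readability only (the example_* naming and the uncore parameter are mine):

	static void example_error_irq_ack(struct intel_uncore *uncore,
					  u32 *eir, u32 *eir_stuck)
	{
		u32 emr;

		*eir = intel_uncore_read(uncore, EIR);
		intel_uncore_write(uncore, EIR, *eir);	/* writing the bits back clears them */

		*eir_stuck = intel_uncore_read(uncore, EIR);
		if (*eir_stuck == 0)
			return;

		/* mask off whatever refuses to clear, preserving the rest of EMR */
		emr = intel_uncore_read(uncore, EMR);
		intel_uncore_write(uncore, EMR, 0xffffffff);
		intel_uncore_write(uncore, EMR, emr | *eir_stuck);
	}
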
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 9b004fc3444e..03ee4c8b1ed3 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -66,18 +66,12 @@ bool intel_irqs_enabled(struct drm_i915_private *dev_priv);
void intel_synchronize_irq(struct drm_i915_private *i915);
void intel_synchronize_hardirq(struct drm_i915_private *i915);
-int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv);
-bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
- ktime_t *vblank_time, bool in_vblank_irq);
-
-u32 i915_get_vblank_counter(struct drm_crtc *crtc);
-u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
int i8xx_enable_vblank(struct drm_crtc *crtc);
int i915gm_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d634bd3f641a..ade744cccfea 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -222,27 +222,44 @@ i915_param_named_unsafe(lmem_size, uint, 0400,
i915_param_named_unsafe(lmem_bar_size, uint, 0400,
"Set the lmem bar size(in MiB).");
-static __always_inline void _print_param(struct drm_printer *p,
- const char *name,
- const char *type,
- const void *x)
+static void _param_print_bool(struct drm_printer *p, const char *name,
+ bool val)
{
- if (!__builtin_strcmp(type, "bool"))
- drm_printf(p, "i915.%s=%s\n", name,
- str_yes_no(*(const bool *)x));
- else if (!__builtin_strcmp(type, "int"))
- drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
- else if (!__builtin_strcmp(type, "unsigned int"))
- drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
- else if (!__builtin_strcmp(type, "unsigned long"))
- drm_printf(p, "i915.%s=%lu\n", name, *(const unsigned long *)x);
- else if (!__builtin_strcmp(type, "char *"))
- drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
- else
- WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n",
- type, name);
+ drm_printf(p, "i915.%s=%s\n", name, str_yes_no(val));
}
+static void _param_print_int(struct drm_printer *p, const char *name,
+ int val)
+{
+ drm_printf(p, "i915.%s=%d\n", name, val);
+}
+
+static void _param_print_uint(struct drm_printer *p, const char *name,
+ unsigned int val)
+{
+ drm_printf(p, "i915.%s=%u\n", name, val);
+}
+
+static void _param_print_ulong(struct drm_printer *p, const char *name,
+ unsigned long val)
+{
+ drm_printf(p, "i915.%s=%lu\n", name, val);
+}
+
+static void _param_print_charp(struct drm_printer *p, const char *name,
+ const char *val)
+{
+ drm_printf(p, "i915.%s=%s\n", name, val);
+}
+
+#define _param_print(p, name, val) \
+ _Generic(val, \
+ bool: _param_print_bool, \
+ int: _param_print_int, \
+ unsigned int: _param_print_uint, \
+ unsigned long: _param_print_ulong, \
+ char *: _param_print_charp)(p, name, val)
+
/**
* i915_params_dump - dump i915 modparams
* @params: i915 modparams
@@ -252,37 +269,48 @@ static __always_inline void _print_param(struct drm_printer *p,
*/
void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
{
-#define PRINT(T, x, ...) _print_param(p, #x, #T, &params->x);
+#define PRINT(T, x, ...) _param_print(p, #x, params->x);
I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
-static __always_inline void dup_param(const char *type, void *x)
+static void _param_dup_charp(char **valp)
{
- if (!__builtin_strcmp(type, "char *"))
- *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
+ *valp = kstrdup(*valp, GFP_ATOMIC);
}
+static void _param_nop(void *valp)
+{
+}
+
+#define _param_dup(valp) \
+ _Generic(valp, \
+ char **: _param_dup_charp, \
+ default: _param_nop)(valp)
+
void i915_params_copy(struct i915_params *dest, const struct i915_params *src)
{
*dest = *src;
-#define DUP(T, x, ...) dup_param(#T, &dest->x);
+#define DUP(T, x, ...) _param_dup(&dest->x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
}
-static __always_inline void free_param(const char *type, void *x)
+static void _param_free_charp(char **valp)
{
- if (!__builtin_strcmp(type, "char *")) {
- kfree(*(void **)x);
- *(void **)x = NULL;
- }
+ kfree(*valp);
+ *valp = NULL;
}
+#define _param_free(valp) \
+ _Generic(valp, \
+ char **: _param_free_charp, \
+ default: _param_nop)(valp)
+
/* free the allocated members, *not* the passed in params itself */
void i915_params_free(struct i915_params *params)
{
-#define FREE(T, x, ...) free_param(#T, &params->x);
+#define FREE(T, x, ...) _param_free(&params->x);
I915_PARAMS_FOR_EACH(FREE);
#undef FREE
}
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6b4a66734e09..a8d942b16223 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,6 +26,7 @@
#include <drm/drm_drv.h>
#include <drm/i915_pciids.h>
+#include "display/intel_display.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_sa_media.h"
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 52531ab28c5f..a76c5ce9513d 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -17,7 +17,6 @@
#include "i915_drv.h"
#include "i915_pmu.h"
-#include "intel_pm.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cf80500fff79..34b7d2ac33fb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -116,6 +116,9 @@
* #define GEN8_BAR _MMIO(0xb888)
*/
+#define GU_CNTL_PROTECTED _MMIO(0x10100C)
+#define DEPRESENT REG_BIT(9)
+
#define GU_CNTL _MMIO(0x101010)
#define LMEM_INIT REG_BIT(7)
#define DRIVERFLR REG_BIT(31)
@@ -541,9 +544,10 @@
#define _BXT_PHY0_BASE 0x6C000
#define _BXT_PHY1_BASE 0x162000
#define _BXT_PHY2_BASE 0x163000
-#define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \
- _BXT_PHY1_BASE, \
- _BXT_PHY2_BASE)
+#define BXT_PHY_BASE(phy) \
+ _PICK_EVEN_2RANGES(phy, 1, \
+ _BXT_PHY0_BASE, _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE, _BXT_PHY2_BASE)
#define _BXT_PHY(phy, reg) \
_MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
@@ -566,13 +570,14 @@
#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
_BXT_PHY_CTL_DDI_B)
-#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
+#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP, \
- _PHY_CTL_FAMILY_DDI_C)
+#define BXT_PHY_CTL_FAMILY(phy) \
+ _MMIO(_PICK_EVEN_2RANGES(phy, 1, \
+ _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@@ -1038,9 +1043,11 @@
#define _MBUS_ABOX0_CTL 0x45038
#define _MBUS_ABOX1_CTL 0x45048
#define _MBUS_ABOX2_CTL 0x4504C
-#define MBUS_ABOX_CTL(x) _MMIO(_PICK(x, _MBUS_ABOX0_CTL, \
- _MBUS_ABOX1_CTL, \
- _MBUS_ABOX2_CTL))
+#define MBUS_ABOX_CTL(x) \
+ _MMIO(_PICK_EVEN_2RANGES(x, 2, \
+ _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \
+ _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL))
+
#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
@@ -1730,10 +1737,11 @@
#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6)
#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2)
#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0)
-#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
- _PICK((pipe), _PALETTE_A, \
- _PALETTE_B, _CHV_PALETTE_C) + \
- (i) * 4)
+#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
+ _PICK_EVEN_2RANGES(pipe, 2, \
+ _PALETTE_A, _PALETTE_B, \
+ _CHV_PALETTE_C, _CHV_PALETTE_C) + \
+ (i) * 4)
#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
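
Note: both replacements above (MBUS_ABOX_CTL and PALETTE) move from a _PICK() lookup table to _PICK_EVEN_2RANGES(), which derives the offset from two evenly spaced ranges split at a cut-over index. A standalone sketch of the arithmetic as I read it; the authoritative macro lives in the reg-defs header, and pick_even_2ranges() here is illustrative only:

	#include <stdint.h>

	static uint32_t pick_even_2ranges(unsigned int idx, unsigned int cut,
					  uint32_t a, uint32_t b,
					  uint32_t c, uint32_t d)
	{
		if (idx < cut)
			return a + idx * (b - a);
		return c + (idx - cut) * (d - c);
	}

	/*
	 * MBUS_ABOX_CTL example from the hunk above:
	 *   pick_even_2ranges(0, 2, 0x45038, 0x45048, 0x4504C, 0x4504C) == 0x45038
	 *   pick_even_2ranges(1, 2, ...)                                == 0x45048
	 *   pick_even_2ranges(2, 2, ...)                                == 0x4504C
	 */
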
@@ -1906,48 +1914,72 @@
#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915)
#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
-/* Pipe A timing regs */
-#define _HTOTAL_A 0x60000
-#define _HBLANK_A 0x60004
-#define _HSYNC_A 0x60008
-#define _VTOTAL_A 0x6000c
-#define _VBLANK_A 0x60010
-#define _VSYNC_A 0x60014
-#define _EXITLINE_A 0x60018
-#define _PIPEASRC 0x6001c
+/* Pipe/transcoder A timing regs */
+#define _TRANS_HTOTAL_A 0x60000
+#define HTOTAL_MASK REG_GENMASK(31, 16)
+#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
+#define HACTIVE_MASK REG_GENMASK(15, 0)
+#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
+#define _TRANS_HBLANK_A 0x60004
+#define HBLANK_END_MASK REG_GENMASK(31, 16)
+#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
+#define HBLANK_START_MASK REG_GENMASK(15, 0)
+#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
+#define _TRANS_HSYNC_A 0x60008
+#define HSYNC_END_MASK REG_GENMASK(31, 16)
+#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
+#define HSYNC_START_MASK REG_GENMASK(15, 0)
+#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
+#define _TRANS_VTOTAL_A 0x6000c
+#define VTOTAL_MASK REG_GENMASK(31, 16)
+#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
+#define VACTIVE_MASK REG_GENMASK(15, 0)
+#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
+#define _TRANS_VBLANK_A 0x60010
+#define VBLANK_END_MASK REG_GENMASK(31, 16)
+#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
+#define VBLANK_START_MASK REG_GENMASK(15, 0)
+#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
+#define _TRANS_VSYNC_A 0x60014
+#define VSYNC_END_MASK REG_GENMASK(31, 16)
+#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
+#define VSYNC_START_MASK REG_GENMASK(15, 0)
+#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
+#define _TRANS_EXITLINE_A 0x60018
+#define _PIPEASRC 0x6001c
#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
-#define _BCLRPAT_A 0x60020
-#define _VSYNCSHIFT_A 0x60028
-#define _PIPE_MULT_A 0x6002c
-
-/* Pipe B timing regs */
-#define _HTOTAL_B 0x61000
-#define _HBLANK_B 0x61004
-#define _HSYNC_B 0x61008
-#define _VTOTAL_B 0x6100c
-#define _VBLANK_B 0x61010
-#define _VSYNC_B 0x61014
-#define _PIPEBSRC 0x6101c
-#define _BCLRPAT_B 0x61020
-#define _VSYNCSHIFT_B 0x61028
-#define _PIPE_MULT_B 0x6102c
+#define _BCLRPAT_A 0x60020
+#define _TRANS_VSYNCSHIFT_A 0x60028
+#define _TRANS_MULT_A 0x6002c
+
+/* Pipe/transcoder B timing regs */
+#define _TRANS_HTOTAL_B 0x61000
+#define _TRANS_HBLANK_B 0x61004
+#define _TRANS_HSYNC_B 0x61008
+#define _TRANS_VTOTAL_B 0x6100c
+#define _TRANS_VBLANK_B 0x61010
+#define _TRANS_VSYNC_B 0x61014
+#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_B 0x61020
+#define _TRANS_VSYNCSHIFT_B 0x61028
+#define _TRANS_MULT_B 0x6102c
/* DSI 0 timing regs */
-#define _HTOTAL_DSI0 0x6b000
-#define _HSYNC_DSI0 0x6b008
-#define _VTOTAL_DSI0 0x6b00c
-#define _VSYNC_DSI0 0x6b014
-#define _VSYNCSHIFT_DSI0 0x6b028
+#define _TRANS_HTOTAL_DSI0 0x6b000
+#define _TRANS_HSYNC_DSI0 0x6b008
+#define _TRANS_VTOTAL_DSI0 0x6b00c
+#define _TRANS_VSYNC_DSI0 0x6b014
+#define _TRANS_VSYNCSHIFT_DSI0 0x6b028
/* DSI 1 timing regs */
-#define _HTOTAL_DSI1 0x6b800
-#define _HSYNC_DSI1 0x6b808
-#define _VTOTAL_DSI1 0x6b80c
-#define _VSYNC_DSI1 0x6b814
-#define _VSYNCSHIFT_DSI1 0x6b828
+#define _TRANS_HTOTAL_DSI1 0x6b800
+#define _TRANS_HSYNC_DSI1 0x6b808
+#define _TRANS_VTOTAL_DSI1 0x6b80c
+#define _TRANS_VSYNC_DSI1 0x6b814
+#define _TRANS_VSYNCSHIFT_DSI1 0x6b828
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
@@ -1958,18 +1990,18 @@
#define TRANSCODER_DSI0_OFFSET 0x6b000
#define TRANSCODER_DSI1_OFFSET 0x6b800
-#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A)
-#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A)
-#define HSYNC(trans) _MMIO_TRANS2(trans, _HSYNC_A)
-#define VTOTAL(trans) _MMIO_TRANS2(trans, _VTOTAL_A)
-#define VBLANK(trans) _MMIO_TRANS2(trans, _VBLANK_A)
-#define VSYNC(trans) _MMIO_TRANS2(trans, _VSYNC_A)
-#define BCLRPAT(trans) _MMIO_TRANS2(trans, _BCLRPAT_A)
-#define VSYNCSHIFT(trans) _MMIO_TRANS2(trans, _VSYNCSHIFT_A)
-#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
-#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
-
-#define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A)
+#define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A)
+#define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A)
+#define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A)
+#define TRANS_VTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_VTOTAL_A)
+#define TRANS_VBLANK(trans) _MMIO_TRANS2((trans), _TRANS_VBLANK_A)
+#define TRANS_VSYNC(trans) _MMIO_TRANS2((trans), _TRANS_VSYNC_A)
+#define BCLRPAT(trans) _MMIO_TRANS2((trans), _BCLRPAT_A)
+#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2((trans), _TRANS_VSYNCSHIFT_A)
+#define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC)
+#define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A)
+
+#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A)
#define EXITLINE_ENABLE REG_BIT(31)
#define EXITLINE_MASK REG_GENMASK(12, 0)
#define EXITLINE_SHIFT 0
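
Note: alongside the pipe-to-transcoder rename, the timing registers gain explicit REG_FIELD_PREP-style helpers (HACTIVE/HTOTAL and friends), so callers compose the value instead of open-coding shifts. A sketch of programming the horizontal total register; example_program_htotal() is illustrative, and the "value minus one" convention is assumed from how the display code programs these registers:

	static void example_program_htotal(struct drm_i915_private *i915,
					   enum transcoder cpu_transcoder,
					   int hdisplay, int htotal)
	{
		/* hardware takes "value - 1" in both fields (assumed convention) */
		intel_de_write(i915, TRANS_HTOTAL(cpu_transcoder),
			       HACTIVE(hdisplay - 1) | HTOTAL(htotal - 1));
	}
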
@@ -2266,110 +2298,6 @@
#define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14)
#define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13)
-/* Icelake DSC Rate Control Range Parameter Registers */
-#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
-#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
-#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
-#define RC_BPG_OFFSET_SHIFT 10
-#define RC_MAX_QP_SHIFT 5
-#define RC_MIN_QP_SHIFT 0
-
-#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
-#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
-#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
-
-#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
-#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
-#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
-
-#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
-#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
-#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
-
/* VGA port control */
#define ADPA _MMIO(0x61100)
#define PCH_ADPA _MMIO(0xe1100)
@@ -2451,18 +2379,7 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
-/*
- * HDMI/DP bits are g4x+
- *
- * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
- * Please check the detailed lore in the commit message for for experimental
- * evidence.
- */
-/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
-#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
-#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
-/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+/* HDMI/DP bits are g4x+ */
#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
@@ -2592,59 +2509,6 @@
#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
-/* LVDS port control */
-#define LVDS _MMIO(0x61180)
-/*
- * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
- * the DPLL semantics change when the LVDS is assigned to that pipe.
- */
-#define LVDS_PORT_EN (1 << 31)
-/* Selects pipe B for LVDS data. Must be set on pre-965. */
-#define LVDS_PIPE_SEL_SHIFT 30
-#define LVDS_PIPE_SEL_MASK (1 << 30)
-#define LVDS_PIPE_SEL(pipe) ((pipe) << 30)
-#define LVDS_PIPE_SEL_SHIFT_CPT 29
-#define LVDS_PIPE_SEL_MASK_CPT (3 << 29)
-#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29)
-/* LVDS dithering flag on 965/g4x platform */
-#define LVDS_ENABLE_DITHER (1 << 25)
-/* LVDS sync polarity flags. Set to invert (i.e. negative) */
-#define LVDS_VSYNC_POLARITY (1 << 21)
-#define LVDS_HSYNC_POLARITY (1 << 20)
-
-/* Enable border for unscaled (or aspect-scaled) display */
-#define LVDS_BORDER_ENABLE (1 << 15)
-/*
- * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
- * pixel.
- */
-#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
-#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
-#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
-/*
- * Controls the A3 data pair, which contains the additional LSBs for 24 bit
- * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
- * on.
- */
-#define LVDS_A3_POWER_MASK (3 << 6)
-#define LVDS_A3_POWER_DOWN (0 << 6)
-#define LVDS_A3_POWER_UP (3 << 6)
-/*
- * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
- * is set.
- */
-#define LVDS_CLKB_POWER_MASK (3 << 4)
-#define LVDS_CLKB_POWER_DOWN (0 << 4)
-#define LVDS_CLKB_POWER_UP (3 << 4)
-/*
- * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
- * setting for whether we are in dual-channel mode. The B3 pair will
- * additionally only be powered up when LVDS_A3_POWER_UP is set.
- */
-#define LVDS_B0B3_POWER_MASK (3 << 2)
-#define LVDS_B0B3_POWER_DOWN (0 << 2)
-#define LVDS_B0B3_POWER_UP (3 << 2)
-
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA _MMIO(0x61178)
/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
@@ -3492,61 +3356,61 @@
#define _PIPEADSL 0x70000
#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
-#define _PIPEACONF 0x70008
-#define PIPECONF_ENABLE REG_BIT(31)
-#define PIPECONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
-#define PIPECONF_STATE_ENABLE REG_BIT(30) /* i965+ */
-#define PIPECONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
-#define PIPECONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
-#define PIPECONF_FRAME_START_DELAY(x) REG_FIELD_PREP(PIPECONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
-#define PIPECONF_PIPE_LOCKED REG_BIT(25)
-#define PIPECONF_FORCE_BORDER REG_BIT(25)
-#define PIPECONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
-#define PIPECONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
-#define PIPECONF_GAMMA_MODE_8BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 0)
-#define PIPECONF_GAMMA_MODE_10BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 1)
-#define PIPECONF_GAMMA_MODE_12BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
-#define PIPECONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
-#define PIPECONF_GAMMA_MODE(x) REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
-#define PIPECONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
-#define PIPECONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 0)
-#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 4) /* gen4 only */
-#define PIPECONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 5) /* gen4 only */
-#define PIPECONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 6)
-#define PIPECONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 7) /* gen3 only */
+#define _TRANSACONF 0x70008
+#define TRANSCONF_ENABLE REG_BIT(31)
+#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
+#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
+#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
+#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
+#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
+#define TRANSCONF_PIPE_LOCKED REG_BIT(25)
+#define TRANSCONF_FORCE_BORDER REG_BIT(25)
+#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
+#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 0)
+#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 1)
+#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
+#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
+#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
+#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0)
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6)
+#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */
/*
* ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display,
* DBL=power saving pixel doubling, PF-ID* requires panel fitter
*/
-#define PIPECONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
-#define PIPECONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
-#define PIPECONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 0)
-#define PIPECONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 1)
-#define PIPECONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 3)
-#define PIPECONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
-#define PIPECONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
-#define PIPECONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
-#define PIPECONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
-#define PIPECONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(PIPECONF_MSA_TIMING_DELAY_MASK, (x))
-#define PIPECONF_CXSR_DOWNCLOCK REG_BIT(16)
-#define PIPECONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
-#define PIPECONF_COLOR_RANGE_SELECT REG_BIT(13)
-#define PIPECONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
-#define PIPECONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
-#define PIPECONF_BPC_8 REG_FIELD_PREP(PIPECONF_BPC_MASK, 0)
-#define PIPECONF_BPC_10 REG_FIELD_PREP(PIPECONF_BPC_MASK, 1)
-#define PIPECONF_BPC_6 REG_FIELD_PREP(PIPECONF_BPC_MASK, 2)
-#define PIPECONF_BPC_12 REG_FIELD_PREP(PIPECONF_BPC_MASK, 3)
-#define PIPECONF_DITHER_EN REG_BIT(4)
-#define PIPECONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define PIPECONF_DITHER_TYPE_SP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 0)
-#define PIPECONF_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 1)
-#define PIPECONF_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 2)
-#define PIPECONF_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 3)
+#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
+#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
+#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0)
+#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1)
+#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3)
+#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
+#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
+#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
+#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
+#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x))
+#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16)
+#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
+#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13)
+#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
+#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
+#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0)
+#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1)
+#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)
+#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3)
+#define TRANSCONF_DITHER_EN REG_BIT(4)
+#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0)
+#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
+#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
+#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
@@ -3615,7 +3479,7 @@
#define PIPE_DSI0_OFFSET 0x7b000
#define PIPE_DSI1_OFFSET 0x7b800
-#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF)
+#define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF)
#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
@@ -4255,7 +4119,7 @@
/* Pipe B */
#define _PIPEBDSL (DISPLAY_MMIO_BASE(dev_priv) + 0x71000)
-#define _PIPEBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
+#define _TRANSBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
#define _PIPEBSTAT (DISPLAY_MMIO_BASE(dev_priv) + 0x71024)
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
@@ -5432,6 +5296,7 @@
#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
#define XELPD_PIPE_SOFT_UNDERRUN (1 << 22)
#define XELPD_PIPE_HARD_UNDERRUN (1 << 21)
+#define GEN12_PIPE_VBLANK_UNMOD (1 << 19)
#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
@@ -5737,6 +5602,7 @@
#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
#define LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
#define LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
@@ -6391,9 +6257,6 @@
#define FDI_PLL_CTL_1 _MMIO(0xfe000)
#define FDI_PLL_CTL_2 _MMIO(0xfe004)
-#define PCH_LVDS _MMIO(0xe1180)
-#define LVDS_DETECTED (1 << 1)
-
#define _PCH_DP_B 0xe4100
#define PCH_DP_B _MMIO(_PCH_DP_B)
#define _PCH_DPB_AUX_CH_CTL 0xe4110
@@ -7214,21 +7077,23 @@ enum skl_power_gate {
ADLS_DPCLKA_DDIK_SEL_MASK)
/* ICL PLL */
-#define DPLL0_ENABLE 0x46010
-#define DPLL1_ENABLE 0x46014
+#define _DPLL0_ENABLE 0x46010
+#define _DPLL1_ENABLE 0x46014
#define _ADLS_DPLL2_ENABLE 0x46018
#define _ADLS_DPLL3_ENABLE 0x46030
-#define PLL_ENABLE (1 << 31)
-#define PLL_LOCK (1 << 30)
-#define PLL_POWER_ENABLE (1 << 27)
-#define PLL_POWER_STATE (1 << 26)
-#define ICL_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
- _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE)
+#define PLL_ENABLE REG_BIT(31)
+#define PLL_LOCK REG_BIT(30)
+#define PLL_POWER_ENABLE REG_BIT(27)
+#define PLL_POWER_STATE REG_BIT(26)
+#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE))
#define _DG2_PLL3_ENABLE 0x4601C
-#define DG2_PLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
- _ADLS_DPLL2_ENABLE, _DG2_PLL3_ENABLE)
+#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE))
#define TBT_PLL_ENABLE _MMIO(0x46020)
@@ -7236,13 +7101,14 @@ enum skl_power_gate {
#define _MG_PLL2_ENABLE 0x46034
#define _MG_PLL3_ENABLE 0x46038
#define _MG_PLL4_ENABLE 0x4603C
-/* Bits are the same as DPLL0_ENABLE */
+/* Bits are the same as _DPLL0_ENABLE */
#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
_MG_PLL2_ENABLE)
/* DG1 PLL */
-#define DG1_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
- _MG_PLL1_ENABLE, _MG_PLL2_ENABLE)
+#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _MG_PLL1_ENABLE, _MG_PLL2_ENABLE))
/* ADL-P Type C PLL */
#define PORTTC1_PLL_ENABLE 0x46038
@@ -7302,9 +7168,9 @@ enum skl_power_gate {
#define _TGL_DPLL0_CFGCR0 0x164284
#define _TGL_DPLL1_CFGCR0 0x16428C
#define _TGL_TBTPLL_CFGCR0 0x16429C
-#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0, \
- _TGL_TBTPLL_CFGCR0)
+#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0))
#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
_TGL_DPLL1_CFGCR0)
@@ -7317,40 +7183,36 @@ enum skl_power_gate {
#define _TGL_DPLL0_CFGCR1 0x164288
#define _TGL_DPLL1_CFGCR1 0x164290
#define _TGL_TBTPLL_CFGCR1 0x1642A0
-#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1, \
- _TGL_TBTPLL_CFGCR1)
+#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1))
#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
_TGL_DPLL1_CFGCR1)
#define _DG1_DPLL2_CFGCR0 0x16C284
#define _DG1_DPLL3_CFGCR0 0x16C28C
-#define DG1_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0, \
- _DG1_DPLL2_CFGCR0, \
- _DG1_DPLL3_CFGCR0)
+#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0))
#define _DG1_DPLL2_CFGCR1 0x16C288
#define _DG1_DPLL3_CFGCR1 0x16C290
-#define DG1_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1, \
- _DG1_DPLL2_CFGCR1, \
- _DG1_DPLL3_CFGCR1)
+#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1))
/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
-#define _ADLS_DPLL3_CFGCR0 0x1642C0
#define _ADLS_DPLL4_CFGCR0 0x164294
-#define ADLS_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0, \
- _ADLS_DPLL4_CFGCR0, \
- _ADLS_DPLL3_CFGCR0)
+#define _ADLS_DPLL3_CFGCR0 0x1642C0
+#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0))
-#define _ADLS_DPLL3_CFGCR1 0x1642C4
#define _ADLS_DPLL4_CFGCR1 0x164298
-#define ADLS_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1, \
- _ADLS_DPLL4_CFGCR1, \
- _ADLS_DPLL3_CFGCR1)
+#define _ADLS_DPLL3_CFGCR1 0x1642C4
+#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1))
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
@@ -7683,44 +7545,6 @@ enum skl_power_gate {
#define PIPE_FRMTMSTMP(pipe) \
_MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)
-/* Display Stream Splitter Control */
-#define DSS_CTL1 _MMIO(0x67400)
-#define SPLITTER_ENABLE (1 << 31)
-#define JOINER_ENABLE (1 << 30)
-#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
-#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
-#define OVERLAP_PIXELS_MASK (0xf << 16)
-#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
-#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
-#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
-#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
-
-#define DSS_CTL2 _MMIO(0x67404)
-#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
-#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
-#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
-#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
-
-#define _ICL_PIPE_DSS_CTL1_PB 0x78200
-#define _ICL_PIPE_DSS_CTL1_PC 0x78400
-#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_PIPE_DSS_CTL1_PB, \
- _ICL_PIPE_DSS_CTL1_PC)
-#define BIG_JOINER_ENABLE (1 << 29)
-#define MASTER_BIG_JOINER_ENABLE (1 << 28)
-#define VGA_CENTERING_ENABLE (1 << 27)
-#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
-#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
-#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
-#define UNCOMPRESSED_JOINER_MASTER (1 << 21)
-#define UNCOMPRESSED_JOINER_SLAVE (1 << 20)
-
-#define _ICL_PIPE_DSS_CTL2_PB 0x78204
-#define _ICL_PIPE_DSS_CTL2_PC 0x78404
-#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_PIPE_DSS_CTL2_PB, \
- _ICL_PIPE_DSS_CTL2_PC)
-
#define GGC _MMIO(0x108040)
#define GMS_MASK REG_GENMASK(15, 8)
#define GGMS_MASK REG_GENMASK(7, 6)
@@ -7744,314 +7568,6 @@ enum skl_power_gate {
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
-/* Icelake Display Stream Compression Registers */
-#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200)
-#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
-#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
-#define DSC_ALT_ICH_SEL (1 << 20)
-#define DSC_VBR_ENABLE (1 << 19)
-#define DSC_422_ENABLE (1 << 18)
-#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
-#define DSC_BLOCK_PREDICTION (1 << 16)
-#define DSC_LINE_BUF_DEPTH_SHIFT 12
-#define DSC_BPC_SHIFT 8
-#define DSC_VER_MIN_SHIFT 4
-#define DSC_VER_MAJ (0x1 << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204)
-#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
-#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
-#define DSC_BPP(bpp) ((bpp) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208)
-#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
-#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
-#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
-#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C)
-#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
-#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
-#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
-#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210)
-#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
-#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
-#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
-#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214)
-#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
-#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
-#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16)
-#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218)
-#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
-#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
-#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
-#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
-#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
-#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C)
-#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
-#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
-#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
-#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220)
-#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
-#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
-#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
-#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224)
-#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
-#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
-#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
-#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228)
-#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
-#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
-#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
-#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
-#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
-#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C)
-#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
-#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260)
-#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
-#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264)
-#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
-#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268)
-#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
-#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C)
-#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC
-#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270)
-#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0
-#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
-#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
-#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
-#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
-
-/* Icelake Rate Control Buffer Threshold Registers */
-#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
-#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
-#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30)
-#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254)
-#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354)
-#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454)
-#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554)
-#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4)
-#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_0_PB, \
- _ICL_DSC0_RC_BUF_THRESH_0_PC)
-#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
- _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
-#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_0_PB, \
- _ICL_DSC1_RC_BUF_THRESH_0_PC)
-#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
- _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
-
-#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238)
-#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4)
-#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38)
-#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C)
-#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C)
-#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C)
-#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C)
-#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4)
-#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_1_PB, \
- _ICL_DSC0_RC_BUF_THRESH_1_PC)
-#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
- _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
-#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_1_PB, \
- _ICL_DSC1_RC_BUF_THRESH_1_PC)
-#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
- _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
-
#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
#define MODULAR_FIA_MASK (1 << 4)
#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
@@ -8095,8 +7611,54 @@ enum skl_power_gate {
#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
-#define DSB_ENABLE (1 << 31)
-#define DSB_STATUS (1 << 0)
+#define DSB_ENABLE REG_BIT(31)
+#define DSB_BUF_REITERATE REG_BIT(29)
+#define DSB_WAIT_FOR_VBLANK REG_BIT(28)
+#define DSB_WAIT_FOR_LINE_IN REG_BIT(27)
+#define DSB_HALT REG_BIT(16)
+#define DSB_NON_POSTED REG_BIT(8)
+#define DSB_STATUS_BUSY REG_BIT(0)
+#define DSB_MMIOCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xc)
+#define DSB_MMIO_DEAD_CLOCKS_ENABLE REG_BIT(31)
+#define DSB_MMIO_DEAD_CLOCKS_COUNT_MASK REG_GENMASK(15, 8)
+#define DSB_MMIO_DEAD_CLOCKS_COUNT(x) REG_FIELD_PREP(DSB_MMIO_DEAD_CLOCKS_COUNT_MASK, (x))
+#define DSB_MMIO_CYCLES_MASK REG_GENMASK(7, 0)
+#define DSB_MMIO_CYCLES(x) REG_FIELD_PREP(DSB_MMIO_CYCLES_MASK, (x))
+#define DSB_POLLFUNC(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x10)
+#define DSB_POLL_ENABLE REG_BIT(31)
+#define DSB_POLL_WAIT_MASK REG_GENMASK(30, 23)
+#define DSB_POLL_WAIT(x) REG_FIELD_PREP(DSB_POLL_WAIT_MASK, (x)) /* usec */
+#define DSB_POLL_COUNT_MASK REG_GENMASK(22, 15)
+#define DSB_POLL_COUNT(x) REG_FIELD_PREP(DSB_POLL_COUNT_MASK, (x))
+#define DSB_DEBUG(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x14)
+#define DSB_POLLMASK(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x1c)
+#define DSB_STATUS(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x24)
+#define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28)
+#define DSB_ATS_FAULT_INT_EN REG_BIT(20)
+#define DSB_GTT_FAULT_INT_EN REG_BIT(19)
+#define DSB_RSPTIMEOUT_INT_EN REG_BIT(18)
+#define DSB_POLL_ERR_INT_EN REG_BIT(17)
+#define DSB_PROG_INT_EN REG_BIT(16)
+#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4)
+#define DSB_GTT_FAULT_INT_STATUS REG_BIT(3)
+#define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2)
+#define DSB_POLL_ERR_INT_STATUS REG_BIT(1)
+#define DSB_PROG_INT_STATUS REG_BIT(0)
+#define DSB_CURRENT_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x2c)
+#define DSB_RM_TIMEOUT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x30)
+#define DSB_RM_CLAIM_TIMEOUT REG_BIT(31)
+#define DSB_RM_READY_TIMEOUT REG_BIT(30)
+#define DSB_RM_CLAIM_TIMEOUT_COUNT_MASK REG_GENMASK(23, 16)
+#define DSB_RM_CLAIM_TIMEOUT_COUNT(x) REG_FIELD_PREP(DSB_RM_CLAIM_TIMEOUT_COUNT_MASK, (x)) /* clocks */
+#define DSB_RM_READY_TIMEOUT_VALUE_MASK REG_GENMASK(15, 0)
+#define DSB_RM_READY_TIMEOUT_VALUE(x) REG_FIELD_PREP(DSB_RM_READY_TIMEOUT_VALUE_MASK, (x)) /* usec */
+#define DSB_RMTIMEOUTREG_CAPTURE(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x34)
+#define DSB_PMCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x38)
+#define DSB_PMCTRL_2(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x3c)
+#define DSB_PF_LN_LOWER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x40)
+#define DSB_PF_LN_UPPER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x44)
+#define DSB_BUFRPT_CNT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x48)
+#define DSB_CHICKEN(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xf0)
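[Editor's note: the block above is a straight register listing, but it leans on the driver's REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers throughout. As a rough, self-contained sketch — the helpers are re-implemented here rather than taken from i915_reg_defs.h, and the example value is arbitrary — a DSB_POLLFUNC-style word is composed like this:]

#include <assert.h>
#include <stdint.h>

#define BIT32(n)		(1u << (n))
#define GENMASK32(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP32(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define POLL_ENABLE		BIT32(31)		/* stands in for DSB_POLL_ENABLE */
#define POLL_WAIT_MASK		GENMASK32(30, 23)	/* stands in for DSB_POLL_WAIT_MASK, usec */
#define POLL_COUNT_MASK		GENMASK32(22, 15)	/* stands in for DSB_POLL_COUNT_MASK */

int main(void)
{
	/* e.g. poll every 5 usec, giving up after 10 attempts */
	uint32_t val = POLL_ENABLE |
		       FIELD_PREP32(POLL_WAIT_MASK, 5) |
		       FIELD_PREP32(POLL_COUNT_MASK, 10);

	assert(val == 0x82850000u);
	return 0;
}
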
#define CLKREQ_POLICY _MMIO(0x101038)
#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index be43580a6979..db26de6b57bc 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -120,6 +120,35 @@
#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
/*
+ * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets.
+ * @__c_index corresponds to the index in which the second range starts to be
+ * used. Using math interval notation, the first range is used for indexes [ 0,
+ * @__c_index), while the second range is used for [ @__c_index, ... ). Example:
+ *
+ * #define _FOO_A 0xf000
+ * #define _FOO_B 0xf004
+ * #define _FOO_C 0xf008
+ * #define _SUPER_FOO_A 0xa000
+ * #define _SUPER_FOO_B 0xa100
+ * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \
+ * _FOO_A, _FOO_B, \
+ * _SUPER_FOO_A, _SUPER_FOO_B))
+ *
+ * This expands to:
+ * 0: 0xf000,
+ * 1: 0xf004,
+ * 2: 0xf008,
+ * 3: 0xa000,
+ * 4: 0xa100,
+ * 5: 0xa200,
+ * ...
+ */
+#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \
+ (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \
+ ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \
+ _PICK_EVEN((__index) - (__c_index), __c, __d)))
+
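[Editor's note: the arithmetic above is easy to sanity-check outside the kernel. A minimal userspace sketch — simplified names, no _MMIO() wrapping, no BUILD_BUG_ON_ZERO() guard — reproduces the offsets the converted ICL_DPLL_ENABLE() users rely on; repeating the last register in the second range is what pins every index past __c_index to a single address:]

#include <assert.h>
#include <stdint.h>

/* Standalone illustration only; the real macros live in i915_reg_defs.h. */
#define PICK_EVEN(idx, a, b) ((a) + (idx) * ((b) - (a)))
#define PICK_EVEN_2RANGES(idx, c_idx, a, b, c, d) \
	((idx) < (c_idx) ? PICK_EVEN((idx), (a), (b)) : \
			   PICK_EVEN((idx) - (c_idx), (c), (d)))

#define DPLL0_ENABLE		0x46010
#define DPLL1_ENABLE		0x46014
#define ADLS_DPLL3_ENABLE	0x46030

static uint32_t icl_dpll_enable(int pll)
{
	return PICK_EVEN_2RANGES(pll, 3, DPLL0_ENABLE, DPLL1_ENABLE,
				 ADLS_DPLL3_ENABLE, ADLS_DPLL3_ENABLE);
}

int main(void)
{
	assert(icl_dpll_enable(0) == 0x46010);	/* first range, even spacing */
	assert(icl_dpll_enable(1) == 0x46014);
	assert(icl_dpll_enable(2) == 0x46018);
	assert(icl_dpll_enable(3) == 0x46030);	/* second range takes over */
	return 0;
}
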
+/*
* Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
*
* Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
@@ -136,6 +165,8 @@ typedef struct {
u32 reg;
} i915_mcr_reg_t;
+#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) })
+
#define INVALID_MMIO_REG _MMIO(0)
/*
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f949a9495758..630a732aaecc 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -43,11 +43,11 @@
#include "gt/intel_rps.h"
#include "i915_active.h"
+#include "i915_config.h"
#include "i915_deps.h"
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_trace.h"
-#include "intel_pm.h"
struct execute_cb {
struct irq_work work;
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index 756289e43dff..7c7a86921b1c 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -98,8 +98,10 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
st = &rsgt->table;
/* restricted by sg_alloc_table */
if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, segment_pages),
- unsigned int)))
+ unsigned int))) {
+ i915_refct_sgt_put(rsgt);
return ERR_PTR(-E2BIG);
+ }
if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
GFP_KERNEL)) {
@@ -183,8 +185,10 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
i915_refct_sgt_init(rsgt, size);
st = &rsgt->table;
/* restricted by sg_alloc_table */
- if (WARN_ON(overflows_type(PFN_UP(res->size), unsigned int)))
+ if (WARN_ON(overflows_type(PFN_UP(res->size), unsigned int))) {
+ i915_refct_sgt_put(rsgt);
return ERR_PTR(-E2BIG);
+ }
if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
i915_refct_sgt_put(rsgt);
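[Editor's note: both scatterlist hunks follow the same cleanup rule — i915_refct_sgt_init() hands the caller a reference, so the new overflow bail-outs must drop it just as the existing sg_alloc_table() failure path already does. A simplified, self-contained sketch of that put-before-early-return pattern, with stand-in types rather than the i915 ones:]

#include <stdlib.h>

struct refct_obj {
	int refcount;
};

static void obj_put(struct refct_obj *obj)
{
	if (--obj->refcount == 0)
		free(obj);
}

static struct refct_obj *obj_create(unsigned long long size)
{
	struct refct_obj *obj = malloc(sizeof(*obj));

	if (!obj)
		return NULL;
	obj->refcount = 1;		/* caller now owns one reference */

	/* stand-in for the overflows_type()/WARN_ON() size check */
	if (size > 0xffffffffull) {
		obj_put(obj);		/* without this, the early return leaks obj */
		return NULL;		/* the driver returns ERR_PTR(-E2BIG) here */
	}

	return obj;
}

int main(void)
{
	struct refct_obj *obj = obj_create(4096);

	if (obj)
		obj_put(obj);
	return 0;
}
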
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index 23777d500cdf..f45bd6b6cede 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -19,6 +19,10 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
return;
}
+ if (!HAS_DISPLAY(i915)) {
+ dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n");
+ return;
+ }
if (state == VGA_SWITCHEROO_ON) {
drm_info(&i915->drm, "switched on\n");
@@ -44,7 +48,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return i915 && atomic_read(&i915->drm.open_count) == 0;
+ return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 595e8b574990..e88bb4f04305 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -37,7 +37,6 @@
#include "i915_drv.h"
#include "i915_sysfs.h"
-#include "intel_pm.h"
struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 5272e2be990e..f51fd9fd4c89 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -26,6 +26,7 @@
#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>
+#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_tiling.h"
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 05e90d09b208..fc5cd14adfcc 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -29,6 +29,7 @@
#include "display/intel_cdclk.h"
#include "display/intel_de.h"
+#include "display/intel_display.h"
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -118,9 +119,14 @@ void intel_device_info_print(const struct intel_device_info *info,
drm_printf(p, "display version: %u\n",
runtime->display.ip.ver);
+ drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step));
+ drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step));
+ drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step));
+ drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step));
+
drm_printf(p, "gt: %d\n", info->gt);
- drm_printf(p, "memory-regions: %x\n", runtime->memory_regions);
- drm_printf(p, "page-sizes: %x\n", runtime->page_sizes);
+ drm_printf(p, "memory-regions: 0x%x\n", runtime->memory_regions);
+ drm_printf(p, "page-sizes: 0x%x\n", runtime->page_sizes);
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size);
drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type);
@@ -201,6 +207,10 @@ static const u16 subplatform_rpl_ids[] = {
INTEL_RPLP_IDS(0),
};
+static const u16 subplatform_rplu_ids[] = {
+ INTEL_RPLU_IDS(0),
+};
+
static const u16 subplatform_g10_ids[] = {
INTEL_DG2_G10_IDS(0),
INTEL_ATS_M150_IDS(0),
@@ -268,6 +278,9 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
} else if (find_devid(devid, subplatform_rpl_ids,
ARRAY_SIZE(subplatform_rpl_ids))) {
mask = BIT(INTEL_SUBPLATFORM_RPL);
+ if (find_devid(devid, subplatform_rplu_ids,
+ ARRAY_SIZE(subplatform_rplu_ids)))
+ mask |= BIT(INTEL_SUBPLATFORM_RPLU);
} else if (find_devid(devid, subplatform_g10_ids,
ARRAY_SIZE(subplatform_g10_ids))) {
mask = BIT(INTEL_SUBPLATFORM_G10);
@@ -435,6 +448,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_sprites[pipe] = 1;
}
+ if (HAS_DISPLAY(dev_priv) &&
+ (IS_DGFX(dev_priv) || DISPLAY_VER(dev_priv) >= 14) &&
+ !(intel_de_read(dev_priv, GU_CNTL_PROTECTED) & DEPRESENT)) {
+ drm_info(&dev_priv->drm, "Display not present, disabling\n");
+
+ runtime->pipe_mask = 0;
+ }
+
if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
@@ -456,8 +477,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
drm_info(&dev_priv->drm,
"Display fused off, disabling\n");
runtime->pipe_mask = 0;
- runtime->cpu_transcoder_mask = 0;
- runtime->fbc_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
drm_info(&dev_priv->drm, "PipeC fused off\n");
runtime->pipe_mask &= ~BIT(PIPE_C);
@@ -534,5 +553,5 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
{
drm_printf(p, "Has logical contexts? %s\n",
str_yes_no(caps->has_logical_contexts));
- drm_printf(p, "scheduler: %x\n", caps->scheduler);
+ drm_printf(p, "scheduler: 0x%x\n", caps->scheduler);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index d588e5fd2eea..b30cc8b97c3a 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -29,7 +29,7 @@
#include "intel_step.h"
-#include "display/intel_display.h"
+#include "display/intel_display_limits.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_context_types.h"
@@ -127,6 +127,7 @@ enum intel_platform {
* bit set
*/
#define INTEL_SUBPLATFORM_N 1
+#define INTEL_SUBPLATFORM_RPLU 2
/* MTL */
#define INTEL_SUBPLATFORM_M 0
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index ce6b3c3b636a..2b3fe469b360 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -5,8 +5,10 @@
#include "display/intel_audio_regs.h"
#include "display/intel_backlight_regs.h"
+#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_lvds_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
#include "gvt/gvt.h"
@@ -116,10 +118,10 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(PIPEDSL(PIPE_B));
MMIO_D(PIPEDSL(PIPE_C));
MMIO_D(PIPEDSL(_PIPE_EDP));
- MMIO_D(PIPECONF(PIPE_A));
- MMIO_D(PIPECONF(PIPE_B));
- MMIO_D(PIPECONF(PIPE_C));
- MMIO_D(PIPECONF(_PIPE_EDP));
+ MMIO_D(TRANSCONF(TRANSCODER_A));
+ MMIO_D(TRANSCONF(TRANSCODER_B));
+ MMIO_D(TRANSCONF(TRANSCODER_C));
+ MMIO_D(TRANSCONF(TRANSCODER_EDP));
MMIO_D(PIPESTAT(PIPE_A));
MMIO_D(PIPESTAT(PIPE_B));
MMIO_D(PIPESTAT(PIPE_C));
@@ -217,41 +219,41 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(SPRSCALE(PIPE_C));
MMIO_D(SPRSURFLIVE(PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
- MMIO_D(HTOTAL(TRANSCODER_A));
- MMIO_D(HBLANK(TRANSCODER_A));
- MMIO_D(HSYNC(TRANSCODER_A));
- MMIO_D(VTOTAL(TRANSCODER_A));
- MMIO_D(VBLANK(TRANSCODER_A));
- MMIO_D(VSYNC(TRANSCODER_A));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_A));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_A));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_A));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_A));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_A));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_A));
MMIO_D(BCLRPAT(TRANSCODER_A));
- MMIO_D(VSYNCSHIFT(TRANSCODER_A));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_A));
MMIO_D(PIPESRC(TRANSCODER_A));
- MMIO_D(HTOTAL(TRANSCODER_B));
- MMIO_D(HBLANK(TRANSCODER_B));
- MMIO_D(HSYNC(TRANSCODER_B));
- MMIO_D(VTOTAL(TRANSCODER_B));
- MMIO_D(VBLANK(TRANSCODER_B));
- MMIO_D(VSYNC(TRANSCODER_B));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_B));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_B));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_B));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_B));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_B));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_B));
MMIO_D(BCLRPAT(TRANSCODER_B));
- MMIO_D(VSYNCSHIFT(TRANSCODER_B));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_B));
MMIO_D(PIPESRC(TRANSCODER_B));
- MMIO_D(HTOTAL(TRANSCODER_C));
- MMIO_D(HBLANK(TRANSCODER_C));
- MMIO_D(HSYNC(TRANSCODER_C));
- MMIO_D(VTOTAL(TRANSCODER_C));
- MMIO_D(VBLANK(TRANSCODER_C));
- MMIO_D(VSYNC(TRANSCODER_C));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_C));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_C));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_C));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_C));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_C));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_C));
MMIO_D(BCLRPAT(TRANSCODER_C));
- MMIO_D(VSYNCSHIFT(TRANSCODER_C));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_C));
MMIO_D(PIPESRC(TRANSCODER_C));
- MMIO_D(HTOTAL(TRANSCODER_EDP));
- MMIO_D(HBLANK(TRANSCODER_EDP));
- MMIO_D(HSYNC(TRANSCODER_EDP));
- MMIO_D(VTOTAL(TRANSCODER_EDP));
- MMIO_D(VBLANK(TRANSCODER_EDP));
- MMIO_D(VSYNC(TRANSCODER_EDP));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_EDP));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_EDP));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_EDP));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_EDP));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_EDP));
MMIO_D(BCLRPAT(TRANSCODER_EDP));
- MMIO_D(VSYNCSHIFT(TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_EDP));
MMIO_D(PIPE_DATA_M1(TRANSCODER_A));
MMIO_D(PIPE_DATA_N1(TRANSCODER_A));
MMIO_D(PIPE_DATA_M2(TRANSCODER_A));
@@ -492,9 +494,9 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GAMMA_MODE(PIPE_A));
MMIO_D(GAMMA_MODE(PIPE_B));
MMIO_D(GAMMA_MODE(PIPE_C));
- MMIO_D(PIPE_MULT(PIPE_A));
- MMIO_D(PIPE_MULT(PIPE_B));
- MMIO_D(PIPE_MULT(PIPE_C));
+ MMIO_D(TRANS_MULT(TRANSCODER_A));
+ MMIO_D(TRANS_MULT(TRANSCODER_B));
+ MMIO_D(TRANS_MULT(TRANSCODER_C));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C));
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index b9a164efd6ae..3d1fdea9811d 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -235,7 +235,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
mem->i915 = i915;
- mem->region = (struct resource)DEFINE_RES_MEM(start, size);
+ mem->region = DEFINE_RES_MEM(start, size);
mem->io_start = io_start;
mem->io_size = io_size;
mem->min_page_size = min_page_size;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ac61df46d02c..c45af0d981fd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
*/
#include "display/intel_de.h"
+#include "display/intel_display.h"
#include "display/intel_display_trace.h"
#include "display/skl_watermark.h"
@@ -43,13 +44,6 @@ struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
};
-/* used in computing the new watermarks state */
-struct intel_wm_config {
- unsigned int num_pipes_active;
- bool sprites_enabled;
- bool sprites_scaled;
-};
-
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -130,3961 +124,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
-static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
-
- switch (tmp & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_533:
- dev_priv->fsb_freq = 533; /* 133*4 */
- break;
- case CLKCFG_FSB_800:
- dev_priv->fsb_freq = 800; /* 200*4 */
- break;
- case CLKCFG_FSB_667:
- dev_priv->fsb_freq = 667; /* 167*4 */
- break;
- case CLKCFG_FSB_400:
- dev_priv->fsb_freq = 400; /* 100*4 */
- break;
- }
-
- switch (tmp & CLKCFG_MEM_MASK) {
- case CLKCFG_MEM_533:
- dev_priv->mem_freq = 533;
- break;
- case CLKCFG_MEM_667:
- dev_priv->mem_freq = 667;
- break;
- case CLKCFG_MEM_800:
- dev_priv->mem_freq = 800;
- break;
- }
-
- /* detect pineview DDR3 setting */
- tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
- dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-}
-
-static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
-{
- u16 ddrpll, csipll;
-
- ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
- csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
-
- switch (ddrpll & 0xff) {
- case 0xc:
- dev_priv->mem_freq = 800;
- break;
- case 0x10:
- dev_priv->mem_freq = 1066;
- break;
- case 0x14:
- dev_priv->mem_freq = 1333;
- break;
- case 0x18:
- dev_priv->mem_freq = 1600;
- break;
- default:
- drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
- dev_priv->mem_freq = 0;
- break;
- }
-
- switch (csipll & 0x3ff) {
- case 0x00c:
- dev_priv->fsb_freq = 3200;
- break;
- case 0x00e:
- dev_priv->fsb_freq = 3733;
- break;
- case 0x010:
- dev_priv->fsb_freq = 4266;
- break;
- case 0x012:
- dev_priv->fsb_freq = 4800;
- break;
- case 0x014:
- dev_priv->fsb_freq = 5333;
- break;
- case 0x016:
- dev_priv->fsb_freq = 5866;
- break;
- case 0x018:
- dev_priv->fsb_freq = 6400;
- break;
- default:
- drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
- dev_priv->fsb_freq = 0;
- break;
- }
-}
-
-static const struct cxsr_latency cxsr_latency_table[] = {
- {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
- {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
- {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
-
- {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
- {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
- {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
-
- {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
- {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
- {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
-
- {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
- {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
- {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
-
- {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
- {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
- {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
-
- {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
- {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
- {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
-};
-
-static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
- bool is_ddr3,
- int fsb,
- int mem)
-{
- const struct cxsr_latency *latency;
- int i;
-
- if (fsb == 0 || mem == 0)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
- latency = &cxsr_latency_table[i];
- if (is_desktop == latency->is_desktop &&
- is_ddr3 == latency->is_ddr3 &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- return latency;
- }
-
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-
- return NULL;
-}
-
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
-{
- u32 val;
-
- vlv_punit_get(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
- if (enable)
- val &= ~FORCE_DDR_HIGH_FREQ;
- else
- val |= FORCE_DDR_HIGH_FREQ;
- val &= ~FORCE_DDR_LOW_FREQ;
- val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
-
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- drm_err(&dev_priv->drm,
- "timed out waiting for Punit DDR DVFS request\n");
-
- vlv_punit_put(dev_priv);
-}
-
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
-{
- u32 val;
-
- vlv_punit_get(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- if (enable)
- val |= DSP_MAXFIFO_PM5_ENABLE;
- else
- val &= ~DSP_MAXFIFO_PM5_ENABLE;
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
-
- vlv_punit_put(dev_priv);
-}
-
-#define FW_WM(value, plane) \
- (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
-{
- bool was_enabled;
- u32 val;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
- } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
- if (enable)
- val |= PINEVIEW_SELF_REFRESH_EN;
- else
- val &= ~PINEVIEW_SELF_REFRESH_EN;
- intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
- _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_I915GM(dev_priv)) {
- /*
- * FIXME can't find a bit like this for 915G, and
- * yet it does have the related watermark in
- * FW_BLC_SELF. What's going on?
- */
- was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
- _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, INSTPM, val);
- intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
- } else {
- return false;
- }
-
- trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
-
- drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
- str_enabled_disabled(enable),
- str_enabled_disabled(was_enabled));
-
- return was_enabled;
-}
-
-/**
- * intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
- * @enable: Allow vs. disallow CxSR
- *
- * Allow or disallow the system to enter a special CxSR
- * (C-state self refresh) state. What typically happens in CxSR mode
- * is that several display FIFOs may get combined into a single larger
- * FIFO for a particular plane (so called max FIFO mode) to allow the
- * system to defer memory fetches longer, and the memory will enter
- * self refresh.
- *
- * Note that enabling CxSR does not guarantee that the system enters
- * this special mode, nor does it guarantee that the system stays
- * in that mode once entered. So this just allows/disallows the system
- * to autonomously utilize the CxSR mode. Other factors such as core
- * C-states will affect when/if the system actually enters/exits the
- * CxSR mode.
- *
- * Note that on VLV/CHV this actually only controls the max FIFO mode,
- * and the system is free to enter/exit memory self refresh at any time
- * even when the use of CxSR has been disallowed.
- *
- * While the system is actually in the CxSR/max FIFO mode, some plane
- * control registers will not get latched on vblank. Thus in order to
- * guarantee the system will respond to changes in the plane registers
- * we must always disallow CxSR prior to making changes to those registers.
- * Unfortunately the system will re-evaluate the CxSR conditions at
- * frame start which happens after vblank start (which is when the plane
- * registers would get latched), so we can't proceed with the plane update
- * during the same frame where we disallowed CxSR.
- *
- * Certain platforms also have a deeper HPLL SR mode. Fortunately the
- * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
- * the hardware w.r.t. HPLL SR when writing to plane registers.
- * Disallowing just CxSR is sufficient.
- */
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
-{
- bool ret;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- ret = _intel_set_memory_cxsr(dev_priv, enable);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.wm.vlv.cxsr = enable;
- else if (IS_G4X(dev_priv))
- dev_priv->display.wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-
- return ret;
-}
-
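To make the ordering rule in the comment above concrete, here is a purely schematic sketch of how a caller would sequence a plane update around CxSR; the helpers other than intel_set_memory_cxsr() are hypothetical placeholders, not the driver's actual call sites:

	/* schematic only; wait_for_next_vblank()/write_plane_registers() are placeholders */
	intel_set_memory_cxsr(dev_priv, false);	/* disallow max FIFO / self refresh */
	wait_for_next_vblank(crtc);		/* let a frame start pass so the disallow takes effect */
	write_plane_registers(crtc);		/* these writes are now guaranteed to latch */
	intel_set_memory_cxsr(dev_priv, true);	/* re-allow CxSR once the update has landed */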
-/*
- * Latency for FIFO fetches is dependent on several factors:
- * - memory configuration (speed, channels)
- * - chipset
- * - current MCH state
- * It can be fairly high in some situations, so here we assume a fairly
- * pessimal value. It's a tradeoff between extra memory fetches (if we
- * set this value too high, the FIFO will fetch frequently to stay full)
- * and power consumption (set it too low to save power and we might see
- * FIFO underruns and display "flicker").
- *
- * A value of 5us seems to be a good balance; safe for very low end
- * platforms but not overly aggressive on lower latency configs.
- */
-static const int pessimal_latency_ns = 5000;
-
-#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
- ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-
-static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
- enum pipe pipe = crtc->pipe;
- int sprite0_start, sprite1_start;
- u32 dsparb, dsparb2, dsparb3;
-
- switch (pipe) {
- case PIPE_A:
- dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
- sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
- break;
- case PIPE_B:
- dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
- sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
- break;
- case PIPE_C:
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
- sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
- sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
- break;
- default:
- MISSING_CASE(pipe);
- return;
- }
-
- fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
- fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
- fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
- fifo_state->plane[PLANE_CURSOR] = 63;
-}
-
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- if (i9xx_plane == PLANE_B)
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
-
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
-
- return size;
-}
-
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- int size;
-
- size = dsparb & 0x1ff;
- if (i9xx_plane == PLANE_B)
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
- size >>= 1; /* Convert to cachelines */
-
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
-
- return size;
-}
-
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
-
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
-
- return size;
-}
-
-/* Pineview has different values for various configs */
-static const struct intel_watermark_params pnv_display_wm = {
- .fifo_size = PINEVIEW_DISPLAY_FIFO,
- .max_wm = PINEVIEW_MAX_WM,
- .default_wm = PINEVIEW_DFT_WM,
- .guard_size = PINEVIEW_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_display_hplloff_wm = {
- .fifo_size = PINEVIEW_DISPLAY_FIFO,
- .max_wm = PINEVIEW_MAX_WM,
- .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
- .guard_size = PINEVIEW_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_cursor_wm = {
- .fifo_size = PINEVIEW_CURSOR_FIFO,
- .max_wm = PINEVIEW_CURSOR_MAX_WM,
- .default_wm = PINEVIEW_CURSOR_DFT_WM,
- .guard_size = PINEVIEW_CURSOR_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
- .fifo_size = PINEVIEW_CURSOR_FIFO,
- .max_wm = PINEVIEW_CURSOR_MAX_WM,
- .default_wm = PINEVIEW_CURSOR_DFT_WM,
- .guard_size = PINEVIEW_CURSOR_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i965_cursor_wm_info = {
- .fifo_size = I965_CURSOR_FIFO,
- .max_wm = I965_CURSOR_MAX_WM,
- .default_wm = I965_CURSOR_DFT_WM,
- .guard_size = 2,
- .cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i945_wm_info = {
- .fifo_size = I945_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i915_wm_info = {
- .fifo_size = I915_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i830_a_wm_info = {
- .fifo_size = I855GM_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i830_bc_wm_info = {
- .fifo_size = I855GM_FIFO_SIZE,
- .max_wm = I915_MAX_WM/2,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i845_wm_info = {
- .fifo_size = I830_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-/**
- * intel_wm_method1 - Method 1 / "small buffer" watermark formula
- * @pixel_rate: Pipe pixel rate in kHz
- * @cpp: Plane bytes per pixel
- * @latency: Memory wakeup latency in 0.1us units
- *
- * Compute the watermark using the method 1 or "small buffer"
- * formula. The caller may additionally add extra cachelines
- * to account for TLB misses and clock crossings.
- *
- * This method is concerned with the short term drain rate
- * of the FIFO, ie. it does not account for blanking periods
- * which would effectively reduce the average drain rate across
- * a longer period. The name "small" refers to the fact the
- * FIFO is relatively small compared to the amount of data
- * fetched.
- *
- * The FIFO level vs. time graph might look something like:
- *
- * |\ |\
- * | \ | \
- * __---__---__ (- plane active, _ blanking)
- * -> time
- *
- * or perhaps like this:
- *
- * |\|\ |\|\
- * __----__----__ (- plane active, _ blanking)
- * -> time
- *
- * Returns:
- * The watermark in bytes
- */
-static unsigned int intel_wm_method1(unsigned int pixel_rate,
- unsigned int cpp,
- unsigned int latency)
-{
- u64 ret;
-
- ret = mul_u32_u32(pixel_rate, cpp * latency);
- ret = DIV_ROUND_UP_ULL(ret, 10000);
-
- return ret;
-}
-
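As a sanity check on the unit handling above (pixel rate in kHz, latency in 0.1 us units), here is a minimal standalone re-derivation of the method 1 formula with made-up example numbers; it mirrors the function above but is not part of the patch:

	/* bytes drained from the FIFO during the memory wakeup latency */
	static unsigned int wm_method1_example(unsigned int pixel_rate_khz,
					       unsigned int cpp,
					       unsigned int latency_01us)
	{
		unsigned long long bytes = (unsigned long long)pixel_rate_khz * cpp * latency_01us;

		return (bytes + 9999) / 10000;	/* DIV_ROUND_UP(bytes, 10000) */
	}
	/* wm_method1_example(148500, 4, 50) == 2970 bytes for a 148.5 MHz, 32bpp plane at 5 us */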
-/**
- * intel_wm_method2 - Method 2 / "large buffer" watermark formula
- * @pixel_rate: Pipe pixel rate in kHz
- * @htotal: Pipe horizontal total
- * @width: Plane width in pixels
- * @cpp: Plane bytes per pixel
- * @latency: Memory wakeup latency in 0.1us units
- *
- * Compute the watermark using the method 2 or "large buffer"
- * formula. The caller may additonally add extra cachelines
- * to account for TLB misses and clock crossings.
- *
- * This method is concerned with the long term drain rate
- * of the FIFO, ie. it does account for blanking periods
- * which effectively reduce the average drain rate across
- * a longer period. The name "large" refers to the fact the
- * FIFO is relatively large compared to the amount of data
- * fetched.
- *
- * The FIFO level vs. time graph might look something like:
- *
- * |\___ |\___
- * | \___ | \___
- * | \ | \
- * __ --__--__--__--__--__--__ (- plane active, _ blanking)
- * -> time
- *
- * Returns:
- * The watermark in bytes
- */
-static unsigned int intel_wm_method2(unsigned int pixel_rate,
- unsigned int htotal,
- unsigned int width,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- /*
- * FIXME remove once all users are computing
- * watermarks in the correct place.
- */
- if (WARN_ON_ONCE(htotal == 0))
- htotal = 1;
-
- ret = (latency * pixel_rate) / (htotal * 10000);
- ret = (ret + 1) * width * cpp;
-
- return ret;
-}
-
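And the corresponding standalone sketch of the method 2 formula, again with assumed example numbers; note how the integer division rounds the latency down to whole scanlines before the '+1':

	/* whole scanlines elapsed during the latency, rounded up to full lines of data */
	static unsigned int wm_method2_example(unsigned int pixel_rate_khz,
					       unsigned int htotal,
					       unsigned int width,
					       unsigned int cpp,
					       unsigned int latency_01us)
	{
		unsigned int lines = (latency_01us * pixel_rate_khz) / (htotal * 10000);

		return (lines + 1) * width * cpp;
	}
	/* wm_method2_example(148500, 2200, 1920, 4, 120) == 7680 bytes (one line of data) */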
-/**
- * intel_calculate_wm - calculate watermark level
- * @pixel_rate: pixel clock
- * @wm: chip FIFO params
- * @fifo_size: size of the FIFO buffer
- * @cpp: bytes per pixel
- * @latency_ns: memory latency for the platform
- *
- * Calculate the watermark level (the level at which the display plane will
- * start fetching from memory again). Each chip has a different display
- * FIFO size and allocation, so the caller needs to figure that out and pass
- * in the correct intel_watermark_params structure.
- *
- * As the pixel clock runs, the FIFO will be drained at a rate that depends
- * on the pixel size. When it reaches the watermark level, it'll start
- * fetching FIFO line sized chunks from memory until the FIFO fills
- * past the watermark point. If the FIFO drains completely, a FIFO underrun
- * will occur, and a display engine hang could result.
- */
-static unsigned int intel_calculate_wm(int pixel_rate,
- const struct intel_watermark_params *wm,
- int fifo_size, int cpp,
- unsigned int latency_ns)
-{
- int entries, wm_size;
-
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values: clocks go from a few thousand to several hundred
- * thousand kHz, and the latency is usually a few thousand ns.
- */
- entries = intel_wm_method1(pixel_rate, cpp,
- latency_ns / 100);
- entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
- wm->guard_size;
- DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
-
- wm_size = fifo_size - entries;
- DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
-
- /* Don't promote wm_size to unsigned... */
- if (wm_size > wm->max_wm)
- wm_size = wm->max_wm;
- if (wm_size <= 0)
- wm_size = wm->default_wm;
-
- /*
- * Bspec seems to indicate that the value shouldn't be lower than
- * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
- * Let's go for 8, which is the burst size, since certain platforms
- * already use a hardcoded 8 (which is what the spec says should be
- * done).
- */
- if (wm_size <= 8)
- wm_size = 8;
-
- return wm_size;
-}
-
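Putting the pieces together, here is a worked, self-contained version of the calculation above using assumed illustrative parameters (a 96 cacheline FIFO, 64 byte cachelines, a guard size of 2 and a max watermark of 63); these numbers are for illustration only and are not taken from any particular platform's intel_watermark_params:

	static int calculate_wm_example(int pixel_rate_khz, int cpp, int latency_ns)
	{
		const int fifo_size = 96, cacheline = 64, guard = 2;
		const int max_wm = 63, default_wm = 1;
		/* method 1, latency converted to 0.1us units */
		long long bytes = (long long)pixel_rate_khz * cpp * (latency_ns / 100);
		int entries = (bytes + 9999) / 10000;
		int wm_size;

		entries = (entries + cacheline - 1) / cacheline + guard;
		wm_size = fifo_size - entries;
		if (wm_size > max_wm)
			wm_size = max_wm;
		if (wm_size <= 0)
			wm_size = default_wm;
		if (wm_size <= 8)
			wm_size = 8;
		return wm_size;
	}
	/* calculate_wm_example(148500, 4, 5000): 2970 bytes -> 47 + 2 cachelines -> 96 - 49 = 47 */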
-static bool is_disabling(int old, int new, int threshold)
-{
- return old >= threshold && new < threshold;
-}
-
-static bool is_enabling(int old, int new, int threshold)
-{
- return old < threshold && new >= threshold;
-}
-
-static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
-{
- return dev_priv->display.wm.max_level + 1;
-}
-
-bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-
- /* FIXME check the 'enable' instead */
- if (!crtc_state->hw.active)
- return false;
-
- /*
- * Treat cursor with fb as always visible since cursor updates
- * can happen faster than the vrefresh rate, and the current
- * watermark code doesn't handle that correctly. Cursor updates
- * which set/clear the fb or change the cursor size are going
- * to get throttled by intel_legacy_cursor_update() to work
- * around this problem with the watermark code.
- */
- if (plane->id == PLANE_CURSOR)
- return plane_state->hw.fb != NULL;
- else
- return plane_state->uapi.visible;
-}
-
-static bool intel_crtc_active(struct intel_crtc *crtc)
-{
- /* Be paranoid as we can arrive here with only partial
- * state retrieved from the hardware during setup.
- *
- * We can ditch the adjusted_mode.crtc_clock check as soon
- * as Haswell has gained clock readout/fastboot support.
- *
- * We can ditch the crtc->primary->state->fb check as soon as we can
- * properly reconstruct framebuffers.
- *
- * FIXME: The intel_crtc->active here should be switched to
- * crtc->state->active once we have proper CRTC states wired up
- * for atomic.
- */
- return crtc && crtc->active && crtc->base.primary->state->fb &&
- crtc->config->hw.adjusted_mode.crtc_clock;
-}
-
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc, *enabled = NULL;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- if (intel_crtc_active(crtc)) {
- if (enabled)
- return NULL;
- enabled = crtc;
- }
- }
-
- return enabled;
-}
-
-static void pnv_update_wm(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
- const struct cxsr_latency *latency;
- u32 reg;
- unsigned int wm;
-
- latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq);
- if (!latency) {
- drm_dbg_kms(&dev_priv->drm,
- "Unknown FSB/MEM found, disable CxSR\n");
- intel_set_memory_cxsr(dev_priv, false);
- return;
- }
-
- crtc = single_enabled_crtc(dev_priv);
- if (crtc) {
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int pixel_rate = crtc->config->pixel_rate;
- int cpp = fb->format->cpp[0];
-
- /* Display SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
- pnv_display_wm.fifo_size,
- cpp, latency->display_sr);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
- reg &= ~DSPFW_SR_MASK;
- reg |= FW_WM(wm, SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
- pnv_display_wm.fifo_size,
- 4, latency->cursor_sr);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
- FW_WM(wm, CURSOR_SR));
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
- pnv_display_hplloff_wm.fifo_size,
- cpp, latency->display_hpll_disable);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
- pnv_display_hplloff_wm.fifo_size,
- 4, latency->cursor_hpll_disable);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= FW_WM(wm, HPLL_CURSOR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
-
- intel_set_memory_cxsr(dev_priv, true);
- } else {
- intel_set_memory_cxsr(dev_priv, false);
- }
-}
-
-/*
- * Documentation says:
- * "If the line size is small, the TLB fetches can get in the way of the
- * data fetches, causing some lag in the pixel data return which is not
- * accounted for in the above formulas. The following adjustment only
- * needs to be applied if eight whole lines fit in the buffer at once.
- * The WM is adjusted upwards by the difference between the FIFO size
- * and the size of 8 whole lines. This adjustment is always performed
- * in the actual pixel depth regardless of whether FBC is enabled or not."
- */
-static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
-{
- int tlb_miss = fifo_size * 64 - width * cpp * 8;
-
- return max(0, tlb_miss);
-}
-
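For a feel of when the quoted adjustment actually kicks in, a small standalone sketch with assumed numbers: with a 127 cacheline (8128 byte) FIFO, a 64 pixel wide 4 bpp plane fits eight whole lines (2048 bytes) and gets its watermark bumped by 6080 bytes, while a 1920 pixel wide plane does not fit eight lines and the adjustment clamps to zero:

	static int tlb_miss_wa_example(int fifo_cachelines, int width, int cpp)
	{
		int tlb_miss = fifo_cachelines * 64 - width * cpp * 8;

		return tlb_miss > 0 ? tlb_miss : 0;
	}
	/* tlb_miss_wa_example(127, 64, 4) == 6080, tlb_miss_wa_example(127, 1920, 4) == 0 */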
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
- const struct g4x_wm_values *wm)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe)
- trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1,
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2,
- (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
- FW_WM(wm->sr.fbc, FBC_SR) |
- FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3,
- (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
- FW_WM(wm->sr.cursor, CURSOR_SR) |
- FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
- FW_WM(wm->hpll.plane, HPLL_SR));
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
-}
-
-#define FW_WM_VLV(value, plane) \
- (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
- const struct vlv_wm_values *wm)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
-
- intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
- (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
- }
-
- /*
- * Zero the (unused) WM1 watermarks, and also clear all the
- * high order bits so that there are no out of bounds values
- * present in the registers during the reprogramming.
- */
- intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
- intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1,
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2,
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3,
- FW_WM(wm->sr.cursor, CURSOR_SR));
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
- } else {
- intel_uncore_write(&dev_priv->uncore, DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
- }
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
-}
-
-#undef FW_WM_VLV
-
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- /* all latencies in usec */
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
-
- dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL;
-}
-
-static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
-{
- /*
- * DSPCNTR[13] supposedly controls whether the
- * primary plane can use the FIFO space otherwise
- * reserved for the sprite plane. It's not 100% clear
- * what the actual FIFO size is, but it looks like we
- * can happily set both primary and sprite watermarks
- * up to 127 cachelines. So that would seem to mean
- * that either DSPCNTR[13] doesn't do anything, or that
- * the total FIFO is >= 256 cachelines in size. Either
- * way, we don't seem to have to worry about this
- * repartitioning as the maximum watermark value the
- * register can hold for each plane is lower than the
- * minimum FIFO size.
- */
- switch (plane_id) {
- case PLANE_CURSOR:
- return 63;
- case PLANE_PRIMARY:
- return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
- case PLANE_SPRITE0:
- return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
- default:
- MISSING_CASE(plane_id);
- return 0;
- }
-}
-
-static int g4x_fbc_fifo_size(int level)
-{
- switch (level) {
- case G4X_WM_LEVEL_SR:
- return 7;
- case G4X_WM_LEVEL_HPLL:
- return 15;
- default:
- MISSING_CASE(level);
- return 0;
- }
-}
-
-static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
- unsigned int pixel_rate, htotal, cpp, width, wm;
-
- if (latency == 0)
- return USHRT_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- /*
- * WaUse32BppForSRWM:ctg,elk
- *
- * The spec fails to list this restriction for the
- * HPLL watermark, which seems a little strange.
- * Let's use 32bpp for the HPLL watermark as well.
- */
- if (plane->id == PLANE_PRIMARY &&
- level != G4X_WM_LEVEL_NORMAL)
- cpp = max(cpp, 4u);
-
- pixel_rate = crtc_state->pixel_rate;
- htotal = pipe_mode->crtc_htotal;
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- if (plane->id == PLANE_CURSOR) {
- wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
- } else if (plane->id == PLANE_PRIMARY &&
- level == G4X_WM_LEVEL_NORMAL) {
- wm = intel_wm_method1(pixel_rate, cpp, latency);
- } else {
- unsigned int small, large;
-
- small = intel_wm_method1(pixel_rate, cpp, latency);
- large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
-
- wm = min(small, large);
- }
-
- wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
- width, cpp);
-
- wm = DIV_ROUND_UP(wm, 64) + 2;
-
- return min_t(unsigned int, wm, USHRT_MAX);
-}
-
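Continuing the example numbers used above, the final two lines of g4x_compute_wm() convert bytes to cachelines with a little headroom: a 2970 byte method 1 result plus a zero TLB adjustment becomes DIV_ROUND_UP(2970, 64) + 2 = 49 cachelines, which the callers below then check against g4x_plane_fifo_size(). (Example numbers only.)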
-static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
- int level, enum plane_id plane_id, u16 value)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- bool dirty = false;
-
- for (; level < intel_wm_num_levels(dev_priv); level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
- dirty |= raw->plane[plane_id] != value;
- raw->plane[plane_id] = value;
- }
-
- return dirty;
-}
-
-static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
- int level, u16 value)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- bool dirty = false;
-
- /* NORMAL level doesn't have an FBC watermark */
- level = max(level, G4X_WM_LEVEL_SR);
-
- for (; level < intel_wm_num_levels(dev_priv); level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
- dirty |= raw->fbc != value;
- raw->fbc = value;
- }
-
- return dirty;
-}
-
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 pri_val);
-
-static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
- enum plane_id plane_id = plane->id;
- bool dirty = false;
- int level;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state)) {
- dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
- if (plane_id == PLANE_PRIMARY)
- dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
- goto out;
- }
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
- int wm, max_wm;
-
- wm = g4x_compute_wm(crtc_state, plane_state, level);
- max_wm = g4x_plane_fifo_size(plane_id, level);
-
- if (wm > max_wm)
- break;
-
- dirty |= raw->plane[plane_id] != wm;
- raw->plane[plane_id] = wm;
-
- if (plane_id != PLANE_PRIMARY ||
- level == G4X_WM_LEVEL_NORMAL)
- continue;
-
- wm = ilk_compute_fbc_wm(crtc_state, plane_state,
- raw->plane[plane_id]);
- max_wm = g4x_fbc_fifo_size(level);
-
- /*
- * FBC wm is not mandatory as we
- * can always just disable its use.
- */
- if (wm > max_wm)
- wm = USHRT_MAX;
-
- dirty |= raw->fbc != wm;
- raw->fbc = wm;
- }
-
- /* mark watermarks as invalid */
- dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
-
- if (plane_id == PLANE_PRIMARY)
- dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
-
- out:
- if (dirty) {
- drm_dbg_kms(&dev_priv->drm,
- "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
- plane->base.name,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
-
- if (plane_id == PLANE_PRIMARY)
- drm_dbg_kms(&dev_priv->drm,
- "FBC watermarks: SR=%d, HPLL=%d\n",
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
- }
-
- return dirty;
-}
-
-static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
- enum plane_id plane_id, int level)
-{
- const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
- return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
-}
-
-static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
- int level)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- if (level > dev_priv->display.wm.max_level)
- return false;
-
- return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
- g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
- g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
-}
-
-/* mark all levels starting from 'level' as invalid */
-static void g4x_invalidate_wms(struct intel_crtc *crtc,
- struct g4x_wm_state *wm_state, int level)
-{
- if (level <= G4X_WM_LEVEL_NORMAL) {
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- wm_state->wm.plane[plane_id] = USHRT_MAX;
- }
-
- if (level <= G4X_WM_LEVEL_SR) {
- wm_state->cxsr = false;
- wm_state->sr.cursor = USHRT_MAX;
- wm_state->sr.plane = USHRT_MAX;
- wm_state->sr.fbc = USHRT_MAX;
- }
-
- if (level <= G4X_WM_LEVEL_HPLL) {
- wm_state->hpll_en = false;
- wm_state->hpll.cursor = USHRT_MAX;
- wm_state->hpll.plane = USHRT_MAX;
- wm_state->hpll.fbc = USHRT_MAX;
- }
-}
-
-static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
- int level)
-{
- if (level < G4X_WM_LEVEL_SR)
- return false;
-
- if (level >= G4X_WM_LEVEL_SR &&
- wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
- return false;
-
- if (level >= G4X_WM_LEVEL_HPLL &&
- wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
- return false;
-
- return true;
-}
-
-static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
- u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- const struct g4x_pipe_wm *raw;
- enum plane_id plane_id;
- int level;
-
- level = G4X_WM_LEVEL_NORMAL;
- if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- for_each_plane_id_on_crtc(crtc, plane_id)
- wm_state->wm.plane[plane_id] = raw->plane[plane_id];
-
- level = G4X_WM_LEVEL_SR;
- if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
- wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
- wm_state->sr.fbc = raw->fbc;
-
- wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
-
- level = G4X_WM_LEVEL_HPLL;
- if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
- wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
- wm_state->hpll.fbc = raw->fbc;
-
- wm_state->hpll_en = wm_state->cxsr;
-
- level++;
-
- out:
- if (level == G4X_WM_LEVEL_NORMAL)
- return -EINVAL;
-
- /* invalidate the higher levels */
- g4x_invalidate_wms(crtc, wm_state, level);
-
- /*
- * Determine if the FBC watermark(s) can be used. If
- * this isn't the case, we prefer to disable the FBC
- * watermark(s) rather than disable the SR/HPLL
- * level(s) entirely. 'level-1' is the highest valid
- * level here.
- */
- wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
-
- return 0;
-}
-
-static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *old_plane_state;
- const struct intel_plane_state *new_plane_state;
- struct intel_plane *plane;
- unsigned int dirty = 0;
- int i;
-
- for_each_oldnew_intel_plane_in_state(state, plane,
- old_plane_state,
- new_plane_state, i) {
- if (new_plane_state->hw.crtc != &crtc->base &&
- old_plane_state->hw.crtc != &crtc->base)
- continue;
-
- if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
- dirty |= BIT(plane->id);
- }
-
- if (!dirty)
- return 0;
-
- return _g4x_compute_pipe_wm(crtc_state);
-}
-
-static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
- const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
- const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
- enum plane_id plane_id;
-
- if (!new_crtc_state->hw.active ||
- intel_crtc_needs_modeset(new_crtc_state)) {
- *intermediate = *optimal;
-
- intermediate->cxsr = false;
- intermediate->hpll_en = false;
- goto out;
- }
-
- intermediate->cxsr = optimal->cxsr && active->cxsr &&
- !new_crtc_state->disable_cxsr;
- intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
- !new_crtc_state->disable_cxsr;
- intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- intermediate->wm.plane[plane_id] =
- max(optimal->wm.plane[plane_id],
- active->wm.plane[plane_id]);
-
- drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
- g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
- }
-
- intermediate->sr.plane = max(optimal->sr.plane,
- active->sr.plane);
- intermediate->sr.cursor = max(optimal->sr.cursor,
- active->sr.cursor);
- intermediate->sr.fbc = max(optimal->sr.fbc,
- active->sr.fbc);
-
- intermediate->hpll.plane = max(optimal->hpll.plane,
- active->hpll.plane);
- intermediate->hpll.cursor = max(optimal->hpll.cursor,
- active->hpll.cursor);
- intermediate->hpll.fbc = max(optimal->hpll.fbc,
- active->hpll.fbc);
-
- drm_WARN_ON(&dev_priv->drm,
- (intermediate->sr.plane >
- g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
- intermediate->sr.cursor >
- g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
- intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
- (intermediate->sr.plane >
- g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
- intermediate->sr.cursor >
- g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
- intermediate->hpll_en);
-
- drm_WARN_ON(&dev_priv->drm,
- intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
- intermediate->fbc_en && intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
- intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
- intermediate->fbc_en && intermediate->hpll_en);
-
-out:
- /*
- * If our intermediate WMs are identical to the final WMs, then we can
- * omit the post-vblank programming; only update if it's different.
- */
- if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
- new_crtc_state->wm.need_postvbl_update = true;
-
- return 0;
-}
-
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
- struct g4x_wm_values *wm)
-{
- struct intel_crtc *crtc;
- int num_active_pipes = 0;
-
- wm->cxsr = true;
- wm->hpll_en = true;
- wm->fbc_en = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
-
- if (!crtc->active)
- continue;
-
- if (!wm_state->cxsr)
- wm->cxsr = false;
- if (!wm_state->hpll_en)
- wm->hpll_en = false;
- if (!wm_state->fbc_en)
- wm->fbc_en = false;
-
- num_active_pipes++;
- }
-
- if (num_active_pipes != 1) {
- wm->cxsr = false;
- wm->hpll_en = false;
- wm->fbc_en = false;
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
- enum pipe pipe = crtc->pipe;
-
- wm->pipe[pipe] = wm_state->wm;
- if (crtc->active && wm->cxsr)
- wm->sr = wm_state->sr;
- if (crtc->active && wm->hpll_en)
- wm->hpll = wm_state->hpll;
- }
-}
-
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
-{
- struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
- struct g4x_wm_values new_wm = {};
-
- g4x_merge_wm(dev_priv, &new_wm);
-
- if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
- return;
-
- if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
-
- g4x_write_wm_values(dev_priv, &new_wm);
-
- if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
-
- *old_wm = new_wm;
-}
-
-static void g4x_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void g4x_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state->wm.need_postvbl_update)
- return;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int vlv_wm_method2(unsigned int pixel_rate,
- unsigned int htotal,
- unsigned int width,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- ret = intel_wm_method2(pixel_rate, htotal,
- width, cpp, latency);
- ret = DIV_ROUND_UP(ret, 64);
-
- return ret;
-}
-
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- /* all latencies in usec */
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
-
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
-
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
- }
-}
-
-static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- unsigned int pixel_rate, htotal, cpp, width, wm;
-
- if (dev_priv->display.wm.pri_latency[level] == 0)
- return USHRT_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
- pixel_rate = crtc_state->pixel_rate;
- htotal = pipe_mode->crtc_htotal;
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- if (plane->id == PLANE_CURSOR) {
- /*
- * FIXME the formula gives values that are
- * too big for the cursor FIFO, and hence we
- * would never be able to use cursors. For
- * now just hardcode the watermark.
- */
- wm = 63;
- } else {
- wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->display.wm.pri_latency[level] * 10);
- }
-
- return min_t(unsigned int, wm, USHRT_MAX);
-}
-
-static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
-{
- return (active_planes & (BIT(PLANE_SPRITE0) |
- BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
-}
-
-static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
- struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
- u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight8(active_planes);
- const int fifo_size = 511;
- int fifo_extra, fifo_left = fifo_size;
- int sprite0_fifo_extra = 0;
- unsigned int total_rate;
- enum plane_id plane_id;
-
- /*
- * When enabling sprite0 after sprite1 has already been enabled
- * we tend to get an underrun unless sprite0 already has some
- * FIFO space allocated. Hence we always allocate at least one
- * cacheline for sprite0 whenever sprite1 is enabled.
- *
- * All other plane enable sequences appear immune to this problem.
- */
- if (vlv_need_sprite0_fifo_workaround(active_planes))
- sprite0_fifo_extra = 1;
-
- total_rate = raw->plane[PLANE_PRIMARY] +
- raw->plane[PLANE_SPRITE0] +
- raw->plane[PLANE_SPRITE1] +
- sprite0_fifo_extra;
-
- if (total_rate > fifo_size)
- return -EINVAL;
-
- if (total_rate == 0)
- total_rate = 1;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- unsigned int rate;
-
- if ((active_planes & BIT(plane_id)) == 0) {
- fifo_state->plane[plane_id] = 0;
- continue;
- }
-
- rate = raw->plane[plane_id];
- fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
- fifo_left -= fifo_state->plane[plane_id];
- }
-
- fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
- fifo_left -= sprite0_fifo_extra;
-
- fifo_state->plane[PLANE_CURSOR] = 63;
-
- fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
-
- /* spread the remainder evenly */
- for_each_plane_id_on_crtc(crtc, plane_id) {
- int plane_extra;
-
- if (fifo_left == 0)
- break;
-
- if ((active_planes & BIT(plane_id)) == 0)
- continue;
-
- plane_extra = min(fifo_extra, fifo_left);
- fifo_state->plane[plane_id] += plane_extra;
- fifo_left -= plane_extra;
- }
-
- drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
-
- /* give it all to the first plane if none are active */
- if (active_planes == 0) {
- drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
- fifo_state->plane[PLANE_PRIMARY] = fifo_left;
- }
-
- return 0;
-}
-
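A worked example of the proportional split above, with assumed raw PM2 watermarks: primary = 100, sprite0 = 50, sprite1 off, and 511 FIFO entries to divide between the two active planes. The first pass hands out 511 * 100 / 150 = 340 and 511 * 50 / 150 = 170 entries, leaving 1 entry over; the remainder loop then adds DIV_ROUND_UP(1, 2) = 1 entry to the primary plane, for a final 341 / 170 split, while the cursor always keeps its fixed 63 entries on top. (Illustrative numbers only.)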
-/* mark all levels starting from 'level' as invalid */
-static void vlv_invalidate_wms(struct intel_crtc *crtc,
- struct vlv_wm_state *wm_state, int level)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- for (; level < intel_wm_num_levels(dev_priv); level++) {
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- wm_state->wm[level].plane[plane_id] = USHRT_MAX;
-
- wm_state->sr[level].cursor = USHRT_MAX;
- wm_state->sr[level].plane = USHRT_MAX;
- }
-}
-
-static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
-{
- if (wm > fifo_size)
- return USHRT_MAX;
- else
- return fifo_size - wm;
-}
-
-/*
- * Starting from 'level' set all higher
- * levels to 'value' in the "raw" watermarks.
- */
-static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
- int level, enum plane_id plane_id, u16 value)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int num_levels = intel_wm_num_levels(dev_priv);
- bool dirty = false;
-
- for (; level < num_levels; level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
-
- dirty |= raw->plane[plane_id] != value;
- raw->plane[plane_id] = value;
- }
-
- return dirty;
-}
-
-static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- enum plane_id plane_id = plane->id;
- int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
- int level;
- bool dirty = false;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state)) {
- dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
- goto out;
- }
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
- int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
-
- if (wm > max_wm)
- break;
-
- dirty |= raw->plane[plane_id] != wm;
- raw->plane[plane_id] = wm;
- }
-
- /* mark all higher levels as invalid */
- dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
-
-out:
- if (dirty)
- drm_dbg_kms(&dev_priv->drm,
- "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
- plane->base.name,
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
-
- return dirty;
-}
-
-static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
- enum plane_id plane_id, int level)
-{
- const struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[level];
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
-
- return raw->plane[plane_id] <= fifo_state->plane[plane_id];
-}
-
-static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
-{
- return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
- vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
- vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
- vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
-}
-
-static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight8(active_planes);
- enum plane_id plane_id;
- int level;
-
- /* initially allow all levels */
- wm_state->num_levels = intel_wm_num_levels(dev_priv);
- /*
- * Note that enabling cxsr with no primary/sprite planes
- * enabled can wedge the pipe. Hence we only allow cxsr
- * with exactly one enabled primary/sprite plane.
- */
- wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
-
- for (level = 0; level < wm_state->num_levels; level++) {
- const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
-
- if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
- break;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- wm_state->wm[level].plane[plane_id] =
- vlv_invert_wm_value(raw->plane[plane_id],
- fifo_state->plane[plane_id]);
- }
-
- wm_state->sr[level].plane =
- vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
- raw->plane[PLANE_SPRITE0],
- raw->plane[PLANE_SPRITE1]),
- sr_fifo_size);
-
- wm_state->sr[level].cursor =
- vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
- 63);
- }
-
- if (level == 0)
- return -EINVAL;
-
- /* limit to only levels we can actually handle */
- wm_state->num_levels = level;
-
- /* invalidate the higher levels */
- vlv_invalidate_wms(crtc, wm_state, level);
-
- return 0;
-}
-
-static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *old_plane_state;
- const struct intel_plane_state *new_plane_state;
- struct intel_plane *plane;
- unsigned int dirty = 0;
- int i;
-
- for_each_oldnew_intel_plane_in_state(state, plane,
- old_plane_state,
- new_plane_state, i) {
- if (new_plane_state->hw.crtc != &crtc->base &&
- old_plane_state->hw.crtc != &crtc->base)
- continue;
-
- if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
- dirty |= BIT(plane->id);
- }
-
- /*
- * DSPARB registers may have been reset due to the
- * power well being turned off. Make sure we restore
- * them to a consistent state even if no primary/sprite
- * planes are initially active. We also force a FIFO
- * recomputation so that we are sure to sanitize the
- * FIFO setting we took over from the BIOS even if there
- * are no active planes on the crtc.
- */
- if (intel_crtc_needs_modeset(crtc_state))
- dirty = ~0;
-
- if (!dirty)
- return 0;
-
- /* cursor changes don't warrant a FIFO recompute */
- if (dirty & ~BIT(PLANE_CURSOR)) {
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct vlv_fifo_state *old_fifo_state =
- &old_crtc_state->wm.vlv.fifo_state;
- const struct vlv_fifo_state *new_fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- int ret;
-
- ret = vlv_compute_fifo(crtc_state);
- if (ret)
- return ret;
-
- if (intel_crtc_needs_modeset(crtc_state) ||
- memcmp(old_fifo_state, new_fifo_state,
- sizeof(*new_fifo_state)) != 0)
- crtc_state->fifo_changed = true;
- }
-
- return _vlv_compute_pipe_wm(crtc_state);
-}
-
-#define VLV_FIFO(plane, value) \
- (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
-
-static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_uncore *uncore = &dev_priv->uncore;
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- int sprite0_start, sprite1_start, fifo_size;
- u32 dsparb, dsparb2, dsparb3;
-
- if (!crtc_state->fifo_changed)
- return;
-
- sprite0_start = fifo_state->plane[PLANE_PRIMARY];
- sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
- fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
-
- drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
- drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
-
- trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
-
- /*
- * uncore.lock serves a double purpose here. It allows us to
- * use the less expensive I915_{READ,WRITE}_FW() functions, and
- * it protects the DSPARB registers from getting clobbered by
- * parallel updates from multiple pipes.
- *
- * intel_pipe_update_start() has already disabled interrupts
- * for us, so a plain spin_lock() is sufficient here.
- */
- spin_lock(&uncore->lock);
-
- switch (crtc->pipe) {
- case PIPE_A:
- dsparb = intel_uncore_read_fw(uncore, DSPARB);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
- dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
- VLV_FIFO(SPRITEB, 0xff));
- dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
- VLV_FIFO(SPRITEB, sprite1_start));
-
- dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
- VLV_FIFO(SPRITEB_HI, 0x1));
- dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
- VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
-
- intel_uncore_write_fw(uncore, DSPARB, dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
- break;
- case PIPE_B:
- dsparb = intel_uncore_read_fw(uncore, DSPARB);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
- dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
- VLV_FIFO(SPRITED, 0xff));
- dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
- VLV_FIFO(SPRITED, sprite1_start));
-
- dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
- VLV_FIFO(SPRITED_HI, 0xff));
- dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
- VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
-
- intel_uncore_write_fw(uncore, DSPARB, dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
- break;
- case PIPE_C:
- dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
- dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
- VLV_FIFO(SPRITEF, 0xff));
- dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
- VLV_FIFO(SPRITEF, sprite1_start));
-
- dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
- VLV_FIFO(SPRITEF_HI, 0xff));
- dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
- VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
-
- intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
- break;
- default:
- break;
- }
-
- intel_uncore_posting_read_fw(uncore, DSPARB);
-
- spin_unlock(&uncore->lock);
-}
-
-#undef VLV_FIFO
-
-static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
- const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
- const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
- int level;
-
- if (!new_crtc_state->hw.active ||
- intel_crtc_needs_modeset(new_crtc_state)) {
- *intermediate = *optimal;
-
- intermediate->cxsr = false;
- goto out;
- }
-
- intermediate->num_levels = min(optimal->num_levels, active->num_levels);
- intermediate->cxsr = optimal->cxsr && active->cxsr &&
- !new_crtc_state->disable_cxsr;
-
- for (level = 0; level < intermediate->num_levels; level++) {
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- intermediate->wm[level].plane[plane_id] =
- min(optimal->wm[level].plane[plane_id],
- active->wm[level].plane[plane_id]);
- }
-
- intermediate->sr[level].plane = min(optimal->sr[level].plane,
- active->sr[level].plane);
- intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
- active->sr[level].cursor);
- }
-
- vlv_invalidate_wms(crtc, intermediate, level);
-
-out:
- /*
- * If our intermediate WMs are identical to the final WMs, then we can
- * omit the post-vblank programming; only update if it's different.
- */
- if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
- new_crtc_state->wm.need_postvbl_update = true;
-
- return 0;
-}
-
-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
- struct vlv_wm_values *wm)
-{
- struct intel_crtc *crtc;
- int num_active_pipes = 0;
-
- wm->level = dev_priv->display.wm.max_level;
- wm->cxsr = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
-
- if (!crtc->active)
- continue;
-
- if (!wm_state->cxsr)
- wm->cxsr = false;
-
- num_active_pipes++;
- wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
- }
-
- if (num_active_pipes != 1)
- wm->cxsr = false;
-
- if (num_active_pipes > 1)
- wm->level = VLV_WM_LEVEL_PM2;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
- enum pipe pipe = crtc->pipe;
-
- wm->pipe[pipe] = wm_state->wm[wm->level];
- if (crtc->active && wm->cxsr)
- wm->sr = wm_state->sr[wm->level];
-
- wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
- }
-}
-
-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
-{
- struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
- struct vlv_wm_values new_wm = {};
-
- vlv_merge_wm(dev_priv, &new_wm);
-
- if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
- return;
-
- if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, false);
-
- if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, false);
-
- if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
-
- vlv_write_wm_values(dev_priv, &new_wm);
-
- if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
-
- if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, true);
-
- if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, true);
-
- *old_wm = new_wm;
-}
-
-static void vlv_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void vlv_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state->wm.need_postvbl_update)
- return;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void i965_update_wm(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
- int srwm = 1;
- int cursor_sr = 16;
- bool cxsr_enabled;
-
- /* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev_priv);
- if (crtc) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
- const struct drm_display_mode *pipe_mode =
- &crtc->config->hw.pipe_mode;
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int pixel_rate = crtc->config->pixel_rate;
- int htotal = pipe_mode->crtc_htotal;
- int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
- int cpp = fb->format->cpp[0];
- int entries;
-
- entries = intel_wm_method2(pixel_rate, htotal,
- width, cpp, sr_latency_ns / 100);
- entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
- srwm = I965_FIFO_SIZE - entries;
- if (srwm < 0)
- srwm = 1;
- srwm &= 0x1ff;
- drm_dbg_kms(&dev_priv->drm,
- "self-refresh entries: %d, wm: %d\n",
- entries, srwm);
-
- entries = intel_wm_method2(pixel_rate, htotal,
- crtc->base.cursor->state->crtc_w, 4,
- sr_latency_ns / 100);
- entries = DIV_ROUND_UP(entries,
- i965_cursor_wm_info.cacheline_size) +
- i965_cursor_wm_info.guard_size;
-
- cursor_sr = i965_cursor_wm_info.fifo_size - entries;
- if (cursor_sr > i965_cursor_wm_info.max_wm)
- cursor_sr = i965_cursor_wm_info.max_wm;
-
- drm_dbg_kms(&dev_priv->drm,
- "self-refresh watermark: display plane %d "
- "cursor %d\n", srwm, cursor_sr);
-
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- /* Turn off self refresh if both pipes are enabled */
- intel_set_memory_cxsr(dev_priv, false);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
-
- /* 965 has limitations... */
- intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
- FW_WM(8, CURSORB) |
- FW_WM(8, PLANEB) |
- FW_WM(8, PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
- FW_WM(8, PLANEC_OLD));
- /* update cursor SR watermark */
- intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
-
- if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
-}
-
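A rough standalone sketch of the self-refresh sizing above, assuming intel_wm_method2() counts scanlines elapsed during the latency window (plus one) times bytes per line, with a 64-byte FIFO line (I915_FIFO_LINE_SIZE) and a 512-entry FIFO (I965_FIFO_SIZE); all numbers are hypothetical:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pixel_rate = 148500;		/* kHz, hypothetical mode */
	unsigned int htotal = 2200, width = 1920, cpp = 4;
	unsigned int latency = 12000 / 100;		/* 12us -> 0.1us units */

	/* assumed method2 behaviour: lines elapsed during the latency, +1, times line size */
	unsigned int lines = DIV_ROUND_UP(latency * pixel_rate, 10000 * htotal);
	unsigned int entries = (lines + 1) * width * cpp;

	entries = DIV_ROUND_UP(entries, 64);		/* assumed I915_FIFO_LINE_SIZE */
	printf("entries=%u srwm=%d\n", entries, 512 - (int)entries);	/* 240, 272 */
	return 0;
}
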
-#undef FW_WM
-
-static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
- enum i9xx_plane_id i9xx_plane)
-{
- struct intel_plane *plane;
-
- for_each_intel_plane(&i915->drm, plane) {
- if (plane->id == PLANE_PRIMARY &&
- plane->i9xx_plane == i9xx_plane)
- return intel_crtc_for_pipe(i915, plane->pipe);
- }
-
- return NULL;
-}
-
-static void i9xx_update_wm(struct drm_i915_private *dev_priv)
-{
- const struct intel_watermark_params *wm_info;
- u32 fwater_lo;
- u32 fwater_hi;
- int cwm, srwm = 1;
- int fifo_size;
- int planea_wm, planeb_wm;
- struct intel_crtc *crtc;
-
- if (IS_I945GM(dev_priv))
- wm_info = &i945_wm_info;
- else if (DISPLAY_VER(dev_priv) != 2)
- wm_info = &i915_wm_info;
- else
- wm_info = &i830_a_wm_info;
-
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
- else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
- if (intel_crtc_active(crtc)) {
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int cpp;
-
- if (DISPLAY_VER(dev_priv) == 2)
- cpp = 4;
- else
- cpp = fb->format->cpp[0];
-
- planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
- wm_info, fifo_size, cpp,
- pessimal_latency_ns);
- } else {
- planea_wm = fifo_size - wm_info->guard_size;
- if (planea_wm > (long)wm_info->max_wm)
- planea_wm = wm_info->max_wm;
- }
-
- if (DISPLAY_VER(dev_priv) == 2)
- wm_info = &i830_bc_wm_info;
-
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
- else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
- if (intel_crtc_active(crtc)) {
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int cpp;
-
- if (DISPLAY_VER(dev_priv) == 2)
- cpp = 4;
- else
- cpp = fb->format->cpp[0];
-
- planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
- wm_info, fifo_size, cpp,
- pessimal_latency_ns);
- } else {
- planeb_wm = fifo_size - wm_info->guard_size;
- if (planeb_wm > (long)wm_info->max_wm)
- planeb_wm = wm_info->max_wm;
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-
- crtc = single_enabled_crtc(dev_priv);
- if (IS_I915GM(dev_priv) && crtc) {
- struct drm_i915_gem_object *obj;
-
- obj = intel_fb_obj(crtc->base.primary->state->fb);
-
- /* self-refresh seems busted with untiled */
- if (!i915_gem_object_is_tiled(obj))
- crtc = NULL;
- }
-
- /*
- * Overlay gets an aggressive default since video jitter is bad.
- */
- cwm = 2;
-
- /* Play safe and disable self-refresh before adjusting watermarks. */
- intel_set_memory_cxsr(dev_priv, false);
-
- /* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev_priv) && crtc) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 6000;
- const struct drm_display_mode *pipe_mode =
- &crtc->config->hw.pipe_mode;
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int pixel_rate = crtc->config->pixel_rate;
- int htotal = pipe_mode->crtc_htotal;
- int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
- int cpp;
- int entries;
-
- if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
- cpp = 4;
- else
- cpp = fb->format->cpp[0];
-
- entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
- sr_latency_ns / 100);
- entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- drm_dbg_kms(&dev_priv->drm,
- "self-refresh entries: %d\n", entries);
- srwm = wm_info->fifo_size - entries;
- if (srwm < 0)
- srwm = 1;
-
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
-
- fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
- fwater_hi = (cwm & 0x1f);
-
- /* Set request length to 8 cachelines per fetch */
- fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
- fwater_hi = fwater_hi | (1 << 8);
-
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
- intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
-
- if (crtc)
- intel_set_memory_cxsr(dev_priv, true);
-}
-
-static void i845_update_wm(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
- u32 fwater_lo;
- int planea_wm;
-
- crtc = single_enabled_crtc(dev_priv);
- if (crtc == NULL)
- return;
-
- planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
- &i845_wm_info,
- i845_get_fifo_size(dev_priv, PLANE_A),
- 4, pessimal_latency_ns);
- fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
- fwater_lo |= (3<<8) | planea_wm;
-
- drm_dbg_kms(&dev_priv->drm,
- "Setting FIFO watermarks - A: %d\n", planea_wm);
-
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int ilk_wm_method1(unsigned int pixel_rate,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- ret = intel_wm_method1(pixel_rate, cpp, latency);
- ret = DIV_ROUND_UP(ret, 64) + 2;
-
- return ret;
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int ilk_wm_method2(unsigned int pixel_rate,
- unsigned int htotal,
- unsigned int width,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- ret = intel_wm_method2(pixel_rate, htotal,
- width, cpp, latency);
- ret = DIV_ROUND_UP(ret, 64) + 2;
-
- return ret;
-}
-
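A minimal standalone sketch of the method1 path above, assuming intel_wm_method1() returns the bytes fetched during the latency window (pixel rate in kHz, latency in 0.1us units); the mode and latency are hypothetical:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* assumed behaviour of intel_wm_method1(): bytes fetched while the latency elapses */
static unsigned long long wm_method1_bytes(unsigned int pixel_rate_khz,
					   unsigned int cpp,
					   unsigned int latency_0p1us)
{
	return DIV_ROUND_UP((unsigned long long)pixel_rate_khz * cpp * latency_0p1us,
			    10000);
}

int main(void)
{
	/* hypothetical 148.5 MHz pipe, 4 bytes per pixel, 1.2us latency */
	unsigned long long bytes = wm_method1_bytes(148500, 4, 12);
	/* same post-processing as ilk_wm_method1(): 64-byte cachelines plus 2 */
	unsigned long long wm = DIV_ROUND_UP(bytes, 64) + 2;

	printf("bytes=%llu wm=%llu\n", bytes, wm);	/* bytes=713 wm=14 */
	return 0;
}
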
-static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
-{
- /*
- * Neither of these should be possible since this function shouldn't be
- * called if the CRTC is off or the plane is invisible. But let's be
- * extra paranoid to avoid a potential divide-by-zero if we screw up
- * elsewhere in the driver.
- */
- if (WARN_ON(!cpp))
- return 0;
- if (WARN_ON(!horiz_pixels))
- return 0;
-
- return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
-}
-
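A worked example of the FBC conversion above, using hypothetical numbers (a 14-cacheline primary WM on a 1920-pixel-wide, 4 bytes-per-pixel plane):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pri_val = 14;		/* primary WM in 64-byte cachelines */
	unsigned int horiz_pixels = 1920, cpp = 4;

	/* back to bytes, expressed as whole scanlines, plus the +2 guard */
	printf("fbc wm = %u lines\n",
	       DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2);	/* 3 */
	return 0;
}
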
-struct ilk_wm_maximums {
- u16 pri;
- u16 spr;
- u16 cur;
- u16 fbc;
-};
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 mem_value, bool is_lp)
-{
- u32 method1, method2;
- int cpp;
-
- if (mem_value == 0)
- return U32_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
-
- if (!is_lp)
- return method1;
-
- method2 = ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp, mem_value);
-
- return min(method1, method2);
-}
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 mem_value)
-{
- u32 method1, method2;
- int cpp;
-
- if (mem_value == 0)
- return U32_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
- method2 = ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp, mem_value);
- return min(method1, method2);
-}
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 mem_value)
-{
- int cpp;
-
- if (mem_value == 0)
- return U32_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- return ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp, mem_value);
-}
-
-/* Only for WM_LP. */
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 pri_val)
-{
- int cpp;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp);
-}
-
-static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 8)
- return 3072;
- else if (DISPLAY_VER(dev_priv) >= 7)
- return 768;
- else
- return 512;
-}
-
-static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
- int level, bool is_sprite)
-{
- if (DISPLAY_VER(dev_priv) >= 8)
- /* BDW primary/sprite plane watermarks */
- return level == 0 ? 255 : 2047;
- else if (DISPLAY_VER(dev_priv) >= 7)
- /* IVB/HSW primary/sprite plane watermarks */
- return level == 0 ? 127 : 1023;
- else if (!is_sprite)
- /* ILK/SNB primary plane watermarks */
- return level == 0 ? 127 : 511;
- else
- /* ILK/SNB sprite plane watermarks */
- return level == 0 ? 63 : 255;
-}
-
-static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
-{
- if (DISPLAY_VER(dev_priv) >= 7)
- return level == 0 ? 63 : 255;
- else
- return level == 0 ? 31 : 63;
-}
-
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 8)
- return 31;
- else
- return 15;
-}
-
-/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
- int level,
- const struct intel_wm_config *config,
- enum intel_ddb_partitioning ddb_partitioning,
- bool is_sprite)
-{
- unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
-
- /* if sprites aren't enabled, sprites get nothing */
- if (is_sprite && !config->sprites_enabled)
- return 0;
-
- /* HSW allows LP1+ watermarks even with multiple pipes */
- if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_NUM_PIPES(dev_priv);
-
- /*
- * For some reason the non self refresh
- * FIFO size is only half of the self
- * refresh FIFO size on ILK/SNB.
- */
- if (DISPLAY_VER(dev_priv) <= 6)
- fifo_size /= 2;
- }
-
- if (config->sprites_enabled) {
- /* level 0 is always calculated with 1:1 split */
- if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
- if (is_sprite)
- fifo_size *= 5;
- fifo_size /= 6;
- } else {
- fifo_size /= 2;
- }
- }
-
- /* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
-}
-
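Worked numbers for the partitioning above, assuming a single-pipe IVB-style configuration with a 768-byte LP FIFO, sprites enabled, and an LP1+ level (so the FIFO is not divided per pipe); values are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int fifo_size = 768;	/* hypothetical IVB LP1+ FIFO */

	/* INTEL_DDB_PART_1_2: primary and sprite split the FIFO evenly */
	printf("1:1  primary=%u sprite=%u\n", fifo_size / 2, fifo_size / 2);	/* 384 384 */
	/* INTEL_DDB_PART_5_6 (LP1+ only): sprite gets 5/6, primary the remaining 1/6 */
	printf("5:6  primary=%u sprite=%u\n", fifo_size / 6, fifo_size * 5 / 6);	/* 128 640 */
	return 0;
}
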
-/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
- int level,
- const struct intel_wm_config *config)
-{
- /* HSW LP1+ watermarks w/ multiple pipes */
- if (level > 0 && config->num_pipes_active > 1)
- return 64;
-
- /* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev_priv, level);
-}
-
-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
- int level,
- const struct intel_wm_config *config,
- enum intel_ddb_partitioning ddb_partitioning,
- struct ilk_wm_maximums *max)
-{
- max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev_priv, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
-}
-
-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
- int level,
- struct ilk_wm_maximums *max)
-{
- max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
- max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
-}
-
-static bool ilk_validate_wm_level(int level,
- const struct ilk_wm_maximums *max,
- struct intel_wm_level *result)
-{
- bool ret;
-
- /* already determined to be invalid? */
- if (!result->enable)
- return false;
-
- result->enable = result->pri_val <= max->pri &&
- result->spr_val <= max->spr &&
- result->cur_val <= max->cur;
-
- ret = result->enable;
-
- /*
- * HACK until we can pre-compute everything,
- * and thus fail gracefully if LP0 watermarks
- * are exceeded...
- */
- if (level == 0 && !result->enable) {
- if (result->pri_val > max->pri)
- DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
- level, result->pri_val, max->pri);
- if (result->spr_val > max->spr)
- DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
- level, result->spr_val, max->spr);
- if (result->cur_val > max->cur)
- DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
- level, result->cur_val, max->cur);
-
- result->pri_val = min_t(u32, result->pri_val, max->pri);
- result->spr_val = min_t(u32, result->spr_val, max->spr);
- result->cur_val = min_t(u32, result->cur_val, max->cur);
- result->enable = true;
- }
-
- return ret;
-}
-
-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
- const struct intel_crtc *crtc,
- int level,
- struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *pristate,
- const struct intel_plane_state *sprstate,
- const struct intel_plane_state *curstate,
- struct intel_wm_level *result)
-{
- u16 pri_latency = dev_priv->display.wm.pri_latency[level];
- u16 spr_latency = dev_priv->display.wm.spr_latency[level];
- u16 cur_latency = dev_priv->display.wm.cur_latency[level];
-
- /* WM1+ latency values stored in 0.5us units */
- if (level > 0) {
- pri_latency *= 5;
- spr_latency *= 5;
- cur_latency *= 5;
- }
-
- if (pristate) {
- result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
- pri_latency, level);
- result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
- }
-
- if (sprstate)
- result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
-
- if (curstate)
- result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
-
- result->enable = true;
-}
-
-static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
- u64 sskpd;
-
- sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
- if (wm[0] == 0)
- wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
- wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
- wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
- wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
- wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
-}
-
-static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
- u32 sskpd;
-
- sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
- wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
- wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
- wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
-}
-
-static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
- u32 mltr;
-
- mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
-
- /* ILK primary LP0 latency is 700 ns */
- wm[0] = 7;
- wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
- wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
-}
-
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
-{
- /* ILK sprite LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
- wm[0] = 13;
-}
-
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
-{
- /* ILK cursor LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
- wm[0] = 13;
-}
-
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
-{
- /* how many WM levels are we expecting */
- if (HAS_HW_SAGV_WM(dev_priv))
- return 5;
- else if (DISPLAY_VER(dev_priv) >= 9)
- return 7;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- return 4;
- else if (DISPLAY_VER(dev_priv) >= 6)
- return 3;
- else
- return 2;
-}
-
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name, const u16 wm[])
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = wm[level];
-
- if (latency == 0) {
- drm_dbg_kms(&dev_priv->drm,
- "%s WM%d latency not provided\n",
- name, level);
- continue;
- }
-
- /*
- * - latencies are in us on gen9.
- * - before then, WM1+ latency values are in 0.5us units
- */
- if (DISPLAY_VER(dev_priv) >= 9)
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s WM%d latency %u (%u.%u usec)\n", name, level,
- wm[level], latency / 10, latency % 10);
- }
-}
-
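A small sketch of the unit handling above: on pre-gen9 platforms the raw WM1+ fields are in 0.5us steps, the computation works in 0.1us steps, and the debug print shows microseconds (the raw value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int raw = 4;			/* hypothetical WM1 latency field */
	unsigned int latency = raw * 5;		/* 0.5us units -> 0.1us units */

	printf("WM1 latency %u (%u.%u usec)\n", raw, latency / 10, latency % 10);
	/* prints: WM1 latency 4 (2.0 usec) */
	return 0;
}
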
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5], u16 min)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- if (wm[0] >= min)
- return false;
-
- wm[0] = max(wm[0], min);
- for (level = 1; level <= max_level; level++)
- wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
-
- return true;
-}
-
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
-{
- bool changed;
-
- /*
- * The BIOS provided WM memory latency values are often
- * inadequate for high resolution displays. Adjust them.
- */
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
-
- if (!changed)
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-}
-
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
-{
- /*
- * On some SNB machines (Thinkpad X220 Tablet at least)
- * LP3 usage can cause vblank interrupts to be lost.
- * The DEIIR bit will go high but it looks like the CPU
- * never gets interrupted.
- *
-	 * It's not clear whether other interrupt sources could
- * be affected or if this is somehow limited to vblank
- * interrupts only. To play it safe we disable LP3
- * watermarks entirely.
- */
- if (dev_priv->display.wm.pri_latency[3] == 0 &&
- dev_priv->display.wm.spr_latency[3] == 0 &&
- dev_priv->display.wm.cur_latency[3] == 0)
- return;
-
- dev_priv->display.wm.pri_latency[3] = 0;
- dev_priv->display.wm.spr_latency[3] = 0;
- dev_priv->display.wm.cur_latency[3] = 0;
-
- drm_dbg_kms(&dev_priv->drm,
- "LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-}
-
-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else if (DISPLAY_VER(dev_priv) >= 6)
- snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else
- ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
-
- memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
- memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
-
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
-
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-
- if (DISPLAY_VER(dev_priv) == 6) {
- snb_wm_latency_quirk(dev_priv);
- snb_wm_lp3_irq_quirk(dev_priv);
- }
-}
-
-static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
- struct intel_pipe_wm *pipe_wm)
-{
- /* LP0 watermark maximums depend on this pipe alone */
- const struct intel_wm_config config = {
- .num_pipes_active = 1,
- .sprites_enabled = pipe_wm->sprites_enabled,
- .sprites_scaled = pipe_wm->sprites_scaled,
- };
- struct ilk_wm_maximums max;
-
- /* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
-
- /* At least LP0 must be valid */
- if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
- drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
- return false;
- }
-
- return true;
-}
-
-/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_pipe_wm *pipe_wm;
- struct intel_plane *plane;
- const struct intel_plane_state *plane_state;
- const struct intel_plane_state *pristate = NULL;
- const struct intel_plane_state *sprstate = NULL;
- const struct intel_plane_state *curstate = NULL;
- int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
- struct ilk_wm_maximums max;
-
- pipe_wm = &crtc_state->wm.ilk.optimal;
-
- intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
- if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
- pristate = plane_state;
- else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
- sprstate = plane_state;
- else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
- curstate = plane_state;
- }
-
- pipe_wm->pipe_enabled = crtc_state->hw.active;
- pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
- pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
-
- usable_level = max_level;
-
- /* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
- usable_level = 1;
-
- /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
- if (pipe_wm->sprites_scaled)
- usable_level = 0;
-
- memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
- pristate, sprstate, curstate, &pipe_wm->wm[0]);
-
- if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
- return -EINVAL;
-
- ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
-
- for (level = 1; level <= usable_level; level++) {
- struct intel_wm_level *wm = &pipe_wm->wm[level];
-
- ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
- pristate, sprstate, curstate, wm);
-
- /*
- * Disable any watermark level that exceeds the
- * register maximums since such watermarks are
- * always invalid.
- */
- if (!ilk_validate_wm_level(level, &max, wm)) {
- memset(wm, 0, sizeof(*wm));
- break;
- }
- }
-
- return 0;
-}
-
-/*
- * Build a set of 'intermediate' watermark values that satisfy both the old
- * state and the new state. These can be programmed to the hardware
- * immediately.
- */
-static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
- const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- /*
- * Start with the final, target watermarks, then combine with the
- * currently active watermarks to get values that are safe both before
- * and after the vblank.
- */
- *a = new_crtc_state->wm.ilk.optimal;
- if (!new_crtc_state->hw.active ||
- intel_crtc_needs_modeset(new_crtc_state) ||
- state->skip_intermediate_wm)
- return 0;
-
- a->pipe_enabled |= b->pipe_enabled;
- a->sprites_enabled |= b->sprites_enabled;
- a->sprites_scaled |= b->sprites_scaled;
-
- for (level = 0; level <= max_level; level++) {
- struct intel_wm_level *a_wm = &a->wm[level];
- const struct intel_wm_level *b_wm = &b->wm[level];
-
- a_wm->enable &= b_wm->enable;
- a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
- a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
- a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
- a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
- }
-
- /*
- * We need to make sure that these merged watermark values are
- * actually a valid configuration themselves. If they're not,
- * there's no safe way to transition from the old state to
- * the new state, so we need to fail the atomic transaction.
- */
- if (!ilk_validate_pipe_wm(dev_priv, a))
- return -EINVAL;
-
- /*
- * If our intermediate WM are identical to the final WM, then we can
- * omit the post-vblank programming; only update if it's different.
- */
- if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
- new_crtc_state->wm.need_postvbl_update = true;
-
- return 0;
-}
-
-/*
- * Merge the watermarks from all active pipes for a specific level.
- */
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
- int level,
- struct intel_wm_level *ret_wm)
-{
- const struct intel_crtc *crtc;
-
- ret_wm->enable = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
- const struct intel_wm_level *wm = &active->wm[level];
-
- if (!active->pipe_enabled)
- continue;
-
- /*
- * The watermark values may have been used in the past,
- * so we must maintain them in the registers for some
- * time even if the level is now disabled.
- */
- if (!wm->enable)
- ret_wm->enable = false;
-
- ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
- ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
- ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
- ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
- }
-}
-
-/*
- * Merge all low power watermarks for all active pipes.
- */
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
- const struct intel_wm_config *config,
- const struct ilk_wm_maximums *max,
- struct intel_pipe_wm *merged)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
- int last_enabled_level = max_level;
-
- /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
- config->num_pipes_active > 1)
- last_enabled_level = 0;
-
- /* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
-
- /* merge each WM1+ level */
- for (level = 1; level <= max_level; level++) {
- struct intel_wm_level *wm = &merged->wm[level];
-
- ilk_merge_wm_level(dev_priv, level, wm);
-
- if (level > last_enabled_level)
- wm->enable = false;
- else if (!ilk_validate_wm_level(level, max, wm))
- /* make sure all following levels get disabled */
- last_enabled_level = level - 1;
-
- /*
- * The spec says it is preferred to disable
- * FBC WMs instead of disabling a WM level.
- */
- if (wm->fbc_val > max->fbc) {
- if (wm->enable)
- merged->fbc_wm_enabled = false;
- wm->fbc_val = 0;
- }
- }
-
- /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
- if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
- for (level = 2; level <= max_level; level++) {
- struct intel_wm_level *wm = &merged->wm[level];
-
- wm->enable = false;
- }
- }
-}
-
-static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
-{
- /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
- return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
-}
-
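A quick illustration of the LP-slot-to-level mapping above; the bool stands in for pipe_wm->wm[4].enable:

#include <stdbool.h>
#include <stdio.h>

static int wm_lp_to_level(int wm_lp, bool wm4_enabled)
{
	/* LP1,LP2,LP3 -> levels 1,2,3 normally, or 1,3,4 when WM4 is in use */
	return wm_lp + (wm_lp >= 2 && wm4_enabled);
}

int main(void)
{
	for (int wm_lp = 1; wm_lp <= 3; wm_lp++)
		printf("LP%d -> level %d (WM4 enabled) / %d (disabled)\n", wm_lp,
		       wm_lp_to_level(wm_lp, true), wm_lp_to_level(wm_lp, false));
	return 0;
}
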
-/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
- int level)
-{
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- return 2 * level;
- else
- return dev_priv->display.wm.pri_latency[level];
-}
-
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
- const struct intel_pipe_wm *merged,
- enum intel_ddb_partitioning partitioning,
- struct ilk_wm_values *results)
-{
- struct intel_crtc *crtc;
- int level, wm_lp;
-
- results->enable_fbc_wm = merged->fbc_wm_enabled;
- results->partitioning = partitioning;
-
- /* LP1+ register values */
- for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
- const struct intel_wm_level *r;
-
- level = ilk_wm_lp_to_level(wm_lp, merged);
-
- r = &merged->wm[level];
-
- /*
- * Maintain the watermark values even if the level is
- * disabled. Doing otherwise could cause underruns.
- */
- results->wm_lp[wm_lp - 1] =
- WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
- WM_LP_PRIMARY(r->pri_val) |
- WM_LP_CURSOR(r->cur_val);
-
- if (r->enable)
- results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
-
- if (DISPLAY_VER(dev_priv) >= 8)
- results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
- else
- results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
-
- results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
-
- /*
- * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
- * level is disabled. Doing otherwise could cause underruns.
- */
- if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
- drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
- results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
- }
- }
-
- /* LP0 register values */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum pipe pipe = crtc->pipe;
- const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
- const struct intel_wm_level *r = &pipe_wm->wm[0];
-
- if (drm_WARN_ON(&dev_priv->drm, !r->enable))
- continue;
-
- results->wm_pipe[pipe] =
- WM0_PIPE_PRIMARY(r->pri_val) |
- WM0_PIPE_SPRITE(r->spr_val) |
- WM0_PIPE_CURSOR(r->cur_val);
- }
-}
-
-/* Find the result with the highest level enabled. Check for enable_fbc_wm in
- * case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
- struct intel_pipe_wm *r1,
- struct intel_pipe_wm *r2)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
- int level1 = 0, level2 = 0;
-
- for (level = 1; level <= max_level; level++) {
- if (r1->wm[level].enable)
- level1 = level;
- if (r2->wm[level].enable)
- level2 = level;
- }
-
- if (level1 == level2) {
- if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
- return r2;
- else
- return r1;
- } else if (level1 > level2) {
- return r1;
- } else {
- return r2;
- }
-}
-
-/* dirty bits used to track which watermarks need changes */
-#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
-#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
-#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
-#define WM_DIRTY_FBC (1 << 24)
-#define WM_DIRTY_DDB (1 << 25)
-
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
- const struct ilk_wm_values *old,
- const struct ilk_wm_values *new)
-{
- unsigned int dirty = 0;
- enum pipe pipe;
- int wm_lp;
-
- for_each_pipe(dev_priv, pipe) {
- if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
- dirty |= WM_DIRTY_PIPE(pipe);
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
- }
-
- if (old->enable_fbc_wm != new->enable_fbc_wm) {
- dirty |= WM_DIRTY_FBC;
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
-
- if (old->partitioning != new->partitioning) {
- dirty |= WM_DIRTY_DDB;
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
-
- /* LP1+ watermarks already deemed dirty, no need to continue */
- if (dirty & WM_DIRTY_LP_ALL)
- return dirty;
-
- /* Find the lowest numbered LP1+ watermark in need of an update... */
- for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
- if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
- old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
- break;
- }
-
- /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
- for (; wm_lp <= 3; wm_lp++)
- dirty |= WM_DIRTY_LP(wm_lp);
-
- return dirty;
-}
-
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
- unsigned int dirty)
-{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
- bool changed = false;
-
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
- previous->wm_lp[2] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
- changed = true;
- }
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
- previous->wm_lp[1] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
- changed = true;
- }
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
- previous->wm_lp[0] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
- changed = true;
- }
-
- /*
- * Don't touch WM_LP_SPRITE_ENABLE here.
- * Doing so could cause underruns.
- */
-
- return changed;
-}
-
-/*
- * The spec says we shouldn't write when we don't need to, because every write
- * causes WMs to be re-evaluated, expending some power.
- */
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
- struct ilk_wm_values *results)
-{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
- unsigned int dirty;
-
- dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
- if (!dirty)
- return;
-
- _ilk_disable_lp_wm(dev_priv, dirty);
-
- if (dirty & WM_DIRTY_PIPE(PIPE_A))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
- if (dirty & WM_DIRTY_PIPE(PIPE_B))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
- if (dirty & WM_DIRTY_PIPE(PIPE_C))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
-
- if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- WM_MISC_DATA_PARTITION_5_6);
- else
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- DISP_DATA_PARTITION_5_6);
- }
-
- if (dirty & WM_DIRTY_FBC)
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
- results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
-
- if (dirty & WM_DIRTY_LP(1) &&
- previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
-
- if (DISPLAY_VER(dev_priv) >= 7) {
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
- }
-
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
-
- dev_priv->display.wm.hw = *results;
-}
-
-bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
-{
- return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
-}
-
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
- struct intel_wm_config *config)
-{
- struct intel_crtc *crtc;
-
- /* Compute the currently _active_ config */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
-
- if (!wm->pipe_enabled)
- continue;
-
- config->sprites_enabled |= wm->sprites_enabled;
- config->sprites_scaled |= wm->sprites_scaled;
- config->num_pipes_active++;
- }
-}
-
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
-{
- struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
- struct ilk_wm_maximums max;
- struct intel_wm_config config = {};
- struct ilk_wm_values results = {};
- enum intel_ddb_partitioning partitioning;
-
- ilk_compute_wm_config(dev_priv, &config);
-
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
-
- /* 5/6 split only in single pipe config on IVB+ */
- if (DISPLAY_VER(dev_priv) >= 7 &&
- config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
-
- best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
- } else {
- best_lp_wm = &lp_wm_1_2;
- }
-
- partitioning = (best_lp_wm == &lp_wm_1_2) ?
- INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
-
- ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
-
- ilk_write_wm_values(dev_priv, &results);
-}
-
-static void ilk_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void ilk_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state->wm.need_postvbl_update)
- return;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
- struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
- struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
- enum pipe pipe = crtc->pipe;
-
- hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
-
- memset(active, 0, sizeof(*active));
-
- active->pipe_enabled = crtc->active;
-
- if (active->pipe_enabled) {
- u32 tmp = hw->wm_pipe[pipe];
-
- /*
- * For active pipes LP0 watermark is marked as
-		 * enabled, and LP1+ watermarks as disabled since
- * we can't really reverse compute them in case
- * multiple pipes are active.
- */
- active->wm[0].enable = true;
- active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
- active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
- active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
- } else {
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- /*
- * For inactive pipes, all watermark levels
- * should be marked as enabled but zeroed,
- * which is what we'd compute them to.
- */
- for (level = 0; level <= max_level; level++)
- active->wm[level].enable = true;
- }
-
- crtc->wm.active.ilk = *active;
-}
-
-#define _FW_WM(value, plane) \
- (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
-#define _FW_WM_VLV(value, plane) \
- (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
-
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
- struct g4x_wm_values *wm)
-{
- u32 tmp;
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
- wm->sr.plane = _FW_WM(tmp, SR);
- wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
- wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
- wm->sr.fbc = _FW_WM(tmp, FBC_SR);
- wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
- wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
- wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
- wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
- wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
-}
-
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
- struct vlv_wm_values *wm)
-{
- enum pipe pipe;
- u32 tmp;
-
- for_each_pipe(dev_priv, pipe) {
- tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
-
- wm->ddl[pipe].plane[PLANE_PRIMARY] =
- (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].plane[PLANE_CURSOR] =
- (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].plane[PLANE_SPRITE0] =
- (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].plane[PLANE_SPRITE1] =
- (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- }
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
- wm->sr.plane = _FW_WM(tmp, SR);
- wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
- wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
- wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
-
- if (IS_CHERRYVIEW(dev_priv)) {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
- wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
- wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
- wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
- wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
- wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
- wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
- wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
- wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
- } else {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
- wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
- }
-}
-
-#undef _FW_WM
-#undef _FW_WM_VLV
-
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
- struct intel_crtc *crtc;
-
- g4x_read_wm_values(dev_priv, wm);
-
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct g4x_wm_state *active = &crtc->wm.active.g4x;
- struct g4x_pipe_wm *raw;
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
- int level, max_level;
-
- active->cxsr = wm->cxsr;
- active->hpll_en = wm->hpll_en;
- active->fbc_en = wm->fbc_en;
-
- active->sr = wm->sr;
- active->hpll = wm->hpll;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- active->wm.plane[plane_id] =
- wm->pipe[pipe].plane[plane_id];
- }
-
- if (wm->cxsr && wm->hpll_en)
- max_level = G4X_WM_LEVEL_HPLL;
- else if (wm->cxsr)
- max_level = G4X_WM_LEVEL_SR;
- else
- max_level = G4X_WM_LEVEL_NORMAL;
-
- level = G4X_WM_LEVEL_NORMAL;
- raw = &crtc_state->wm.g4x.raw[level];
- for_each_plane_id_on_crtc(crtc, plane_id)
- raw->plane[plane_id] = active->wm.plane[plane_id];
-
- level = G4X_WM_LEVEL_SR;
- if (level > max_level)
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- raw->plane[PLANE_PRIMARY] = active->sr.plane;
- raw->plane[PLANE_CURSOR] = active->sr.cursor;
- raw->plane[PLANE_SPRITE0] = 0;
- raw->fbc = active->sr.fbc;
-
- level = G4X_WM_LEVEL_HPLL;
- if (level > max_level)
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- raw->plane[PLANE_PRIMARY] = active->hpll.plane;
- raw->plane[PLANE_CURSOR] = active->hpll.cursor;
- raw->plane[PLANE_SPRITE0] = 0;
- raw->fbc = active->hpll.fbc;
-
- level++;
- out:
- for_each_plane_id_on_crtc(crtc, plane_id)
- g4x_raw_plane_wm_set(crtc_state, level,
- plane_id, USHRT_MAX);
- g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
-
- g4x_invalidate_wms(crtc, active, level);
-
- crtc_state->wm.g4x.optimal = *active;
- crtc_state->wm.g4x.intermediate = *active;
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0]);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- drm_dbg_kms(&dev_priv->drm,
- "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
- wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
- str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
- str_yes_no(wm->fbc_en));
-}
-
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
-{
- struct intel_plane *plane;
- struct intel_crtc *crtc;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
-
- for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv, plane->pipe);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- enum plane_id plane_id = plane->id;
- int level, num_levels = intel_wm_num_levels(dev_priv);
-
- if (plane_state->uapi.visible)
- continue;
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw =
- &crtc_state->wm.g4x.raw[level];
-
- raw->plane[plane_id] = 0;
-
- if (plane_id == PLANE_PRIMARY)
- raw->fbc = 0;
- }
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- int ret;
-
- ret = _g4x_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
-
- crtc_state->wm.g4x.intermediate =
- crtc_state->wm.g4x.optimal;
- crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- }
-
- g4x_program_watermarks(dev_priv);
-
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
- struct intel_crtc *crtc;
- u32 val;
-
- vlv_read_wm_values(dev_priv, wm);
-
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- wm->level = VLV_WM_LEVEL_PM2;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- if (val & DSP_MAXFIFO_PM5_ENABLE)
- wm->level = VLV_WM_LEVEL_PM5;
-
- /*
- * If DDR DVFS is disabled in the BIOS, Punit
- * will never ack the request. So if that happens
- * assume we don't have to enable/disable DDR DVFS
- * dynamically. To test that just set the REQ_ACK
- * bit to poke the Punit, but don't change the
- * HIGH/LOW bits so that we don't actually change
- * the current state.
- */
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
- val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
-
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- drm_dbg_kms(&dev_priv->drm,
- "Punit not acking DDR DVFS request, "
- "assuming DDR DVFS is disabled\n");
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5;
- } else {
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
- if ((val & FORCE_DDR_HIGH_FREQ) == 0)
- wm->level = VLV_WM_LEVEL_DDR_DVFS;
- }
-
- vlv_punit_put(dev_priv);
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct vlv_wm_state *active = &crtc->wm.active.vlv;
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
- int level;
-
- vlv_get_fifo_size(crtc_state);
-
- active->num_levels = wm->level + 1;
- active->cxsr = wm->cxsr;
-
- for (level = 0; level < active->num_levels; level++) {
- struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[level];
-
- active->sr[level].plane = wm->sr.plane;
- active->sr[level].cursor = wm->sr.cursor;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- active->wm[level].plane[plane_id] =
- wm->pipe[pipe].plane[plane_id];
-
- raw->plane[plane_id] =
- vlv_invert_wm_value(active->wm[level].plane[plane_id],
- fifo_state->plane[plane_id]);
- }
- }
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- vlv_raw_plane_wm_set(crtc_state, level,
- plane_id, USHRT_MAX);
- vlv_invalidate_wms(crtc, active, level);
-
- crtc_state->wm.vlv.optimal = *active;
- crtc_state->wm.vlv.intermediate = *active;
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0],
- wm->pipe[pipe].plane[PLANE_SPRITE1]);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
-}
-
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
-{
- struct intel_plane *plane;
- struct intel_crtc *crtc;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
-
- for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv, plane->pipe);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- enum plane_id plane_id = plane->id;
- int level, num_levels = intel_wm_num_levels(dev_priv);
-
- if (plane_state->uapi.visible)
- continue;
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[level];
-
- raw->plane[plane_id] = 0;
- }
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- int ret;
-
- ret = _vlv_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
-
- crtc_state->wm.vlv.intermediate =
- crtc_state->wm.vlv.optimal;
- crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- }
-
- vlv_program_watermarks(dev_priv);
-
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-/*
- * FIXME should probably kill this and improve
- * the real watermark readout/sanitation instead
- */
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
-{
- intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
-
- /*
- * Don't touch WM_LP_SPRITE_ENABLE here.
- * Doing so could cause underruns.
- */
-}
-
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
- struct intel_crtc *crtc;
-
- ilk_init_lp_watermarks(dev_priv);
-
- for_each_intel_crtc(&dev_priv->drm, crtc)
- ilk_pipe_wm_get_hw_state(crtc);
-
- hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
- hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
- hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
-
- hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
- if (DISPLAY_VER(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
- hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
- INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
- INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
- hw->enable_fbc_wm =
- !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -4281,16 +320,6 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
0, TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
-static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_LPT_LP(dev_priv)) {
- u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
-
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
- }
-}
-
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
int general_prio_credits,
int high_prio_credits)
@@ -4335,10 +364,6 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
- /* Wa_1409825376:tgl (pre-prod)*/
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
- intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, TGL_VRH_GATING_DIS);
-
/* Wa_14013723622:tgl,rkl,dg1,adl-s */
if (DISPLAY_VER(dev_priv) == 12)
intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
@@ -4356,15 +381,6 @@ static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
-static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- gen12lp_init_clock_gating(dev_priv);
-
- /* Wa_1409836686:dg1[a0] */
- if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, DPT_GATING_DIS);
-}
-
static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* Wa_22010146351:xehpsdv */
@@ -4763,12 +779,6 @@ void intel_init_clock_gating(struct drm_i915_private *dev_priv)
dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
}
-void intel_suspend_hw(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_LPT(dev_priv))
- lpt_suspend_hw(dev_priv);
-}
-
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
drm_dbg_kms(&dev_priv->drm,
@@ -4784,7 +794,6 @@ CG_FUNCS(pvc);
CG_FUNCS(dg2);
CG_FUNCS(xehpsdv);
CG_FUNCS(adlp);
-CG_FUNCS(dg1);
CG_FUNCS(gen12lp);
CG_FUNCS(icl);
CG_FUNCS(cfl);
@@ -4819,7 +828,9 @@ CG_FUNCS(nop);
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_PONTEVECCHIO(dev_priv))
+ if (IS_METEORLAKE(dev_priv))
+ dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
+ else if (IS_PONTEVECCHIO(dev_priv))
dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs;
else if (IS_DG2(dev_priv))
dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
@@ -4827,8 +838,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
else if (IS_ALDERLAKE_P(dev_priv))
dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
- else if (IS_DG1(dev_priv))
- dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 12)
dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 11)
@@ -4874,117 +883,3 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
}
}
-
-static const struct intel_wm_funcs ilk_wm_funcs = {
- .compute_pipe_wm = ilk_compute_pipe_wm,
- .compute_intermediate_wm = ilk_compute_intermediate_wm,
- .initial_watermarks = ilk_initial_watermarks,
- .optimize_watermarks = ilk_optimize_watermarks,
-};
-
-static const struct intel_wm_funcs vlv_wm_funcs = {
- .compute_pipe_wm = vlv_compute_pipe_wm,
- .compute_intermediate_wm = vlv_compute_intermediate_wm,
- .initial_watermarks = vlv_initial_watermarks,
- .optimize_watermarks = vlv_optimize_watermarks,
- .atomic_update_watermarks = vlv_atomic_update_fifo,
-};
-
-static const struct intel_wm_funcs g4x_wm_funcs = {
- .compute_pipe_wm = g4x_compute_pipe_wm,
- .compute_intermediate_wm = g4x_compute_intermediate_wm,
- .initial_watermarks = g4x_initial_watermarks,
- .optimize_watermarks = g4x_optimize_watermarks,
-};
-
-static const struct intel_wm_funcs pnv_wm_funcs = {
- .update_wm = pnv_update_wm,
-};
-
-static const struct intel_wm_funcs i965_wm_funcs = {
- .update_wm = i965_update_wm,
-};
-
-static const struct intel_wm_funcs i9xx_wm_funcs = {
- .update_wm = i9xx_update_wm,
-};
-
-static const struct intel_wm_funcs i845_wm_funcs = {
- .update_wm = i845_update_wm,
-};
-
-static const struct intel_wm_funcs nop_funcs = {
-};
-
-/* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 9) {
- skl_wm_init(dev_priv);
- return;
- }
-
- /* For cxsr */
- if (IS_PINEVIEW(dev_priv))
- pnv_get_mem_freq(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 5)
- ilk_get_mem_freq(dev_priv);
-
- /* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_setup_wm_latency(dev_priv);
-
- if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] &&
- dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) ||
- (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] &&
- dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) {
- dev_priv->display.funcs.wm = &ilk_wm_funcs;
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.funcs.wm = &nop_funcs;
- }
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &vlv_wm_funcs;
- } else if (IS_G4X(dev_priv)) {
- g4x_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &g4x_wm_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
- if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq)) {
- drm_info(&dev_priv->drm,
- "failed to find known CxSR latency "
- "(found ddr%s fsb freq %d, mem freq %d), "
- "disabling CxSR\n",
- (dev_priv->is_ddr3 == 1) ? "3" : "2",
- dev_priv->fsb_freq, dev_priv->mem_freq);
- /* Disable CxSR and never update its watermark again */
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.funcs.wm = &nop_funcs;
- } else
- dev_priv->display.funcs.wm = &pnv_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.funcs.wm = &i965_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->display.funcs.wm = &i845_wm_funcs;
- else
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else {
- drm_err(&dev_priv->drm,
- "unexpected fall-through in %s\n", __func__);
- dev_priv->display.funcs.wm = &nop_funcs;
- }
-}
-
-void intel_pm_setup(struct drm_i915_private *dev_priv)
-{
- dev_priv->runtime_pm.suspended = false;
- atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index c09b872d65c8..f774bddcdca6 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -13,22 +13,6 @@ struct intel_crtc_state;
struct intel_plane_state;
void intel_init_clock_gating(struct drm_i915_private *dev_priv);
-void intel_suspend_hw(struct drm_i915_private *dev_priv);
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
-void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
-void intel_pm_setup(struct drm_i915_private *dev_priv);
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
-bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name, const u16 wm[]);
-
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 129746713d07..cf5122299b6b 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -652,6 +652,8 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
rpm->kdev = kdev;
rpm->available = HAS_RUNTIME_PM(i915);
+ rpm->suspended = false;
+ atomic_set(&rpm->wakeref_count, 0);
init_intel_runtime_pm_wakeref(rpm);
INIT_LIST_HEAD(&rpm->lmem_userfault_list);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 7894447c2858..e1e1f34490c8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -32,7 +32,6 @@
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_pm.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10
@@ -2460,7 +2459,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
{
- iounmap(regs);
+ iounmap((void __iomem *)regs);
}
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
@@ -2491,7 +2490,8 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
return -EIO;
}
- return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
+ return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
+ (void __force *)uncore->regs);
}
void intel_uncore_init_early(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index cfc9af8b3d21..9d4c7724e98e 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -270,6 +270,60 @@ static bool pxp_component_bound(struct intel_pxp *pxp)
return bound;
}
+static int __pxp_global_teardown_final(struct intel_pxp *pxp)
+{
+ if (!pxp->arb_is_valid)
+ return 0;
+ /*
+ * To ensure synchronous and coherent session teardown completion
+ * in response to suspend or shutdown triggers, don't use a worker.
+ */
+ intel_pxp_mark_termination_in_progress(pxp);
+ intel_pxp_terminate(pxp, false);
+
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
+{
+ if (pxp->arb_is_valid)
+ return 0;
+ /*
+ * The arb-session is currently inactive and we are doing a reset and restart
+ * due to a runtime event. Use the worker that was designed for this.
+ */
+ pxp_queue_termination(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void intel_pxp_end(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ intel_wakeref_t wakeref;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ mutex_lock(&pxp->arb_mutex);
+
+ if (__pxp_global_teardown_final(pxp))
+ drm_dbg(&i915->drm, "PXP end timed out\n");
+
+ mutex_unlock(&pxp->arb_mutex);
+
+ intel_pxp_fini_hw(pxp);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
/*
* the arb session is restarted from the irq work when we receive the
* termination completion interrupt
@@ -286,16 +340,9 @@ int intel_pxp_start(struct intel_pxp *pxp)
mutex_lock(&pxp->arb_mutex);
- if (pxp->arb_is_valid)
- goto unlock;
-
- pxp_queue_termination(pxp);
-
- if (!wait_for_completion_timeout(&pxp->termination,
- msecs_to_jiffies(250))) {
- ret = -ETIMEDOUT;
+ ret = __pxp_global_teardown_restart(pxp);
+ if (ret)
goto unlock;
- }
/* make sure the compiler doesn't optimize the double access */
barrier();
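The two helpers above split session teardown into a synchronous flavour for suspend/shutdown (__pxp_global_teardown_final) and a worker-driven flavour for runtime restarts (__pxp_global_teardown_restart); the later hunk in intel_pxp_pm.c wires the synchronous one into suspend-prepare. A minimal caller-side sketch of that path (not part of the patch), assuming a valid struct intel_pxp pointer:

	/* Suspend-prepare now ends the arb session synchronously via
	 * intel_pxp_end() before invalidating PXP state, rather than only
	 * clearing arb_is_valid as the old code did. */
	intel_pxp_end(pxp);
	intel_pxp_invalidate(pxp);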
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
index 04440fada711..3ded0890cd27 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -24,8 +24,10 @@ void intel_pxp_init_hw(struct intel_pxp *pxp);
void intel_pxp_fini_hw(struct intel_pxp *pxp);
void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp);
+void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
int intel_pxp_start(struct intel_pxp *pxp);
+void intel_pxp_end(struct intel_pxp *pxp);
int intel_pxp_key_check(struct intel_pxp *pxp,
struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
index 739f9072fa5f..26f7d9f01bf3 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
@@ -12,6 +12,9 @@
/* PXP-Opcode for Init Session */
#define PXP42_CMDID_INIT_SESSION 0x1e
+/* PXP-Opcode for Invalidate Stream Key */
+#define PXP42_CMDID_INVALIDATE_STREAM_KEY 0x00000007
+
/* PXP-Input-Packet: Init Session (Arb-Session) */
struct pxp42_create_arb_in {
struct pxp_cmd_header header;
@@ -25,4 +28,16 @@ struct pxp42_create_arb_out {
struct pxp_cmd_header header;
} __packed;
+/* PXP-Input-Packet: Invalidate Stream Key */
+struct pxp42_inv_stream_key_in {
+ struct pxp_cmd_header header;
+ u32 rsvd[3];
+} __packed;
+
+/* PXP-Output-Packet: Invalidate Stream Key */
+struct pxp42_inv_stream_key_out {
+ struct pxp_cmd_header header;
+ u32 rsvd;
+} __packed;
+
#endif /* __INTEL_PXP_FW_INTERFACE_42_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
index aaa8187a0afb..ae9b151b7cb7 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
@@ -28,6 +28,9 @@ struct pxp_cmd_header {
union {
u32 status; /* out */
u32 stream_id; /* in */
+#define PXP_CMDHDR_EXTDATA_SESSION_VALID GENMASK(0, 0)
+#define PXP_CMDHDR_EXTDATA_APP_TYPE GENMASK(1, 1)
+#define PXP_CMDHDR_EXTDATA_SESSION_ID GENMASK(17, 2)
};
/* Length of the message (excluding the header) */
u32 buffer_len;
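The three EXTDATA masks added above carve the existing 32-bit stream_id word into a valid bit, an application-type bit and a 16-bit session id; the new intel_pxp_tee_end_arb_fw_session() later in this diff packs them with FIELD_PREP() from <linux/bitfield.h>. A standalone sketch of the resulting layout, using a hypothetical session id of 5 for illustration only:

	#include <linux/bitfield.h>

	/* bit 0 = session valid, bit 1 = application type, bits 17:2 = session id */
	u32 stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1) |
			FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0) |
			FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, 5);
	/* stream_id == 0x15 == (5 << 2) | (0 << 1) | (1 << 0) */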
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
index 892d39cc61c1..4f836b317424 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -16,7 +16,7 @@ void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
if (!intel_pxp_is_enabled(pxp))
return;
- pxp->arb_is_valid = false;
+ intel_pxp_end(pxp);
intel_pxp_invalidate(pxp);
}
@@ -34,7 +34,7 @@ void intel_pxp_suspend(struct intel_pxp *pxp)
}
}
-void intel_pxp_resume(struct intel_pxp *pxp)
+void intel_pxp_resume_complete(struct intel_pxp *pxp)
{
if (!intel_pxp_is_enabled(pxp))
return;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
index 586be769104f..06b46f535b42 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -11,7 +11,7 @@ struct intel_pxp;
#ifdef CONFIG_DRM_I915_PXP
void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
void intel_pxp_suspend(struct intel_pxp *pxp);
-void intel_pxp_resume(struct intel_pxp *pxp);
+void intel_pxp_resume_complete(struct intel_pxp *pxp);
void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
#else
static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
@@ -22,7 +22,7 @@ static inline void intel_pxp_suspend(struct intel_pxp *pxp)
{
}
-static inline void intel_pxp_resume(struct intel_pxp *pxp)
+static inline void intel_pxp_resume_complete(struct intel_pxp *pxp)
{
}
@@ -32,6 +32,6 @@ static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
#endif
static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
{
- intel_pxp_resume(pxp);
+ intel_pxp_resume_complete(pxp);
}
#endif /* __INTEL_PXP_PM_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index ae413580b81a..448cacb0465d 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -110,14 +110,16 @@ static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp)
intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1);
+ intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION);
+
return ret;
}
-static void pxp_terminate(struct intel_pxp *pxp)
+void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart)
{
int ret;
- pxp->hw_state_invalidated = true;
+ pxp->hw_state_invalidated = post_invalidation_needs_restart;
/*
* if we fail to submit the termination there is no point in waiting for
@@ -165,7 +167,7 @@ static void pxp_session_work(struct work_struct *work)
if (events & PXP_TERMINATION_REQUEST) {
events &= ~PXP_TERMINATION_COMPLETE;
- pxp_terminate(pxp);
+ intel_pxp_terminate(pxp, true);
}
if (events & PXP_TERMINATION_COMPLETE)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
index 903ac52cffa1..ba5788127109 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
@@ -12,9 +12,14 @@ struct intel_pxp;
#ifdef CONFIG_DRM_I915_PXP
void intel_pxp_session_management_init(struct intel_pxp *pxp);
+void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart);
#else
static inline void intel_pxp_session_management_init(struct intel_pxp *pxp)
{
}
+
+static inline void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart)
+{
+}
#endif
#endif /* __INTEL_PXP_SESSION_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 73aa8015f828..d9d248b48093 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -127,6 +127,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev,
intel_wakeref_t wakeref;
int ret = 0;
+ if (!HAS_HECI_PXP(i915)) {
+ pxp->dev_link = device_link_add(i915_kdev, tee_kdev, DL_FLAG_STATELESS);
+ if (drm_WARN_ON(&i915->drm, !pxp->dev_link))
+ return -ENODEV;
+ }
+
mutex_lock(&pxp->tee_mutex);
pxp->pxp_component = data;
pxp->pxp_component->tee_dev = tee_kdev;
@@ -169,6 +175,11 @@ static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
mutex_lock(&pxp->tee_mutex);
pxp->pxp_component = NULL;
mutex_unlock(&pxp->tee_mutex);
+
+ if (pxp->dev_link) {
+ device_link_del(pxp->dev_link);
+ pxp->dev_link = NULL;
+ }
}
static const struct component_ops i915_pxp_tee_component_ops = {
@@ -308,3 +319,38 @@ int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
return ret;
}
+
+void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct pxp42_inv_stream_key_in msg_in = {0};
+ struct pxp42_inv_stream_key_out msg_out = {0};
+ int ret, trials = 0;
+
+try_again:
+ memset(&msg_in, 0, sizeof(msg_in));
+ memset(&msg_out, 0, sizeof(msg_out));
+ msg_in.header.api_version = PXP_APIVER(4, 2);
+ msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+
+ msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id);
+
+ ret = intel_pxp_tee_io_message(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out),
+ NULL);
+
+ /* Coherent cleanup between the GT and firmware is critical, so retry if it fails */
+ if ((ret || msg_out.header.status != 0x0) && ++trials < 3)
+ goto try_again;
+
+ if (ret)
+ drm_err(&i915->drm, "Failed to send tee msg for inv-stream-key-%d, ret=[%d]\n",
+ session_id, ret);
+ else if (msg_out.header.status != 0x0)
+ drm_warn(&i915->drm, "PXP firmware failed inv-stream-key-%d with status 0x%08x\n",
+ session_id, msg_out.header.status);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 7dc5f08d1583..007de49e1ea4 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -32,6 +32,9 @@ struct intel_pxp {
* which are protected by &tee_mutex.
*/
struct i915_pxp_component *pxp_component;
+
+ /** @dev_link: Enforce module relationship for power management ordering. */
+ struct device_link *dev_link;
/**
* @pxp_component_added: track if the pxp component has been added.
* Set and cleared in tee init and fini functions respectively.
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 2535b9684bd1..d91d0ade8abd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -44,7 +44,7 @@ static void trash_stolen(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
const u64 slot = ggtt->error_capture.start;
- const resource_size_t size = resource_size(&i915->dsm);
+ const resource_size_t size = resource_size(&i915->dsm.stolen);
unsigned long page;
u32 prng = 0x12345678;
@@ -53,7 +53,7 @@ static void trash_stolen(struct drm_i915_private *i915)
return;
for (page = 0; page < size; page += PAGE_SIZE) {
- const dma_addr_t dma = i915->dsm.start + page;
+ const dma_addr_t dma = i915->dsm.stolen.start + page;
u32 __iomem *s;
int x;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 568840e7ca66..ece97e4faacb 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -112,7 +112,7 @@ void mock_init_ggtt(struct intel_gt *gt)
ggtt->vm.i915 = gt->i915;
ggtt->vm.is_ggtt = true;
- ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
+ ggtt->gmadr = DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->vm.total = 4096 * PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index bba8cb6e8ae4..9f0651d48d41 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -10,6 +10,7 @@
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "vlv_sideband.h"
struct dram_dimm_info {
u16 size;
@@ -42,6 +43,155 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
#undef DRAM_TYPE_STR
+static void pnv_detect_mem_freq(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* Detect Pineview DDR3 setting */
+ tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void ilk_detect_mem_freq(struct drm_i915_private *dev_priv)
+{
+ u16 ddrpll, csipll;
+
+ ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+}
+
+static void chv_detect_mem_freq(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
+ val = vlv_cck_read(i915, CCK_FUSE_REG);
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
+
+ switch ((val >> 2) & 0x7) {
+ case 3:
+ i915->mem_freq = 2000;
+ break;
+ default:
+ i915->mem_freq = 1600;
+ break;
+ }
+}
+
+static void vlv_detect_mem_freq(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
+
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ i915->mem_freq = 800;
+ break;
+ case 2:
+ i915->mem_freq = 1066;
+ break;
+ case 3:
+ i915->mem_freq = 1333;
+ break;
+ }
+}
+
+static void detect_mem_freq(struct drm_i915_private *i915)
+{
+ if (IS_PINEVIEW(i915))
+ pnv_detect_mem_freq(i915);
+ else if (GRAPHICS_VER(i915) == 5)
+ ilk_detect_mem_freq(i915);
+ else if (IS_CHERRYVIEW(i915))
+ chv_detect_mem_freq(i915);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_detect_mem_freq(i915);
+
+ if (i915->mem_freq)
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
+}
+
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
return dimm->ranks * 64 / (dimm->width ?: 1);
@@ -507,6 +657,8 @@ void intel_dram_detect(struct drm_i915_private *i915)
struct dram_info *dram_info = &i915->dram_info;
int ret;
+ detect_mem_freq(i915);
+
if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
return;
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
new file mode 100644
index 000000000000..6d0204942f7a
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/pci.h>
+#include <linux/pnp.h>
+
+#include <drm/drm_managed.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_gmch.h"
+#include "intel_pci_config.h"
+
+static void intel_gmch_bridge_release(struct drm_device *dev, void *bridge)
+{
+ pci_dev_put(bridge);
+}
+
+int intel_gmch_bridge_setup(struct drm_i915_private *i915)
+{
+ int domain = pci_domain_nr(to_pci_dev(i915->drm.dev)->bus);
+
+ i915->gmch.pdev = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
+ if (!i915->gmch.pdev) {
+ drm_err(&i915->drm, "bridge device not found\n");
+ return -EIO;
+ }
+
+ return drmm_add_action_or_reset(&i915->drm, intel_gmch_bridge_release,
+ i915->gmch.pdev);
+}
+
+/* Allocate space for the MCH regs if needed; return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_i915_private *i915)
+{
+ int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret;
+
+ if (GRAPHICS_VER(i915) >= 4)
+ pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi);
+ pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
+#endif
+
+ /* Get some space for it */
+ i915->gmch.mch_res.name = "i915 MCHBAR";
+ i915->gmch.mch_res.flags = IORESOURCE_MEM;
+ ret = pci_bus_alloc_resource(i915->gmch.pdev->bus,
+ &i915->gmch.mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ i915->gmch.pdev);
+ if (ret) {
+ drm_dbg(&i915->drm, "failed bus alloc: %d\n", ret);
+ i915->gmch.mch_res.start = 0;
+ return ret;
+ }
+
+ if (GRAPHICS_VER(i915) >= 4)
+ pci_write_config_dword(i915->gmch.pdev, reg + 4,
+ upper_32_bits(i915->gmch.mch_res.start));
+
+ pci_write_config_dword(i915->gmch.pdev, reg,
+ lower_32_bits(i915->gmch.mch_res.start));
+ return 0;
+}
+
+/* Set up MCHBAR if possible; flag via mchbar_need_disable if we must disable it again on teardown */
+void intel_gmch_bar_setup(struct drm_i915_private *i915)
+{
+ int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return;
+
+ i915->gmch.mchbar_need_disable = false;
+
+ if (IS_I915G(i915) || IS_I915GM(i915)) {
+ pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, we don't have to do anything */
+ if (enabled)
+ return;
+
+ if (intel_alloc_mchbar_resource(i915))
+ return;
+
+ i915->gmch.mchbar_need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(i915) || IS_I915GM(i915)) {
+ pci_write_config_dword(i915->gmch.pdev, DEVEN,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1);
+ }
+}
+
+void intel_gmch_bar_teardown(struct drm_i915_private *i915)
+{
+ int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+ if (i915->gmch.mchbar_need_disable) {
+ if (IS_I915G(i915) || IS_I915GM(i915)) {
+ u32 deven_val;
+
+ pci_read_config_dword(i915->gmch.pdev, DEVEN,
+ &deven_val);
+ deven_val &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(i915->gmch.pdev, DEVEN,
+ deven_val);
+ } else {
+ u32 mchbar_val;
+
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg,
+ &mchbar_val);
+ mchbar_val &= ~1;
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg,
+ mchbar_val);
+ }
+ }
+
+ if (i915->gmch.mch_res.start)
+ release_resource(&i915->gmch.mch_res);
+}
+
+int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
+{
+ unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+ if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) {
+ drm_err(&i915->drm, "failed to read control word\n");
+ return -EIO;
+ }
+
+ if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
+ return 0;
+
+ if (enable_decode)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+
+ if (pci_write_config_word(i915->gmch.pdev, reg, gmch_ctrl)) {
+ drm_err(&i915->drm, "failed to write control word\n");
+ return -EIO;
+ }
+
+ return 0;
+}
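The early return in intel_gmch_vga_set_state() uses !! to collapse the masked register bit to 0/1 before comparing it with the requested state. An equivalent, more verbose sketch with hypothetical local names, for illustration only:

	bool currently_disabled = gmch_ctrl & INTEL_GMCH_VGA_DISABLE;
	bool want_disabled = !enable_decode;

	/* Nothing to write if the hardware already matches the request. */
	if (currently_disabled == want_disabled)
		return 0;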
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.h b/drivers/gpu/drm/i915/soc/intel_gmch.h
new file mode 100644
index 000000000000..d0133eedc720
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_GMCH_H__
+#define __INTEL_GMCH_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+int intel_gmch_bridge_setup(struct drm_i915_private *i915);
+void intel_gmch_bar_setup(struct drm_i915_private *i915);
+void intel_gmch_bar_teardown(struct drm_i915_private *i915);
+int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode);
+
+#endif /* __INTEL_GMCH_H__ */
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index 6eea6e1a99c0..b98dec3ad817 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -9,6 +9,7 @@
#include "vlv_sideband.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_display_types.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and