-rw-r--r-- drivers/gpu/drm/i915/Makefile | 1
-rw-r--r-- drivers/gpu/drm/i915/display/hsw_ips.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_plane.c | 32
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_plane.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 35
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bo.c | 40
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bo.h | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 43
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 74
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cursor.c | 28
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cursor.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy.c | 134
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 34
-rw-r--r-- drivers/gpu/drm/i915/display/intel_de.h | 107
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 42
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_core.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_device.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_driver.c | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_well.c | 42
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_wa.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.c | 19
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc_wl.c | 25
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpio_phy.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 27
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.c | 24
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_flipq.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.c | 151
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.h | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 49
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lt_phy.c | 439
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lt_phy.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lt_phy_regs.h | 23
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_display.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_refclk.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane_initial.c | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pmdemand.c | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sbi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_phy.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/skl_universal_plane.c | 25
-rw-r--r-- drivers/gpu/drm/i915/display/skl_universal_plane_regs.h | 12
-rw-r--r-- drivers/gpu/drm/i915/display/skl_watermark.c | 40
-rw-r--r-- drivers/gpu/drm/i915/display/skl_watermark_regs.h | 52
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 54
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_create.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c | 103
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h | 54
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 8
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_dram.c | 4
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_dram.h | 1
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_rom.c | 7
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_rom.h | 6
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h | 2
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h | 31
-rw-r--r-- drivers/gpu/drm/xe/display/intel_bo.c | 60
-rw-r--r-- drivers/gpu/drm/xe/xe_device_types.h | 3
83 files changed, 1326 insertions, 853 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 84ec79b64960..49c86a0d44f9 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -156,6 +156,7 @@ gem-y += \
gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_object.o \
+ gem/i915_gem_object_frontbuffer.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index f444c5b7a27b..008d339d5c21 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -56,7 +56,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read.
*/
- if (intel_de_wait_for_set(display, IPS_CTL, IPS_ENABLE, 50))
+ if (intel_de_wait_for_set_ms(display, IPS_CTL, IPS_ENABLE, 50))
drm_err(display->drm,
"Timed out waiting for IPS enable\n");
}
@@ -78,7 +78,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
- if (intel_de_wait_for_clear(display, IPS_CTL, IPS_ENABLE, 100))
+ if (intel_de_wait_for_clear_ms(display, IPS_CTL, IPS_ENABLE, 100))
drm_err(display->drm,
"Timed out waiting for IPS disable\n");
} else {
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 6e39d7f2e0c2..51ccc6bd5f21 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -754,10 +754,9 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
static unsigned int
hsw_primary_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
@@ -766,10 +765,9 @@ hsw_primary_max_stride(struct intel_plane *plane,
static unsigned int
ilk_primary_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
@@ -781,10 +779,9 @@ ilk_primary_max_stride(struct intel_plane *plane,
unsigned int
i965_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
@@ -796,8 +793,8 @@ i965_plane_max_stride(struct intel_plane *plane,
static unsigned int
i915_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
if (modifier == I915_FORMAT_MOD_X_TILED)
return 8 * 1024;
@@ -807,8 +804,8 @@ i915_plane_max_stride(struct intel_plane *plane,
static unsigned int
i8xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
if (plane->i9xx_plane == PLANE_C)
return 4 * 1024;
@@ -1191,10 +1188,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
val = intel_de_read(display, DSPCNTR(display, i9xx_plane));
if (DISPLAY_VER(display) >= 4) {
- if (val & DISP_TILED) {
- plane_config->tiling = I915_TILING_X;
+ if (val & DISP_TILED)
fb->modifier = I915_FORMAT_MOD_X_TILED;
- }
if (val & DISP_ROTATE_180)
plane_config->rotation = DRM_MODE_ROTATE_180;
@@ -1206,14 +1201,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
pixel_format = val & DISP_FORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->format = drm_format_info(fourcc);
+
+ fb->format = drm_get_format_info(display->drm, fourcc, fb->modifier);
if (display->platform.haswell || display->platform.broadwell) {
offset = intel_de_read(display,
DSPOFFSET(display, i9xx_plane));
base = intel_de_read(display, DSPSURF(display, i9xx_plane)) & DISP_ADDR_MASK;
} else if (DISPLAY_VER(display) >= 4) {
- if (plane_config->tiling)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
offset = intel_de_read(display,
DSPTILEOFF(display, i9xx_plane));
else
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index 565dab751301..ec78bf4dd35e 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
enum pipe;
+struct drm_format_info;
struct drm_framebuffer;
struct intel_crtc;
struct intel_display;
@@ -18,8 +19,8 @@ struct intel_plane_state;
#ifdef I915
unsigned int i965_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation);
unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int colot_plane);
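To make the max_stride() conversion above concrete, here is a minimal before/after sketch of a caller; the local variables (stride, info, pixel_format) are placeholders, but the calls mirror the intel_display.c hunk further down in this diff.

	/* before: the hook took a raw fourcc and looked the format up itself */
	stride = plane->max_stride(plane, pixel_format, modifier,
				   DRM_MODE_ROTATE_0);

	/* after: the caller resolves the drm_format_info once and passes it down */
	info = drm_get_format_info(display->drm, pixel_format, modifier);
	stride = plane->max_stride(plane, info, modifier, DRM_MODE_ROTATE_0);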
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 70d4c1bc70fc..9230792960f2 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -148,9 +148,9 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- ret = intel_de_wait_custom(display, DSI_LP_MSG(dsi_trans),
- LPTX_IN_PROGRESS, 0,
- 20, 0, NULL);
+ ret = intel_de_wait_for_clear_us(display,
+ DSI_LP_MSG(dsi_trans),
+ LPTX_IN_PROGRESS, 20);
if (ret)
drm_err(display->drm, "LPTX bit not cleared\n");
}
@@ -534,9 +534,8 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
- ret = intel_de_wait_custom(display, DDI_BUF_CTL(port),
- DDI_BUF_IS_IDLE, 0,
- 500, 0, NULL);
+ ret = intel_de_wait_for_clear_us(display, DDI_BUF_CTL(port),
+ DDI_BUF_IS_IDLE, 500);
if (ret)
drm_err(display->drm, "DDI port:%c buffer idle\n",
port_name(port));
@@ -857,9 +856,9 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
- ret = intel_de_wait_custom(display, DSI_TRANS_FUNC_CONF(dsi_trans),
- LINK_READY, LINK_READY,
- 2500, 0, NULL);
+ ret = intel_de_wait_for_set_us(display,
+ DSI_TRANS_FUNC_CONF(dsi_trans),
+ LINK_READY, 2500);
if (ret)
drm_err(display->drm, "DSI link not ready\n");
}
@@ -1048,8 +1047,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
TRANSCONF_ENABLE);
/* wait for transcoder to be enabled */
- if (intel_de_wait_for_set(display, TRANSCONF(display, dsi_trans),
- TRANSCONF_STATE_ENABLE, 10))
+ if (intel_de_wait_for_set_ms(display, TRANSCONF(display, dsi_trans),
+ TRANSCONF_STATE_ENABLE, 10))
drm_err(display->drm,
"DSI transcoder not enabled\n");
}
@@ -1317,8 +1316,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
TRANSCONF_ENABLE, 0);
/* wait for transcoder to be disabled */
- if (intel_de_wait_for_clear(display, TRANSCONF(display, dsi_trans),
- TRANSCONF_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear_ms(display, TRANSCONF(display, dsi_trans),
+ TRANSCONF_STATE_ENABLE, 50))
drm_err(display->drm,
"DSI trancoder not disabled\n");
}
@@ -1358,9 +1357,8 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
tmp &= ~LINK_ULPS_TYPE_LP11;
intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp);
- ret = intel_de_wait_custom(display, DSI_LP_MSG(dsi_trans),
- LINK_IN_ULPS, LINK_IN_ULPS,
- 10, 0, NULL);
+ ret = intel_de_wait_for_set_us(display, DSI_LP_MSG(dsi_trans),
+ LINK_IN_ULPS, 10);
if (ret)
drm_err(display->drm, "DSI link not in ULPS\n");
}
@@ -1395,9 +1393,8 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
- ret = intel_de_wait_custom(display, DDI_BUF_CTL(port),
- DDI_BUF_IS_IDLE, DDI_BUF_IS_IDLE,
- 8, 0, NULL);
+ ret = intel_de_wait_for_set_us(display, DDI_BUF_CTL(port),
+ DDI_BUF_IS_IDLE, 8);
if (ret)
drm_err(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 852e4d6db8a3..4b41068e9e35 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -36,7 +36,6 @@
#include "soc/intel_rom.h"
-#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
@@ -3145,7 +3144,6 @@ err_free_rom:
static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display,
size_t *sizep)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct vbt_header *vbt = NULL;
vbt = firmware_get_vbt(display, sizep);
@@ -3159,11 +3157,11 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
*/
if (!vbt && display->platform.dgfx)
with_intel_display_rpm(display)
- vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash");
+ vbt = oprom_get_vbt(display, intel_rom_spi(display->drm), sizep, "SPI flash");
if (!vbt)
with_intel_display_rpm(display)
- vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM");
+ vbt = oprom_get_vbt(display, intel_rom_pci(display->drm), sizep, "PCI ROM");
return vbt;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index 6ae1374d5c2b..f3687eb63467 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -29,11 +29,6 @@ bool intel_bo_is_protected(struct drm_gem_object *obj)
return i915_gem_object_is_protected(to_intel_bo(obj));
}
-void intel_bo_flush_if_display(struct drm_gem_object *obj)
-{
- i915_gem_object_flush_if_display(to_intel_bo(obj));
-}
-
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
return i915_gem_fb_mmap(to_intel_bo(obj), vma);
@@ -44,15 +39,40 @@ int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, i
return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size);
}
-struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
+struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct i915_frontbuffer *front;
+
+ front = i915_gem_object_frontbuffer_get(obj);
+ if (!front)
+ return NULL;
+
+ return &front->base;
+}
+
+void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
{
- return i915_gem_object_get_frontbuffer(to_intel_bo(obj));
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ i915_gem_object_frontbuffer_ref(front);
}
-struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
- struct intel_frontbuffer *front)
+void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
{
- return i915_gem_object_set_frontbuffer(to_intel_bo(obj), front);
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ return i915_gem_object_frontbuffer_put(front);
+}
+
+void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *_front)
+{
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ i915_gem_object_flush_if_display(front->obj);
}
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index 48d87019e48a..fc05f680dc76 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -16,13 +16,13 @@ bool intel_bo_is_tiled(struct drm_gem_object *obj);
bool intel_bo_is_userptr(struct drm_gem_object *obj);
bool intel_bo_is_shmem(struct drm_gem_object *obj);
bool intel_bo_is_protected(struct drm_gem_object *obj);
-void intel_bo_flush_if_display(struct drm_gem_object *obj);
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size);
-struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj);
-struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
- struct intel_frontbuffer *front);
+struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj);
+void intel_bo_frontbuffer_ref(struct intel_frontbuffer *front);
+void intel_bo_frontbuffer_put(struct intel_frontbuffer *front);
+void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front);
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
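A minimal sketch of how a display-side caller might use the reworked frontbuffer helpers declared above; the function name and the early-return policy are hypothetical, only the intel_bo_frontbuffer_*() calls come from this diff.

static void example_flush_frontbuffer(struct drm_gem_object *obj)
{
	struct intel_frontbuffer *front;

	/* NULL when no frontbuffer is attached to this object */
	front = intel_bo_frontbuffer_get(obj);
	if (!front)
		return;

	/* flush pending writes so the display engine sees them */
	intel_bo_frontbuffer_flush_for_display(front);

	/* balance the get (assuming _get returns a held reference) */
	intel_bo_frontbuffer_put(front);
}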
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index f97ccc1a96a7..1f6461be50ef 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -805,29 +805,40 @@ void intel_bw_init_hw(struct intel_display *display)
if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VERx100(display) >= 3002)
- tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
- else if (DISPLAY_VER(display) >= 30)
- tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
- else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
- dram_info->type == INTEL_DRAM_GDDR_ECC)
- xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
- else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
- xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
- else if (DISPLAY_VER(display) >= 14)
+ /*
+ * Starting with Xe3p_LPD, the hardware tells us whether memory has ECC
+ * enabled that would impact display bandwidth. However, so far there
+ * are no instructions in Bspec on how to handle that case. Let's
+ * complain if we ever find such a scenario.
+ */
+ if (DISPLAY_VER(display) >= 35)
+ drm_WARN_ON(display->drm, dram_info->ecc_impacting_de_bw);
+
+ if (DISPLAY_VER(display) >= 30) {
+ if (DISPLAY_VERx100(display) == 3002)
+ tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
+ else
+ tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
+ } else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) {
+ if (dram_info->type == INTEL_DRAM_GDDR_ECC)
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
+ else
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
+ } else if (DISPLAY_VER(display) >= 14) {
tgl_get_bw_info(display, dram_info, &mtl_sa_info);
- else if (display->platform.dg2)
+ } else if (display->platform.dg2) {
dg2_get_bw_info(display);
- else if (display->platform.alderlake_p)
+ } else if (display->platform.alderlake_p) {
tgl_get_bw_info(display, dram_info, &adlp_sa_info);
- else if (display->platform.alderlake_s)
+ } else if (display->platform.alderlake_s) {
tgl_get_bw_info(display, dram_info, &adls_sa_info);
- else if (display->platform.rocketlake)
+ } else if (display->platform.rocketlake) {
tgl_get_bw_info(display, dram_info, &rkl_sa_info);
- else if (DISPLAY_VER(display) == 12)
+ } else if (DISPLAY_VER(display) == 12) {
tgl_get_bw_info(display, dram_info, &tgl_sa_info);
- else if (DISPLAY_VER(display) == 11)
+ } else if (DISPLAY_VER(display) == 11) {
icl_get_bw_info(display, dram_info, &icl_sa_info);
+ }
}
static unsigned int intel_bw_num_active_planes(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index bdb42fcc4cb2..37801c744b05 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -902,9 +902,8 @@ static void bdw_set_cdclk(struct intel_display *display,
* According to the spec, it should be enough to poll for this 1 us.
* However, extensive testing shows that this can take longer.
*/
- ret = intel_de_wait_custom(display, LCPLL_CTL,
- LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE,
- 100, 0, NULL);
+ ret = intel_de_wait_for_set_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 100);
if (ret)
drm_err(display->drm, "Switching to FCLK failed\n");
@@ -914,9 +913,8 @@ static void bdw_set_cdclk(struct intel_display *display,
intel_de_rmw(display, LCPLL_CTL,
LCPLL_CD_SOURCE_FCLK, 0);
- ret = intel_de_wait_custom(display, LCPLL_CTL,
- LCPLL_CD_SOURCE_FCLK_DONE, 0,
- 1, 0, NULL);
+ ret = intel_de_wait_for_clear_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 1);
if (ret)
drm_err(display->drm, "Switching back to LCPLL failed\n");
@@ -1114,7 +1112,7 @@ static void skl_dpll0_enable(struct intel_display *display, int vco)
intel_de_rmw(display, LCPLL1_CTL,
0, LCPLL_PLL_ENABLE);
- if (intel_de_wait_for_set(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
+ if (intel_de_wait_for_set_ms(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
drm_err(display->drm, "DPLL0 not locked\n");
display->cdclk.hw.vco = vco;
@@ -1128,7 +1126,7 @@ static void skl_dpll0_disable(struct intel_display *display)
intel_de_rmw(display, LCPLL1_CTL,
LCPLL_PLL_ENABLE, 0);
- if (intel_de_wait_for_clear(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
drm_err(display->drm, "Couldn't disable DPLL0\n");
display->cdclk.hw.vco = 0;
@@ -1535,6 +1533,41 @@ static const struct intel_cdclk_vals xe3lpd_cdclk_table[] = {
{}
};
+static const struct intel_cdclk_vals xe3p_lpd_cdclk_table[] = {
+ { .refclk = 38400, .cdclk = 151200, .ratio = 21, .waveform = 0xa4a4 },
+ { .refclk = 38400, .cdclk = 176400, .ratio = 21, .waveform = 0xaa54 },
+ { .refclk = 38400, .cdclk = 201600, .ratio = 21, .waveform = 0xaaaa },
+ { .refclk = 38400, .cdclk = 226800, .ratio = 21, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 252000, .ratio = 21, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 277200, .ratio = 21, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 302400, .ratio = 21, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 327600, .ratio = 21, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 352800, .ratio = 21, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 378000, .ratio = 21, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 403200, .ratio = 21, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 422400, .ratio = 22, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 441600, .ratio = 23, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 460800, .ratio = 24, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 499200, .ratio = 26, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 518400, .ratio = 27, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 537600, .ratio = 28, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 576000, .ratio = 30, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 595200, .ratio = 31, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 614400, .ratio = 32, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 633600, .ratio = 33, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 672000, .ratio = 35, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 691200, .ratio = 36, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 710400, .ratio = 37, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 729600, .ratio = 38, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 748800, .ratio = 39, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 768000, .ratio = 40, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 787200, .ratio = 41, .waveform = 0xffff },
+ {}
+};
+
static const int cdclk_squash_len = 16;
static int cdclk_squash_divider(u16 waveform)
@@ -1800,8 +1833,8 @@ static void bxt_de_pll_disable(struct intel_display *display)
intel_de_write(display, BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_de_wait_for_clear(display,
- BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for DE PLL unlock\n");
display->cdclk.hw.vco = 0;
@@ -1817,8 +1850,8 @@ static void bxt_de_pll_enable(struct intel_display *display, int vco)
intel_de_write(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
- if (intel_de_wait_for_set(display,
- BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_set_ms(display,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for DE PLL lock\n");
display->cdclk.hw.vco = vco;
@@ -1830,7 +1863,7 @@ static void icl_cdclk_pll_disable(struct intel_display *display)
BXT_DE_PLL_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_de_wait_for_clear(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for CDCLK PLL unlock\n");
display->cdclk.hw.vco = 0;
@@ -1848,7 +1881,7 @@ static void icl_cdclk_pll_enable(struct intel_display *display, int vco)
intel_de_write(display, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_set_ms(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for CDCLK PLL lock\n");
display->cdclk.hw.vco = vco;
@@ -1868,8 +1901,8 @@ static void adlp_cdclk_pll_crawl(struct intel_display *display, int vco)
intel_de_write(display, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE,
- BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
+ if (intel_de_wait_for_set_ms(display, BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
drm_err(display->drm, "timeout waiting for FREQ change request ack\n");
val &= ~BXT_DE_PLL_FREQ_REQ;
@@ -3561,7 +3594,9 @@ static int intel_compute_max_dotclk(struct intel_display *display)
*/
void intel_update_max_cdclk(struct intel_display *display)
{
- if (DISPLAY_VERx100(display) >= 3002) {
+ if (DISPLAY_VER(display) >= 35) {
+ display->cdclk.max_cdclk_freq = 787200;
+ } else if (DISPLAY_VERx100(display) >= 3002) {
display->cdclk.max_cdclk_freq = 480000;
} else if (DISPLAY_VER(display) >= 30) {
display->cdclk.max_cdclk_freq = 691200;
@@ -3912,7 +3947,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
*/
void intel_init_cdclk_hooks(struct intel_display *display)
{
- if (DISPLAY_VER(display) >= 30) {
+ if (DISPLAY_VER(display) >= 35) {
+ display->funcs.cdclk = &xe3lpd_cdclk_funcs;
+ display->cdclk.table = xe3p_lpd_cdclk_table;
+ } else if (DISPLAY_VER(display) >= 30) {
display->funcs.cdclk = &xe3lpd_cdclk_funcs;
display->cdclk.table = xe3lpd_cdclk_table;
} else if (DISPLAY_VER(display) >= 20) {
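A worked example of how the waveform column in the new xe3p_lpd_cdclk_table relates to the listed frequencies, assuming the same 16-step squashing scheme as the existing tables (cdclk_squash_len = 16): the cd2x clock is refclk * ratio / 2 = 38400 * 21 / 2 = 403200 kHz, and waveform 0xa4a4 has 6 bits set, giving 403200 * 6 / 16 = 151200 kHz, the first table entry. A hypothetical helper expressing that relation:

static int example_squashed_cdclk_khz(int refclk_khz, int ratio, u16 waveform)
{
	int cd2x_khz = refclk_khz * ratio / 2;

	/* scale by the fraction of set bits in the 16-bit squash waveform */
	return DIV_ROUND_CLOSEST(cd2x_khz * hweight16(waveform), 16);
}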
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 1e97020e7304..a217a67ceb43 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1090,18 +1090,19 @@ static void skl_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 tmp;
crtc_state->gamma_mode = hsw_read_gamma_mode(crtc);
crtc_state->csc_mode = ilk_read_csc_mode(crtc);
- tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe));
+ if (DISPLAY_VER(display) < 35) {
+ u32 tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe));
- if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
- crtc_state->gamma_enable = true;
+ if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
+ crtc_state->gamma_enable = true;
- if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
- crtc_state->csc_enable = true;
+ if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
+ crtc_state->csc_enable = true;
+ }
}
static void skl_color_commit_arm(struct intel_dsb *dsb,
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 31e68047f217..82e89cdbe5a5 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -498,10 +498,10 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, crt->adpa_reg, adpa);
- if (intel_de_wait_for_clear(display,
- crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
- 1000))
+ if (intel_de_wait_for_clear_ms(display,
+ crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
+ 1000))
drm_dbg_kms(display->drm,
"timed out waiting for FORCE_TRIGGER");
@@ -553,8 +553,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, crt->adpa_reg, adpa);
- if (intel_de_wait_for_clear(display, crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
+ if (intel_de_wait_for_clear_ms(display, crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
drm_dbg_kms(display->drm,
"timed out waiting for FORCE_TRIGGER");
intel_de_write(display, crt->adpa_reg, save_adpa);
@@ -604,8 +604,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (intel_de_wait_for_clear(display, PORT_HOTPLUG_EN(display),
- CRT_HOTPLUG_FORCE_DETECT, 1000))
+ if (intel_de_wait_for_clear_ms(display, PORT_HOTPLUG_EN(display),
+ CRT_HOTPLUG_FORCE_DETECT, 1000))
drm_dbg_kms(display->drm,
"timed out waiting for FORCE_DETECT to go off");
}
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 7aa14348aa6d..a10b2425b94d 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -182,8 +182,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
return 2048;
}
@@ -343,8 +343,8 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
return plane->base.dev->mode_config.cursor_width * 4;
}
@@ -1092,3 +1092,23 @@ fail:
return ERR_PTR(ret);
}
+
+void intel_cursor_mode_config_init(struct intel_display *display)
+{
+ struct drm_mode_config *mode_config = &display->drm->mode_config;
+
+ if (display->platform.i845g) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 1023;
+ } else if (display->platform.i865g) {
+ mode_config->cursor_width = 512;
+ mode_config->cursor_height = 1023;
+ } else if (display->platform.i830 || display->platform.i85x ||
+ display->platform.i915g || display->platform.i915gm) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 64;
+ } else {
+ mode_config->cursor_width = 256;
+ mode_config->cursor_height = 256;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h
index 65a9e7eb88c2..7c269d7381ad 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.h
+++ b/drivers/gpu/drm/i915/display/intel_cursor.h
@@ -17,4 +17,6 @@ intel_cursor_plane_create(struct intel_display *display,
void intel_cursor_unpin_work(struct kthread_work *base);
+void intel_cursor_mode_config_init(struct intel_display *display);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index b3b506d0e040..d98b4cf6b60e 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -145,9 +145,9 @@ void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_RESET,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_RESET,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_err_once(display->drm,
"Failed to bring PHY %c to idle.\n",
phy_name(phy));
@@ -164,12 +164,10 @@ int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
- if (intel_de_wait_custom(display,
- XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane),
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_MSGBUS_TIMEOUT_FAST_US,
- XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
+ if (intel_de_wait_ms(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_MSGBUS_TIMEOUT_MS, val)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
@@ -214,9 +212,9 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder,
int ack;
u32 val;
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
@@ -285,9 +283,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
int ack;
u32 val;
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
@@ -301,9 +299,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
XELPDP_PORT_M2P_DATA(data) |
XELPDP_PORT_M2P_ADDRESS(addr));
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
@@ -2814,9 +2812,9 @@ void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
/* Wait for pending transactions.*/
for_each_cx0_lane_in_mask(lane_mask, lane)
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n",
phy_name(phy));
@@ -2828,12 +2826,12 @@ void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
intel_cx0_get_powerdown_update(lane_mask));
/* Update Timeout Value */
- if (intel_de_wait_custom(display, buf_ctl2_reg,
- intel_cx0_get_powerdown_update(lane_mask), 0,
- XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 2, NULL))
+ if (intel_de_wait_for_clear_ms(display, buf_ctl2_reg,
+ intel_cx0_get_powerdown_update(lane_mask),
+ XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS))
drm_warn(display->drm,
- "PHY %c failed to bring out of Lane reset after %dus.\n",
- phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
+ "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
}
void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
@@ -2889,35 +2887,34 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);
- if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL1(display, port),
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL1(display, port),
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US))
drm_warn(display->drm,
- "PHY %c failed to bring out of SOC reset after %dus.\n",
- phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
+ "PHY %c failed to bring out of SOC reset\n",
+ phy_name(phy));
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset,
lane_pipe_reset);
- if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
- lane_phy_current_status, lane_phy_current_status,
- XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ XELPDP_PORT_RESET_START_TIMEOUT_US))
drm_warn(display->drm,
- "PHY %c failed to bring out of Lane reset after %dus.\n",
- phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
+ "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
- intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
- intel_cx0_get_pclk_refclk_ack(lane_mask),
- XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_us(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
+ intel_cx0_get_pclk_refclk_ack(lane_mask),
+ XELPDP_REFCLK_ENABLE_TIMEOUT_US, NULL))
drm_warn(display->drm,
- "PHY %c failed to request refclk after %dus.\n",
- phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
+ "PHY %c failed to request refclk\n",
+ phy_name(phy));
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
XELPDP_P2_STATE_RESET);
@@ -2925,12 +2922,12 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0);
- if (intel_de_wait_for_clear(display, XELPDP_PORT_BUF_CTL2(display, port),
- lane_phy_current_status,
- XELPDP_PORT_RESET_END_TIMEOUT))
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ XELPDP_PORT_RESET_END_TIMEOUT_MS))
drm_warn(display->drm,
- "PHY %c failed to bring out of Lane reset after %dms.\n",
- phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
+ "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
}
static void intel_cx0_program_phy_lane(struct intel_encoder *encoder, int lane_count,
@@ -3065,12 +3062,12 @@ static void __intel_cx0pll_enable(struct intel_encoder *encoder,
intel_cx0_get_pclk_pll_request(maxpclk_lane));
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
- intel_cx0_get_pclk_pll_ack(maxpclk_lane),
- XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
- drm_warn(display->drm, "Port %c PLL not locked after %dus.\n",
- phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
+ if (intel_de_wait_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_pclk_pll_ack(maxpclk_lane),
+ XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, NULL))
+ drm_warn(display->drm, "Port %c PLL not locked\n",
+ phy_name(phy));
/*
* 11. Follow the Display Voltage Frequency Switching Sequence After
@@ -3189,12 +3186,9 @@ void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
intel_de_write(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), val);
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_TBT_CLOCK_ACK,
- XELPDP_TBT_CLOCK_ACK,
- 100, 0, NULL))
- drm_warn(display->drm,
- "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ XELPDP_TBT_CLOCK_ACK, 100))
+ drm_warn(display->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
/*
@@ -3304,13 +3298,12 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/*
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
*/
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
- intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
- XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
- drm_warn(display->drm,
- "Port %c PLL not unlocked after %dus.\n",
- phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
+ if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
+ intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES),
+ XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US))
+ drm_warn(display->drm, "Port %c PLL not unlocked\n",
+ phy_name(phy));
/*
* 6. Follow the Display Voltage Frequency Switching Sequence After
@@ -3353,10 +3346,9 @@ void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
XELPDP_TBT_CLOCK_REQUEST, 0);
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
- drm_warn(display->drm,
- "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
+ if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ XELPDP_TBT_CLOCK_ACK, 10))
+ drm_warn(display->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
/*
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 635b35669348..8df5cd5ce418 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -74,14 +74,13 @@
#define XELPDP_PORT_P2M_DATA(val) REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val)
#define XELPDP_PORT_P2M_ERROR_SET REG_BIT(15)
-#define XELPDP_MSGBUS_TIMEOUT_SLOW 1
-#define XELPDP_MSGBUS_TIMEOUT_FAST_US 2
+#define XELPDP_MSGBUS_TIMEOUT_MS 1
#define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US 3200
#define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US 20
#define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US 100
#define XELPDP_PORT_RESET_START_TIMEOUT_US 5
-#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US 100
-#define XELPDP_PORT_RESET_END_TIMEOUT 15
+#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS 2
+#define XELPDP_PORT_RESET_END_TIMEOUT_MS 15
#define XELPDP_REFCLK_ENABLE_TIMEOUT_US 1
#define _XELPDP_PORT_BUF_CTL1_LN0_A 0x64004
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 733ef4559131..002ccd47856d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -210,8 +210,8 @@ void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port)
}
static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE);
- if (intel_de_wait_for_set(display, intel_ddi_buf_status_reg(display, port),
- DDI_BUF_IS_IDLE, 10))
+ if (intel_de_wait_for_set_ms(display, intel_ddi_buf_status_reg(display, port),
+ DDI_BUF_IS_IDLE, 10))
drm_err(display->drm, "Timeout waiting for DDI BUF %c to get idle\n",
port_name(port));
}
@@ -235,8 +235,8 @@ static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
}
static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE);
- if (intel_de_wait_for_clear(display, intel_ddi_buf_status_reg(display, port),
- DDI_BUF_IS_IDLE, 10))
+ if (intel_de_wait_for_clear_ms(display, intel_ddi_buf_status_reg(display, port),
+ DDI_BUF_IS_IDLE, 10))
drm_err(display->drm, "Timeout waiting for DDI BUF %c to get active\n",
port_name(port));
}
@@ -2307,8 +2307,8 @@ void intel_ddi_wait_for_act_sent(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
- if (intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_ACT_SENT, 1))
+ if (intel_de_wait_for_set_ms(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT, 1))
drm_err(display->drm, "Timed out waiting for ACT sent\n");
}
@@ -2383,11 +2383,11 @@ int intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
return 0;
if (enabled)
- ret = intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+ ret = intel_de_wait_for_set_ms(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
else
- ret = intel_de_wait_for_clear(display, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+ ret = intel_de_wait_for_clear_ms(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
if (ret) {
drm_err(display->drm,
@@ -2577,9 +2577,7 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
intel_de_rmw(display, reg, 0, set_bits);
- ret = intel_de_wait_custom(display, reg,
- wait_bits, wait_bits,
- 100, 0, NULL);
+ ret = intel_de_wait_for_set_us(display, reg, wait_bits, 100);
if (ret) {
drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
@@ -3079,9 +3077,7 @@ mtl_ddi_disable_d2d(struct intel_encoder *encoder)
intel_de_rmw(display, reg, clr_bits, 0);
- ret = intel_de_wait_custom(display, reg,
- wait_bits, 0,
- 100, 0, NULL);
+ ret = intel_de_wait_for_clear_us(display, reg, wait_bits, 100);
if (ret)
drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
@@ -3868,9 +3864,9 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
if (port == PORT_A && DISPLAY_VER(display) < 12)
return;
- if (intel_de_wait_for_set(display,
- dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_IDLE_DONE, 2))
+ if (intel_de_wait_for_set_ms(display,
+ dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_IDLE_DONE, 2))
drm_err(display->drm,
"Timed out waiting for DP idle patterns\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 9ecdcf6b73e4..a7ce3b875e06 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -84,20 +84,13 @@ intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
}
static inline u32
-__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
- u32 clear, u32 set)
-{
- return intel_uncore_rmw(__to_uncore(display), reg, clear, set);
-}
-
-static inline u32
intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
{
u32 val;
intel_dmc_wl_get(display, reg);
- val = __intel_de_rmw_nowl(display, reg, clear, set);
+ val = intel_uncore_rmw(__to_uncore(display), reg, clear, set);
intel_dmc_wl_put(display, reg);
@@ -105,34 +98,16 @@ intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
}
static inline int
-__intel_de_wait_for_register_nowl(struct intel_display *display,
- i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout_ms)
-{
- return intel_wait_for_register(__to_uncore(display), reg, mask,
- value, timeout_ms);
-}
-
-static inline int
-__intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
- i915_reg_t reg,
- u32 mask, u32 value,
- unsigned int fast_timeout_us)
-{
- return __intel_wait_for_register(__to_uncore(display), reg, mask,
- value, fast_timeout_us, 0, NULL);
-}
-
-static inline int
-intel_de_wait(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout_ms)
+intel_de_wait_us(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_us,
+ u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
- ret = __intel_de_wait_for_register_nowl(display, reg, mask, value,
- timeout_ms);
+ ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, timeout_us, 0, out_value);
intel_dmc_wl_put(display, reg);
@@ -140,15 +115,16 @@ intel_de_wait(struct intel_display *display, i915_reg_t reg,
}
static inline int
-intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout_ms, u32 *out_value)
+intel_de_wait_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_ms,
+ u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
- ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask,
- value, timeout_ms, out_value);
+ ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, 2, timeout_ms, out_value);
intel_dmc_wl_put(display, reg);
@@ -156,36 +132,49 @@ intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
}
static inline int
-intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value,
- unsigned int fast_timeout_us,
- unsigned int slow_timeout_ms, u32 *out_value)
+intel_de_wait_fw_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_ms,
+ u32 *out_value)
{
- int ret;
-
- intel_dmc_wl_get(display, reg);
+ return __intel_wait_for_register_fw(__to_uncore(display), reg, mask,
+ value, 2, timeout_ms, out_value);
+}
- ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
- value,
- fast_timeout_us, slow_timeout_ms, out_value);
+static inline int
+intel_de_wait_fw_us_atomic(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_us,
+ u32 *out_value)
+{
+ return __intel_wait_for_register_fw(__to_uncore(display), reg, mask,
+ value, timeout_us, 0, out_value);
+}
- intel_dmc_wl_put(display, reg);
+static inline int
+intel_de_wait_for_set_us(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_us)
+{
+ return intel_de_wait_us(display, reg, mask, mask, timeout_us, NULL);
+}
- return ret;
+static inline int
+intel_de_wait_for_clear_us(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_us)
+{
+ return intel_de_wait_us(display, reg, mask, 0, timeout_us, NULL);
}
static inline int
-intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout_ms)
+intel_de_wait_for_set_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, mask, timeout_ms);
+ return intel_de_wait_ms(display, reg, mask, mask, timeout_ms, NULL);
}
static inline int
-intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout_ms)
+intel_de_wait_for_clear_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, 0, timeout_ms);
+ return intel_de_wait_ms(display, reg, mask, 0, timeout_ms, NULL);
}
/*
@@ -215,6 +204,18 @@ intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
}
static inline u32
+intel_de_rmw_fw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
+{
+ u32 old, val;
+
+ old = intel_de_read_fw(display, reg);
+ val = (old & ~clear) | set;
+ intel_de_write_fw(display, reg, val);
+
+ return old;
+}
+
+static inline u32
intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
{
return intel_uncore_read_notrace(__to_uncore(display), reg);
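The effect of the intel_de.h rework above, shown on the hsw_ips.c hunk from earlier in this diff: the timeout unit is now explicit in the helper name, and one-off microsecond waits no longer need intel_de_wait_custom().

	/* before: unit implicit (ms); us waits went through intel_de_wait_custom() */
	if (intel_de_wait_for_set(display, IPS_CTL, IPS_ENABLE, 50))
		drm_err(display->drm, "Timed out waiting for IPS enable\n");

	/* after: the _ms/_us suffix spells out the timeout unit */
	if (intel_de_wait_for_set_ms(display, IPS_CTL, IPS_ENABLE, 50))
		drm_err(display->drm, "Timed out waiting for IPS enable\n");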
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 42ec78798666..069967114bd9 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -359,8 +359,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
/* Wait for the Pipe State to go off */
- if (intel_de_wait_for_clear(display, TRANSCONF(display, cpu_transcoder),
- TRANSCONF_STATE_ENABLE, 100))
+ if (intel_de_wait_for_clear_ms(display, TRANSCONF(display, cpu_transcoder),
+ TRANSCONF_STATE_ENABLE, 100))
drm_WARN(display->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -547,16 +547,13 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
intel_wait_for_pipe_off(old_crtc_state);
}
-u32 intel_plane_fb_max_stride(struct drm_device *drm,
- u32 pixel_format, u64 modifier)
+u32 intel_plane_fb_max_stride(struct intel_display *display,
+ const struct drm_format_info *info,
+ u64 modifier)
{
- struct intel_display *display = to_intel_display(drm);
struct intel_crtc *crtc;
struct intel_plane *plane;
- if (!HAS_DISPLAY(display))
- return 0;
-
/*
* We assume the primary plane for pipe A has
* the highest stride limits of them all,
@@ -568,10 +565,23 @@ u32 intel_plane_fb_max_stride(struct drm_device *drm,
plane = to_intel_plane(crtc->base.primary);
- return plane->max_stride(plane, pixel_format, modifier,
+ return plane->max_stride(plane, info, modifier,
DRM_MODE_ROTATE_0);
}
+u32 intel_dumb_fb_max_stride(struct drm_device *drm,
+ u32 pixel_format, u64 modifier)
+{
+ struct intel_display *display = to_intel_display(drm);
+
+ if (!HAS_DISPLAY(display))
+ return 0;
+
+ return intel_plane_fb_max_stride(display,
+ drm_get_format_info(drm, pixel_format, modifier),
+ modifier);
+}
+
void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
bool visible)
@@ -2658,7 +2668,7 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
}
- if (DISPLAY_VER(display) >= 4)
+ if (DISPLAY_VER(display) >= 4 && DISPLAY_VER(display) < 35)
intel_de_write(display,
TRANS_VSYNCSHIFT(display, cpu_transcoder),
vsyncshift);
@@ -2799,7 +2809,7 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(display) == 2)
+ if (DISPLAY_VER(display) == 2 || DISPLAY_VER(display) >= 35)
return false;
if (DISPLAY_VER(display) >= 9 ||
@@ -3190,10 +3200,12 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
if (display->platform.haswell && crtc_state->dither)
val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
- if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= TRANSCONF_INTERLACE_IF_ID_ILK;
- else
- val |= TRANSCONF_INTERLACE_PF_PD_ILK;
+ if (DISPLAY_VER(display) < 35) {
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
+ else
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
+ }
if (display->platform.haswell &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index fc2ef92ccf68..bcc6ccb69d2b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -34,6 +34,7 @@ struct drm_atomic_state;
struct drm_device;
struct drm_display_mode;
struct drm_encoder;
+struct drm_format_info;
struct drm_modeset_acquire_ctx;
struct intel_atomic_state;
struct intel_crtc;
@@ -402,8 +403,11 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
int bw_overhead,
struct intel_link_m_n *m_n);
-u32 intel_plane_fb_max_stride(struct drm_device *drm,
- u32 pixel_format, u64 modifier);
+u32 intel_plane_fb_max_stride(struct intel_display *display,
+ const struct drm_format_info *info,
+ u64 modifier);
+u32 intel_dumb_fb_max_stride(struct drm_device *drm,
+ u32 pixel_format, u64 modifier);
enum drm_mode_status
intel_mode_valid_max_plane_size(struct intel_display *display,
const struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 893279be8409..9b8414b77c15 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -142,14 +142,13 @@ struct intel_dpll_global {
};
struct intel_frontbuffer_tracking {
+ /* protects busy_bits */
spinlock_t lock;
/*
- * Tracking bits for delayed frontbuffer flushing du to gpu activity or
- * scheduled flips.
+ * Tracking bits for delayed frontbuffer flushing due to gpu activity.
*/
unsigned busy_bits;
- unsigned flip_bits;
};
struct intel_hotplug {
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 0b3fd65dac0f..9bbfdae8d024 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -78,9 +78,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
display->fb_tracking.busy_bits);
- seq_printf(m, "FB tracking flip bits: 0x%08x\n",
- display->fb_tracking.flip_bits);
-
spin_unlock(&display->fb_tracking.lock);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 328447a5e5e8..1170afaa8680 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1507,6 +1507,7 @@ static const struct {
{ 20, 0, &xe2_lpd_display },
{ 30, 0, &xe2_lpd_display },
{ 30, 2, &wcl_display },
+ { 35, 0, &xe2_lpd_display },
};
static const struct intel_display_device_info *
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 63942ebf46fb..7e000ba3e08b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -29,6 +29,7 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
+#include "intel_cursor.h"
#include "intel_dbuf_bw.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
@@ -148,17 +149,7 @@ static void intel_mode_config_init(struct intel_display *display)
mode_config->max_height = 2048;
}
- if (display->platform.i845g || display->platform.i865g) {
- mode_config->cursor_width = display->platform.i845g ? 64 : 512;
- mode_config->cursor_height = 1023;
- } else if (display->platform.i830 || display->platform.i85x ||
- display->platform.i915g || display->platform.i915gm) {
- mode_config->cursor_width = 64;
- mode_config->cursor_height = 64;
- } else {
- mode_config->cursor_width = 256;
- mode_config->cursor_height = 256;
- }
+ intel_cursor_mode_config_init(display);
}
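intel_cursor_mode_config_init() itself is outside this hunk; based on the block removed above, its assumed shape simply relocates the per-platform cursor size limits into intel_cursor.c:

/*
 * Assumed shape of the new helper (defined in intel_cursor.c, not shown
 * here): the per-platform cursor limits removed above, moved as-is.
 */
void intel_cursor_mode_config_init(struct intel_display *display)
{
	struct drm_mode_config *mode_config = &display->drm->mode_config;

	if (display->platform.i845g || display->platform.i865g) {
		/* i845g: 64 wide, i865g: 512 wide, both up to 1023 tall */
		mode_config->cursor_width = display->platform.i845g ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (display->platform.i830 || display->platform.i85x ||
		   display->platform.i915g || display->platform.i915gm) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}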
static void intel_mode_config_cleanup(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index fbfa823b6dce..2a4cc1dcc293 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1292,9 +1292,8 @@ static void hsw_disable_lcpll(struct intel_display *display,
val |= LCPLL_CD_SOURCE_FCLK;
intel_de_write(display, LCPLL_CTL, val);
- ret = intel_de_wait_custom(display, LCPLL_CTL,
- LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE,
- 1, 0, NULL);
+ ret = intel_de_wait_for_set_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 1);
if (ret)
drm_err(display->drm, "Switching to FCLK failed\n");
@@ -1305,7 +1304,7 @@ static void hsw_disable_lcpll(struct intel_display *display,
intel_de_write(display, LCPLL_CTL, val);
intel_de_posting_read(display, LCPLL_CTL);
- if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
drm_err(display->drm, "LCPLL still locked\n");
val = hsw_read_dcomp(display);
@@ -1362,15 +1361,14 @@ static void hsw_restore_lcpll(struct intel_display *display)
val &= ~LCPLL_PLL_DISABLE;
intel_de_write(display, LCPLL_CTL, val);
- if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
+ if (intel_de_wait_for_set_ms(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
drm_err(display->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
- ret = intel_de_wait_custom(display, LCPLL_CTL,
- LCPLL_CD_SOURCE_FCLK_DONE, 0,
- 1, 0, NULL);
+ ret = intel_de_wait_for_clear_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 1);
if (ret)
drm_err(display->drm,
"Switching back to LCPLL failed\n");
@@ -1438,6 +1436,9 @@ static void intel_pch_reset_handshake(struct intel_display *display,
i915_reg_t reg;
u32 reset_bits;
+ if (DISPLAY_VER(display) >= 35)
+ return;
+
if (display->platform.ivybridge) {
reg = GEN7_MSG_CTL;
reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index eab7019f2252..f4f7e73acc87 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -293,8 +293,8 @@ static void hsw_wait_for_power_well_enable(struct intel_display *display,
}
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- if (intel_de_wait_for_set(display, regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
+ if (intel_de_wait_for_set_ms(display, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
drm_dbg_kms(display->drm, "%s power well enable timeout\n",
intel_power_well_name(power_well));
@@ -338,9 +338,9 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display,
*/
reqs = hsw_power_well_requesters(display, regs, pw_idx);
- ret = intel_de_wait_for_clear(display, regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- reqs ? 0 : 1);
+ ret = intel_de_wait_for_clear_ms(display, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ reqs ? 0 : 1);
if (!ret)
return;
@@ -359,8 +359,8 @@ static void gen9_wait_for_power_well_fuses(struct intel_display *display,
{
/* Timeout 5us for PG#0, for other PGs 1us */
drm_WARN_ON(display->drm,
- intel_de_wait_for_set(display, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
+ intel_de_wait_for_set_ms(display, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
static void hsw_power_well_enable(struct intel_display *display,
@@ -1358,6 +1358,7 @@ static void assert_chv_phy_status(struct intel_display *display)
u32 phy_control = display->power.chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
+ u32 val;
/*
* The BIOS can leave the PHY is some weird state
@@ -1445,12 +1446,11 @@ static void assert_chv_phy_status(struct intel_display *display)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_de_wait(display, DISPLAY_PHY_STATUS,
- phy_status_mask, phy_status, 10))
+ if (intel_de_wait_ms(display, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10, &val))
drm_err(display->drm,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, display->power.chv_phy_control);
+ val & phy_status_mask, phy_status, display->power.chv_phy_control);
}
#undef BITS_SET
@@ -1476,8 +1476,8 @@ static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
vlv_set_power_well(display, power_well, true);
/* Poll for phypwrgood signal */
- if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS,
- PHY_POWERGOOD(phy), 1))
+ if (intel_de_wait_for_set_ms(display, DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy), 1))
drm_err(display->drm, "Display PHY %d is not power up\n",
phy);
@@ -1867,8 +1867,8 @@ static void xelpdp_aux_power_well_enable(struct intel_display *display,
* bit.
*/
if (DISPLAY_VER(display) >= 35) {
- if (intel_de_wait_for_set(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
- XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 2))
+ if (intel_de_wait_for_set_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 2))
drm_warn(display->drm,
"Timeout waiting for PHY %c AUX channel power to be up\n",
phy_name(phy));
@@ -1888,8 +1888,8 @@ static void xelpdp_aux_power_well_disable(struct intel_display *display,
0);
if (DISPLAY_VER(display) >= 35) {
- if (intel_de_wait_for_clear(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
- XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 1))
+ if (intel_de_wait_for_clear_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 1))
drm_warn(display->drm,
"Timeout waiting for PHY %c AUX channel to powerdown\n",
phy_name(phy));
@@ -1913,8 +1913,8 @@ static void xe2lpd_pica_power_well_enable(struct intel_display *display,
intel_de_write(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_REQUEST);
- if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL,
- XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
+ if (intel_de_wait_for_set_ms(display, XE2LPD_PICA_PW_CTL,
+ XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
drm_dbg_kms(display->drm, "pica power well enable timeout\n");
drm_WARN(display->drm, 1, "Power well PICA timeout when enabled");
@@ -1926,8 +1926,8 @@ static void xe2lpd_pica_power_well_disable(struct intel_display *display,
{
intel_de_write(display, XE2LPD_PICA_PW_CTL, 0);
- if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL,
- XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
+ if (intel_de_wait_for_clear_ms(display, XE2LPD_PICA_PW_CTL,
+ XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
drm_dbg_kms(display->drm, "pica power well disable timeout\n");
drm_WARN(display->drm, 1, "Power well PICA timeout when disabled");
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 00600134bda0..38702a9e0f50 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -726,7 +726,6 @@ struct intel_initial_plane_config {
struct intel_memory_region *mem;
resource_size_t phys_base;
struct i915_vma *vma;
- unsigned int tiling;
int size;
u32 base;
u8 rotation;
@@ -1564,8 +1563,8 @@ struct intel_plane {
const struct drm_framebuffer *fb,
int color_plane);
unsigned int (*max_stride)(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation);
bool (*can_async_flip)(u64 modifier);
/* Write all non-self arming plane registers */
void (*update_noarm)(struct intel_dsb *dsb,
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index c528aaa679ca..e38e5e87877c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -49,7 +49,8 @@ void intel_display_wa_apply(struct intel_display *display)
*/
static bool intel_display_needs_wa_16025573575(struct intel_display *display)
{
- return DISPLAY_VERx100(display) == 3000 || DISPLAY_VERx100(display) == 3002;
+ return DISPLAY_VERx100(display) == 3000 || DISPLAY_VERx100(display) == 3002 ||
+ DISPLAY_VERx100(display) == 3500;
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 0bddb20a7c86..6ebbd97e6351 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -127,6 +127,9 @@ static bool dmc_firmware_param_disabled(struct intel_display *display)
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define XE3P_LPD_DMC_PATH DMC_PATH(xe3p_lpd)
+MODULE_FIRMWARE(XE3P_LPD_DMC_PATH);
+
#define XE3LPD_3002_DMC_PATH DMC_PATH(xe3lpd_3002)
MODULE_FIRMWARE(XE3LPD_3002_DMC_PATH);
@@ -186,7 +189,11 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
{
const char *fw_path = NULL;
u32 max_fw_size = 0;
- if (DISPLAY_VERx100(display) == 3002) {
+
+ if (DISPLAY_VERx100(display) == 3500) {
+ fw_path = XE3P_LPD_DMC_PATH;
+ max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VERx100(display) == 3002) {
fw_path = XE3LPD_3002_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VERx100(display) == 3000) {
@@ -711,11 +718,11 @@ static bool need_pipedmc_load_program(struct intel_display *display)
static bool need_pipedmc_load_mmio(struct intel_display *display, enum pipe pipe)
{
/*
- * PTL:
+ * Xe3_LPD/Xe3p_LPD:
* - pipe A/B DMC doesn't need save/restore
* - pipe C/D DMC is in PG0, needs manual save/restore
*/
- if (DISPLAY_VER(display) == 30)
+ if (IS_DISPLAY_VER(display, 30, 35))
return pipe >= PIPE_C;
/*
@@ -1712,14 +1719,14 @@ void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe)
drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC GTT fault\n",
crtc->base.base.id, crtc->base.name);
if (tmp & PIPEDMC_ERROR)
- drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC error\n",
+ drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC error\n",
crtc->base.base.id, crtc->base.name);
}
int_vector = intel_de_read(display, PIPEDMC_STATUS(pipe)) & PIPEDMC_INT_VECTOR_MASK;
if (tmp == 0 && int_vector != 0)
- drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC interrupt vector 0x%x\n",
- crtc->base.base.id, crtc->base.name, tmp);
+ drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC interrupt vector 0x%x\n",
+ crtc->base.base.id, crtc->base.name, int_vector);
}
void intel_pipedmc_enable_event(struct intel_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index b3bb89ba34f9..73a3101514f3 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -179,11 +179,11 @@ static void intel_dmc_wl_work(struct work_struct *work)
if (refcount_read(&wl->refcount))
goto out_unlock;
- __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+ intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
- if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK, 0,
- DMC_WAKELOCK_CTL_TIMEOUT_US)) {
+ if (intel_de_wait_fw_us_atomic(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK, 0,
+ DMC_WAKELOCK_CTL_TIMEOUT_US, NULL)) {
WARN_RATELIMIT(1, "DMC wakelock release timed out");
goto out_unlock;
}
@@ -207,17 +207,16 @@ static void __intel_dmc_wl_take(struct intel_display *display)
if (wl->taken)
return;
- __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
- DMC_WAKELOCK_CTL_REQ);
+ intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, 0, DMC_WAKELOCK_CTL_REQ);
/*
* We need to use the atomic variant of the waiting routine
* because the DMC wakelock is also taken in atomic context.
*/
- if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_TIMEOUT_US)) {
+ if (intel_de_wait_fw_us_atomic(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_TIMEOUT_US, NULL)) {
WARN_RATELIMIT(1, "DMC wakelock ack timed out");
return;
}
@@ -360,7 +359,7 @@ void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
* wakelock, because we're just enabling it, so call the
* non-locking version directly here.
*/
- __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
+ intel_de_rmw_fw(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
wl->enabled = true;
@@ -402,7 +401,7 @@ void intel_dmc_wl_disable(struct intel_display *display)
goto out_unlock;
/* Disable wakelock in DMC */
- __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
+ intel_de_rmw_fw(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
wl->enabled = false;
@@ -414,7 +413,7 @@ void intel_dmc_wl_disable(struct intel_display *display)
*
* TODO: Get the correct expectation from the hardware team.
*/
- __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+ intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
wl->taken = false;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 2e7dbaf511b9..809799f63e32 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -62,9 +62,9 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
u32 status;
int ret;
- ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY,
- 0,
- 2, timeout_ms, &status);
+ ret = intel_de_wait_ms(display, ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY, 0,
+ timeout_ms, &status);
if (ret == -ETIMEDOUT)
drm_err(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index bd757db85927..14ed0ea22dd3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -782,9 +782,9 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
return -EINVAL;
/* Wait for encryption confirmation */
- if (intel_de_wait(display, HDCP_STATUS(display, cpu_transcoder, port),
- stream_enc_status, enable ? stream_enc_status : 0,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_ms(display, HDCP_STATUS(display, cpu_transcoder, port),
+ stream_enc_status, enable ? stream_enc_status : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS, NULL)) {
drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
transcoder_name(cpu_transcoder), str_enabled_disabled(enable));
return -ETIMEDOUT;
@@ -821,10 +821,10 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return ret;
/* Wait for encryption confirmation */
- if (intel_de_wait(display, HDCP2_STREAM_STATUS(display, cpu_transcoder, pipe),
- STREAM_ENCRYPTION_STATUS,
- enable ? STREAM_ENCRYPTION_STATUS : 0,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_ms(display, HDCP2_STREAM_STATUS(display, cpu_transcoder, pipe),
+ STREAM_ENCRYPTION_STATUS,
+ enable ? STREAM_ENCRYPTION_STATUS : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS, NULL)) {
drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
transcoder_name(cpu_transcoder), str_enabled_disabled(enable));
return -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 5df6347a420d..8027bab2951b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -390,7 +390,7 @@ static u32 bxt_get_grc(struct intel_display *display, enum dpio_phy phy)
static void bxt_phy_wait_grc_done(struct intel_display *display,
enum dpio_phy phy)
{
- if (intel_de_wait_for_set(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10))
+ if (intel_de_wait_for_set_ms(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10))
drm_err(display->drm, "timeout waiting for PHY%d GRC\n", phy);
}
@@ -427,7 +427,7 @@ static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
* The flag should get set in 100us according to the HW team, but
* use 1ms due to occasional timeouts observed with that.
*/
- if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy),
+ if (intel_de_wait_ms(display, BXT_PORT_CL1CM_DW0(phy),
PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1, NULL))
drm_err(display->drm, "timeout during PHY%d power on\n",
phy);
@@ -1173,6 +1173,7 @@ void vlv_wait_port_ready(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
u32 port_mask;
i915_reg_t dpll_reg;
+ u32 val;
switch (encoder->port) {
default:
@@ -1193,10 +1194,9 @@ void vlv_wait_port_ready(struct intel_encoder *encoder,
break;
}
- if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000))
+ if (intel_de_wait_ms(display, dpll_reg, port_mask, expected_mask, 1000, &val))
drm_WARN(display->drm, 1,
"timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
encoder->base.base.id, encoder->base.name,
- intel_de_read(display, dpll_reg) & port_mask,
- expected_mask);
+ val & port_mask, expected_mask);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 2e1f67be8eda..4f1db8493a2e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -2019,7 +2019,7 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ if (intel_de_wait_for_set_ms(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
}
@@ -2165,7 +2165,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Check PLL is locked */
- if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ if (intel_de_wait_for_set_ms(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
drm_err(display->drm, "PLL %d failed to lock\n", pipe);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 92c433f7b7e2..9c7cf03cf022 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -1395,7 +1395,7 @@ static void skl_ddi_pll_enable(struct intel_display *display,
/* the enable bit is always bit 31 */
intel_de_rmw(display, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
- if (intel_de_wait_for_set(display, DPLL_STATUS, DPLL_LOCK(id), 5))
+ if (intel_de_wait_for_set_ms(display, DPLL_STATUS, DPLL_LOCK(id), 5))
drm_err(display->drm, "DPLL %d not locked\n", id);
}
@@ -2057,9 +2057,9 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
0, PORT_PLL_POWER_ENABLE);
- ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
- PORT_PLL_POWER_STATE, PORT_PLL_POWER_STATE,
- 200, 0, NULL);
+ ret = intel_de_wait_for_set_us(display,
+ BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_STATE, 200);
if (ret)
drm_err(display->drm,
"Power state not set for PLL:%d\n", port);
@@ -2122,9 +2122,8 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
- ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
- PORT_PLL_LOCK, PORT_PLL_LOCK,
- 200, 0, NULL);
+ ret = intel_de_wait_for_set_us(display, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_LOCK, 200);
if (ret)
drm_err(display->drm, "PLL %d not locked\n", port);
@@ -2158,9 +2157,9 @@ static void bxt_ddi_pll_disable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
PORT_PLL_POWER_ENABLE, 0);
- ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
- PORT_PLL_POWER_STATE, 0,
- 200, 0, NULL);
+ ret = intel_de_wait_for_clear_us(display,
+ BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_STATE, 200);
if (ret)
drm_err(display->drm,
"Power state not reset for PLL:%d\n", port);
@@ -3921,7 +3920,7 @@ static void icl_pll_power_enable(struct intel_display *display,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_de_wait_for_set(display, enable_reg, PLL_POWER_STATE, 1))
+ if (intel_de_wait_for_set_ms(display, enable_reg, PLL_POWER_STATE, 1))
drm_err(display->drm, "PLL %d Power not enabled\n",
pll->info->id);
}
@@ -3933,7 +3932,7 @@ static void icl_pll_enable(struct intel_display *display,
intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
/* Timeout is actually 600us. */
- if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 1))
+ if (intel_de_wait_for_set_ms(display, enable_reg, PLL_LOCK, 1))
drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
}
@@ -4046,7 +4045,7 @@ static void icl_pll_disable(struct intel_display *display,
intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);
/* Timeout is actually 1us. */
- if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_LOCK, 1))
drm_err(display->drm, "PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
@@ -4057,7 +4056,7 @@ static void icl_pll_disable(struct intel_display *display,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_de_wait_for_clear(display, enable_reg, PLL_POWER_STATE, 1))
+ if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_POWER_STATE, 1))
drm_err(display->drm, "PLL %d Power not disabled\n",
pll->info->id);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 31edf57a296f..4b815ce6b1fe 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -171,6 +171,9 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
case MIPI_DSI_DCS_LONG_WRITE:
ret = mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
+ default:
+ ret = -EINVAL;
+ break;
}
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 064c0d3e8177..b34b4961fe1c 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1326,7 +1326,7 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
* unclear in Bspec, for now no checking.
*/
stride = intel_fb_pitch(fb, 0, rotation);
- max_stride = plane->max_stride(plane, fb->base.format->format,
+ max_stride = plane->max_stride(plane, fb->base.format,
fb->base.modifier, rotation);
return stride > max_stride;
@@ -1972,7 +1972,8 @@ void intel_add_fb_offsets(int *x, int *y,
static
u32 intel_fb_max_stride(struct intel_display *display,
- u32 pixel_format, u64 modifier)
+ const struct drm_format_info *info,
+ u64 modifier)
{
/*
* Arbitrary limit for gen4+ chosen to match the
@@ -1982,7 +1983,7 @@ u32 intel_fb_max_stride(struct intel_display *display,
*/
if (DISPLAY_VER(display) < 4 || intel_fb_is_ccs_modifier(modifier) ||
intel_fb_modifier_uses_dpt(display, modifier))
- return intel_plane_fb_max_stride(display->drm, pixel_format, modifier);
+ return intel_plane_fb_max_stride(display, info, modifier);
else if (DISPLAY_VER(display) >= 7)
return 256 * 1024;
else
@@ -1996,8 +1997,8 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
unsigned int tile_width;
if (is_surface_linear(fb, color_plane)) {
- unsigned int max_stride = intel_plane_fb_max_stride(display->drm,
- fb->format->format,
+ unsigned int max_stride = intel_plane_fb_max_stride(display,
+ fb->format,
fb->modifier);
/*
@@ -2055,7 +2056,7 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
/* FIXME other color planes? */
stride = plane_state->view.color_plane[0].mapping_stride;
- max_stride = plane->max_stride(plane, fb->format->format,
+ max_stride = plane->max_stride(plane, fb->format,
fb->modifier, rotation);
if (stride > max_stride) {
@@ -2194,7 +2195,6 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
return ret;
flush:
- intel_bo_flush_if_display(obj);
intel_frontbuffer_flush(front, ORIGIN_DIRTYFB);
return ret;
}
@@ -2234,24 +2234,24 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (ret)
goto err_frontbuffer_put;
- ret = -EINVAL;
if (!drm_any_plane_has_format(display->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
drm_dbg_kms(display->drm,
"unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
+ ret = -EINVAL;
goto err_bo_framebuffer_fini;
}
- max_stride = intel_fb_max_stride(display, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
+ max_stride = intel_fb_max_stride(display, info, mode_cmd->modifier[0]);
if (mode_cmd->pitches[0] > max_stride) {
drm_dbg_kms(display->drm,
"%s pitch (%u) must be at most %d\n",
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
mode_cmd->pitches[0], max_stride);
+ ret = -EINVAL;
goto err_bo_framebuffer_fini;
}
@@ -2260,6 +2260,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(display->drm,
"plane 0 offset (0x%08x) must be 0\n",
mode_cmd->offsets[0]);
+ ret = -EINVAL;
goto err_bo_framebuffer_fini;
}
@@ -2270,6 +2271,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
drm_dbg_kms(display->drm, "bad plane %d handle\n", i);
+ ret = -EINVAL;
goto err_bo_framebuffer_fini;
}
@@ -2278,6 +2280,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(display->drm,
"plane %d pitch (%d) must be at least %u byte aligned\n",
i, fb->pitches[i], stride_alignment);
+ ret = -EINVAL;
goto err_bo_framebuffer_fini;
}
@@ -2288,6 +2291,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(display->drm,
"ccs aux plane %d pitch (%d) must be %d\n",
i, fb->pitches[i], ccs_aux_stride);
+ ret = -EINVAL;
goto err_bo_framebuffer_fini;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index a1e3083022ee..437d2fda20a7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -328,8 +328,8 @@ static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
intel_de_write(display, FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_de_wait_for_clear(display, FBC_STATUS,
- FBC_STAT_COMPRESSING, 10)) {
+ if (intel_de_wait_for_clear_ms(display, FBC_STATUS,
+ FBC_STAT_COMPRESSING, 10)) {
drm_dbg_kms(display->drm, "FBC idle timed out\n");
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c
index f162614a925d..1e9550cb66a3 100644
--- a/drivers/gpu/drm/i915/display/intel_flipq.c
+++ b/drivers/gpu/drm/i915/display/intel_flipq.c
@@ -163,10 +163,10 @@ static void intel_flipq_preempt(struct intel_crtc *crtc, bool preempt)
PIPEDMC_FQ_CTRL_PREEMPT, preempt ? PIPEDMC_FQ_CTRL_PREEMPT : 0);
if (preempt &&
- intel_de_wait_for_clear(display,
- PIPEDMC_FQ_STATUS(crtc->pipe),
- PIPEDMC_FQ_STATUS_BUSY,
- intel_flipq_preempt_timeout_ms(display)))
+ intel_de_wait_for_clear_ms(display,
+ PIPEDMC_FQ_STATUS(crtc->pipe),
+ PIPEDMC_FQ_STATUS_BUSY,
+ intel_flipq_preempt_timeout_ms(display)))
drm_err(display->drm, "[CRTC:%d:%s] flip queue preempt timeout\n",
crtc->base.base.id, crtc->base.name);
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index c88bef22d366..03c4978fa5ec 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -58,8 +58,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
-#include "i915_active.h"
-#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
@@ -104,51 +102,6 @@ static void frontbuffer_flush(struct intel_display *display,
}
/**
- * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @display: display device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. The actual
- * frontbuffer flushing will be delayed until completion is signalled with
- * intel_frontbuffer_flip_complete. If an invalidate happens in between this
- * flush will be cancelled.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_prepare(struct intel_display *display,
- unsigned frontbuffer_bits)
-{
- spin_lock(&display->fb_tracking.lock);
- display->fb_tracking.flip_bits |= frontbuffer_bits;
- /* Remove stale busy bits due to the old buffer. */
- display->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&display->fb_tracking.lock);
-}
-
-/**
- * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @display: display device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after the flip has been latched and will complete
- * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_complete(struct intel_display *display,
- unsigned frontbuffer_bits)
-{
- spin_lock(&display->fb_tracking.lock);
- /* Mask any cancelled flips. */
- frontbuffer_bits &= display->fb_tracking.flip_bits;
- display->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&display->fb_tracking.lock);
-
- if (frontbuffer_bits)
- frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
-}
-
-/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
* @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
@@ -174,12 +127,11 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
- struct intel_display *display = to_intel_display(front->obj->dev);
+ struct intel_display *display = front->display;
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
display->fb_tracking.busy_bits |= frontbuffer_bits;
- display->fb_tracking.flip_bits &= ~frontbuffer_bits;
spin_unlock(&display->fb_tracking.lock);
}
@@ -195,7 +147,10 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
- struct intel_display *display = to_intel_display(front->obj->dev);
+ struct intel_display *display = front->display;
+
+ if (origin == ORIGIN_DIRTYFB)
+ intel_bo_frontbuffer_flush_for_display(front);
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
@@ -209,12 +164,16 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
frontbuffer_flush(display, frontbuffer_bits, origin);
}
+static void intel_frontbuffer_ref(struct intel_frontbuffer *front)
+{
+ intel_bo_frontbuffer_ref(front);
+}
+
static void intel_frontbuffer_flush_work(struct work_struct *work)
{
struct intel_frontbuffer *front =
container_of(work, struct intel_frontbuffer, flush_work);
- intel_bo_flush_if_display(front->obj);
intel_frontbuffer_flush(front, ORIGIN_DIRTYFB);
intel_frontbuffer_put(front);
}
@@ -231,93 +190,31 @@ void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front)
if (!front)
return;
- kref_get(&front->ref);
+ intel_frontbuffer_ref(front);
if (!schedule_work(&front->flush_work))
intel_frontbuffer_put(front);
}
-static int frontbuffer_active(struct i915_active *ref)
+void intel_frontbuffer_init(struct intel_frontbuffer *front, struct drm_device *drm)
{
- struct intel_frontbuffer *front =
- container_of(ref, typeof(*front), write);
-
- kref_get(&front->ref);
- return 0;
+ front->display = to_intel_display(drm);
+ atomic_set(&front->bits, 0);
+ INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work);
}
-static void frontbuffer_retire(struct i915_active *ref)
+void intel_frontbuffer_fini(struct intel_frontbuffer *front)
{
- struct intel_frontbuffer *front =
- container_of(ref, typeof(*front), write);
-
- intel_frontbuffer_flush(front, ORIGIN_CS);
- intel_frontbuffer_put(front);
+ drm_WARN_ON(front->display->drm, atomic_read(&front->bits));
}
-static void frontbuffer_release(struct kref *ref)
- __releases(&to_intel_display(front->obj->dev)->fb_tracking.lock)
+struct intel_frontbuffer *intel_frontbuffer_get(struct drm_gem_object *obj)
{
- struct intel_frontbuffer *ret, *front =
- container_of(ref, typeof(*front), ref);
- struct drm_gem_object *obj = front->obj;
- struct intel_display *display = to_intel_display(obj->dev);
-
- drm_WARN_ON(display->drm, atomic_read(&front->bits));
-
- i915_ggtt_clear_scanout(to_intel_bo(obj));
-
- ret = intel_bo_set_frontbuffer(obj, NULL);
- drm_WARN_ON(display->drm, ret);
- spin_unlock(&display->fb_tracking.lock);
-
- i915_active_fini(&front->write);
-
- drm_gem_object_put(obj);
- kfree_rcu(front, rcu);
-}
-
-struct intel_frontbuffer *
-intel_frontbuffer_get(struct drm_gem_object *obj)
-{
- struct intel_display *display = to_intel_display(obj->dev);
- struct intel_frontbuffer *front, *cur;
-
- front = intel_bo_get_frontbuffer(obj);
- if (front)
- return front;
-
- front = kmalloc(sizeof(*front), GFP_KERNEL);
- if (!front)
- return NULL;
-
- drm_gem_object_get(obj);
-
- front->obj = obj;
- kref_init(&front->ref);
- atomic_set(&front->bits, 0);
- i915_active_init(&front->write,
- frontbuffer_active,
- frontbuffer_retire,
- I915_ACTIVE_RETIRE_SLEEPS);
- INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work);
-
- spin_lock(&display->fb_tracking.lock);
- cur = intel_bo_set_frontbuffer(obj, front);
- spin_unlock(&display->fb_tracking.lock);
-
- if (cur != front) {
- drm_gem_object_put(obj);
- kfree(front);
- }
-
- return cur;
+ return intel_bo_frontbuffer_get(obj);
}
void intel_frontbuffer_put(struct intel_frontbuffer *front)
{
- kref_put_lock(&front->ref,
- frontbuffer_release,
- &to_intel_display(front->obj->dev)->fb_tracking.lock);
+ intel_bo_frontbuffer_put(front);
}
/**
@@ -346,17 +243,13 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE);
if (old) {
- struct intel_display *display = to_intel_display(old->obj->dev);
-
- drm_WARN_ON(display->drm,
+ drm_WARN_ON(old->display->drm,
!(atomic_read(&old->bits) & frontbuffer_bits));
atomic_andnot(frontbuffer_bits, &old->bits);
}
if (new) {
- struct intel_display *display = to_intel_display(new->obj->dev);
-
- drm_WARN_ON(display->drm,
+ drm_WARN_ON(new->display->drm,
atomic_read(&new->bits) & frontbuffer_bits);
atomic_or(frontbuffer_bits, &new->bits);
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 2fee12eaf9b6..22677acb4c06 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -26,10 +26,9 @@
#include <linux/atomic.h>
#include <linux/bits.h>
-#include <linux/kref.h>
-
-#include "i915_active_types.h"
+#include <linux/workqueue_types.h>
+struct drm_device;
struct drm_gem_object;
struct intel_display;
@@ -42,12 +41,8 @@ enum fb_op_origin {
};
struct intel_frontbuffer {
- struct kref ref;
+ struct intel_display *display;
atomic_t bits;
- struct i915_active write;
- struct drm_gem_object *obj;
- struct rcu_head rcu;
-
struct work_struct flush_work;
};
@@ -68,10 +63,6 @@ struct intel_frontbuffer {
GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-void intel_frontbuffer_flip_prepare(struct intel_display *display,
- unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct intel_display *display,
- unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits);
@@ -144,4 +135,7 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
struct intel_frontbuffer *new,
unsigned int frontbuffer_bits);
+void intel_frontbuffer_init(struct intel_frontbuffer *front, struct drm_device *drm);
+void intel_frontbuffer_fini(struct intel_frontbuffer *front);
+
#endif /* __INTEL_FRONTBUFFER_H__ */
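With the kref, i915_active and RCU machinery gone, intel_frontbuffer is assumed to be embedded in the buffer-object wrapper, with reference counting delegated to intel_bo_frontbuffer_get()/put() and the lifecycle driven by the new init/fini helpers. A hypothetical embedding sketch (names invented for illustration, not part of this series):

/*
 * Hypothetical sketch: a buffer-object wrapper embeds the now kref-less
 * intel_frontbuffer and calls the new init/fini helpers at create/destroy.
 */
struct example_bo {
	struct drm_gem_object base;
	struct intel_frontbuffer front;
};

static void example_bo_init(struct example_bo *bo, struct drm_device *drm)
{
	intel_frontbuffer_init(&bo->front, drm);	/* stores display, zeroes bits */
}

static void example_bo_fini(struct example_bo *bo)
{
	intel_frontbuffer_fini(&bo->front);		/* warns if tracking bits remain */
}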
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 82f3a40ecac7..795012d7c24c 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -449,7 +449,7 @@ gmbus_wait_idle(struct intel_display *display)
add_wait_queue(&display->gmbus.wait_queue, &wait);
intel_de_write_fw(display, GMBUS4(display), irq_enable);
- ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10, NULL);
+ ret = intel_de_wait_fw_ms(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10, NULL);
intel_de_write_fw(display, GMBUS4(display), 0);
remove_wait_queue(&display->gmbus.wait_queue, &wait);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 7195e8cf671c..5e1a96223a9c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -410,9 +410,8 @@ static int intel_hdcp_load_keys(struct intel_display *display)
}
/* Wait for the keys to load (500us) */
- ret = intel_de_wait_custom(display, HDCP_KEY_STATUS,
- HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
- 10, 1, &val);
+ ret = intel_de_wait_ms(display, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE,
+ HDCP_KEY_LOAD_DONE, 1, &val);
if (ret)
return ret;
else if (!(val & HDCP_KEY_LOAD_STATUS))
@@ -428,7 +427,7 @@ static int intel_hdcp_load_keys(struct intel_display *display)
static int intel_write_sha_text(struct intel_display *display, u32 sha_text)
{
intel_de_write(display, HDCP_SHA_TEXT, sha_text);
- if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
+ if (intel_de_wait_for_set_ms(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
drm_err(display->drm, "Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
}
@@ -707,8 +706,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
/* Tell the HW we're done with the hash and wait for it to ACK */
intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_COMPLETE_HASH);
- if (intel_de_wait_for_set(display, HDCP_REP_CTL,
- HDCP_SHA1_COMPLETE, 1)) {
+ if (intel_de_wait_for_set_ms(display, HDCP_REP_CTL,
+ HDCP_SHA1_COMPLETE, 1)) {
drm_err(display->drm, "Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
@@ -856,9 +855,9 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_de_wait_for_set(display,
- HDCP_STATUS(display, cpu_transcoder, port),
- HDCP_STATUS_AN_READY, 1)) {
+ if (intel_de_wait_for_set_ms(display,
+ HDCP_STATUS(display, cpu_transcoder, port),
+ HDCP_STATUS_AN_READY, 1)) {
drm_err(display->drm, "Timed out waiting for An\n");
return -ETIMEDOUT;
}
@@ -953,10 +952,10 @@ static int intel_hdcp_auth(struct intel_connector *connector)
}
/* Wait for encryption confirmation */
- if (intel_de_wait_for_set(display,
- HDCP_STATUS(display, cpu_transcoder, port),
- HDCP_STATUS_ENC,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_for_set_ms(display,
+ HDCP_STATUS(display, cpu_transcoder, port),
+ HDCP_STATUS_ENC,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(display->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -1013,9 +1012,9 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
hdcp->hdcp_encrypted = false;
intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
- if (intel_de_wait_for_clear(display,
- HDCP_STATUS(display, cpu_transcoder, port),
- ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_for_clear_ms(display,
+ HDCP_STATUS(display, cpu_transcoder, port),
+ ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(display->drm,
"Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -1940,11 +1939,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
0, CTL_LINK_ENCRYPTION_REQ);
- ret = intel_de_wait_for_set(display,
- HDCP2_STATUS(display, cpu_transcoder,
- port),
- LINK_ENCRYPTION_STATUS,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ ret = intel_de_wait_for_set_ms(display,
+ HDCP2_STATUS(display, cpu_transcoder, port),
+ LINK_ENCRYPTION_STATUS,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
dig_port->hdcp.auth_status = true;
return ret;
@@ -1966,11 +1964,10 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
CTL_LINK_ENCRYPTION_REQ, 0);
- ret = intel_de_wait_for_clear(display,
- HDCP2_STATUS(display, cpu_transcoder,
- port),
- LINK_ENCRYPTION_STATUS,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ ret = intel_de_wait_for_clear_ms(display,
+ HDCP2_STATUS(display, cpu_transcoder, port),
+ LINK_ENCRYPTION_STATUS,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
drm_dbg_kms(display->drm, "Disable Encryption Timedout");
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 5c637341b210..908faf17f93d 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1598,8 +1598,8 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
intel_de_write(display, HDCP_RPRIME(display, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- ret = intel_de_wait_for_set(display, HDCP_STATUS(display, cpu_transcoder, port),
- HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC, 1);
+ ret = intel_de_wait_for_set_ms(display, HDCP_STATUS(display, cpu_transcoder, port),
+ HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC, 1);
if (ret) {
drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
intel_de_read(display, HDCP_STATUS(display, cpu_transcoder,
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.c b/drivers/gpu/drm/i915/display/intel_lt_phy.c
index af48d6cde226..a67eb4f7f897 100644
--- a/drivers/gpu/drm/i915/display/intel_lt_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.c
@@ -6,7 +6,6 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -14,6 +13,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dpll_mgr.h"
#include "intel_hdmi.h"
#include "intel_lt_phy.h"
@@ -31,6 +31,32 @@
#define INTEL_LT_PHY_BOTH_LANES (INTEL_LT_PHY_LANE1 |\
INTEL_LT_PHY_LANE0)
#define MODE_DP 3
+#define Q32_TO_INT(x) ((x) >> 32)
+#define Q32_TO_FRAC(x) ((x) & 0xFFFFFFFF)
+#define DCO_MIN_FREQ_MHZ 11850
+#define REF_CLK_KHZ 38400
+#define TDC_RES_MULTIPLIER 10000000ULL
+
+struct phy_param_t {
+ u32 val;
+ u32 addr;
+};
+
+struct lt_phy_params {
+ struct phy_param_t pll_reg4;
+ struct phy_param_t pll_reg3;
+ struct phy_param_t pll_reg5;
+ struct phy_param_t pll_reg57;
+ struct phy_param_t lf;
+ struct phy_param_t tdc;
+ struct phy_param_t ssc;
+ struct phy_param_t bias2;
+ struct phy_param_t bias_trim;
+ struct phy_param_t dco_med;
+ struct phy_param_t dco_fine;
+ struct phy_param_t ssc_inj;
+ struct phy_param_t surv_bonus;
+};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
.clock = 162000,
@@ -1041,9 +1067,9 @@ static int __intel_lt_phy_p2p_write_once(struct intel_encoder *encoder,
int ack;
u32 val;
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_P2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_P2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Resetting bus.\n",
phy_name(phy));
@@ -1175,13 +1201,11 @@ intel_lt_phy_lane_reset(struct intel_encoder *encoder,
XELPDP_LANE_PCLK_PLL_REQUEST(0),
XELPDP_LANE_PCLK_PLL_REQUEST(0));
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
- XELPDP_LANE_PCLK_PLL_ACK(0),
- XELPDP_LANE_PCLK_PLL_ACK(0),
- XE3PLPD_MACCLK_TURNON_LATENCY_US,
- XE3PLPD_MACCLK_TURNON_LATENCY_MS, NULL))
- drm_warn(display->drm, "PHY %c PLL MacCLK assertion Ack not done after %dus.\n",
- phy_name(phy), XE3PLPD_MACCLK_TURNON_LATENCY_MS * 1000);
+ if (intel_de_wait_for_set_ms(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNON_LATENCY_MS))
+ drm_warn(display->drm, "PHY %c PLL MacCLK assertion ack not done\n",
+ phy_name(phy));
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
XELPDP_FORWARD_CLOCK_UNGATE,
@@ -1190,18 +1214,17 @@ intel_lt_phy_lane_reset(struct intel_encoder *encoder,
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
lane_pipe_reset | lane_phy_pulse_status, 0);
- if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
- lane_phy_current_status, 0,
- XE3PLPD_RESET_END_LATENCY_US, 2, NULL))
- drm_warn(display->drm,
- "PHY %c failed to bring out of Lane reset after %dus.\n",
- phy_name(phy), XE3PLPD_RESET_END_LATENCY_US);
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ XE3PLPD_RESET_END_LATENCY_MS))
+ drm_warn(display->drm, "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
- if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
- lane_phy_pulse_status, lane_phy_pulse_status,
- XE3PLPD_RATE_CALIB_DONE_LATENCY_US, 0, NULL))
- drm_warn(display->drm, "PHY %c PLL rate not changed after %dus.\n",
- phy_name(phy), XE3PLPD_RATE_CALIB_DONE_LATENCY_US);
+ if (intel_de_wait_for_set_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ XE3PLPD_RATE_CALIB_DONE_LATENCY_MS))
+ drm_warn(display->drm, "PHY %c PLL rate not changed\n",
+ phy_name(phy));
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_pulse_status, 0);
}
@@ -1356,10 +1379,308 @@ intel_lt_phy_pll_is_ssc_enabled(struct intel_crtc_state *crtc_state,
return false;
}
+static u64 mul_q32_u32(u64 a_q32, u32 b)
+{
+ u64 p0, p1, carry, result;
+ u64 x_hi = a_q32 >> 32;
+ u64 x_lo = a_q32 & 0xFFFFFFFFULL;
+
+ p0 = x_lo * (u64)b;
+ p1 = x_hi * (u64)b;
+ carry = p0 >> 32;
+ result = (p1 << 32) + (carry << 32) + (p0 & 0xFFFFFFFFULL);
+
+ return result;
+}
+
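mul_q32_u32() multiplies a Q32.32 fixed-point value by a 32-bit integer, splitting the operand so neither partial product overflows 64 bits. A standalone userspace illustration (not driver code) of the same arithmetic:

/* Standalone userspace illustration (not driver code) of the Q32.32
 * multiply: 32 integer bits, 32 fractional bits, operand split so the
 * partial products stay within 64 bits. */
#include <assert.h>
#include <stdint.h>

static uint64_t q32_mul_u32(uint64_t a_q32, uint32_t b)
{
	uint64_t lo = (a_q32 & 0xFFFFFFFFULL) * b;
	uint64_t hi = (a_q32 >> 32) * b;

	return (hi << 32) + lo;	/* equivalent to the explicit carry handling above */
}

int main(void)
{
	uint64_t one_point_five = (1ULL << 32) | (1ULL << 31);	/* 1.5 in Q32.32 */
	uint64_t r = q32_mul_u32(one_point_five, 3);		/* expect 4.5 */

	assert((r >> 32) == 4 && (uint32_t)r == 0x80000000u);
	return 0;
}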
+static bool
+calculate_target_dco_and_loop_cnt(u32 frequency_khz, u64 *target_dco_mhz, u32 *loop_cnt)
+{
+ u32 ppm_value = 1;
+ u32 dco_min_freq = DCO_MIN_FREQ_MHZ;
+ u32 dco_max_freq = 16200;
+ u32 dco_min_freq_low = 10000;
+ u32 dco_max_freq_low = 12000;
+ u64 val = 0;
+ u64 refclk_khz = REF_CLK_KHZ;
+ u64 m2div = 0;
+ u64 val_with_frac = 0;
+ u64 ppm = 0;
+ u64 temp0 = 0, temp1, scale;
+ int ppm_cnt, dco_count, y;
+
+ for (ppm_cnt = 0; ppm_cnt < 5; ppm_cnt++) {
+ ppm_value = ppm_cnt == 2 ? 2 : 1;
+ for (dco_count = 0; dco_count < 2; dco_count++) {
+ if (dco_count == 1) {
+ dco_min_freq = dco_min_freq_low;
+ dco_max_freq = dco_max_freq_low;
+ }
+ for (y = 2; y <= 255; y += 2) {
+ val = div64_u64((u64)y * frequency_khz, 200);
+ m2div = div64_u64(((u64)(val) << 32), refclk_khz);
+ m2div = mul_q32_u32(m2div, 500);
+ val_with_frac = mul_q32_u32(m2div, refclk_khz);
+ val_with_frac = div64_u64(val_with_frac, 500);
+ temp1 = Q32_TO_INT(val_with_frac);
+ temp0 = (temp1 > val) ? (temp1 - val) :
+ (val - temp1);
+ ppm = div64_u64(temp0, val);
+ if (temp1 >= dco_min_freq &&
+ temp1 <= dco_max_freq &&
+ ppm < ppm_value) {
+ /* Round to two places */
+ scale = (1ULL << 32) / 100;
+ temp0 = DIV_ROUND_UP_ULL(val_with_frac,
+ scale);
+ *target_dco_mhz = temp0 * scale;
+ *loop_cnt = y;
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
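A concrete instance of the search criterion above, as a standalone check (not driver code): for a 594 MHz HDMI TMDS clock, loop count y = 4 yields a candidate DCO inside the 11850..16200 MHz band, so it is a valid candidate provided the reconstruction error stays within tolerance.

/* Standalone illustration of the candidate-DCO computation used in the
 * loop above: candidate = y * frequency_khz / 200, checked against the
 * DCO band. Values here are an example, not exhaustive. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t frequency_khz = 594000;	/* HDMI 2.0 TMDS clock */
	uint32_t y = 4;				/* even loop-count candidate */
	uint64_t dco_mhz = (uint64_t)y * frequency_khz / 200;

	assert(dco_mhz >= 11850 && dco_mhz <= 16200);	/* 11880 MHz: in band */
	return 0;
}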
+static void set_phy_vdr_addresses(struct lt_phy_params *p, int pll_type)
+{
+ p->pll_reg4.addr = PLL_REG_ADDR(PLL_REG4_ADDR, pll_type);
+ p->pll_reg3.addr = PLL_REG_ADDR(PLL_REG3_ADDR, pll_type);
+ p->pll_reg5.addr = PLL_REG_ADDR(PLL_REG5_ADDR, pll_type);
+ p->pll_reg57.addr = PLL_REG_ADDR(PLL_REG57_ADDR, pll_type);
+ p->lf.addr = PLL_REG_ADDR(PLL_LF_ADDR, pll_type);
+ p->tdc.addr = PLL_REG_ADDR(PLL_TDC_ADDR, pll_type);
+ p->ssc.addr = PLL_REG_ADDR(PLL_SSC_ADDR, pll_type);
+ p->bias2.addr = PLL_REG_ADDR(PLL_BIAS2_ADDR, pll_type);
+ p->bias_trim.addr = PLL_REG_ADDR(PLL_BIAS_TRIM_ADDR, pll_type);
+ p->dco_med.addr = PLL_REG_ADDR(PLL_DCO_MED_ADDR, pll_type);
+ p->dco_fine.addr = PLL_REG_ADDR(PLL_DCO_FINE_ADDR, pll_type);
+ p->ssc_inj.addr = PLL_REG_ADDR(PLL_SSC_INJ_ADDR, pll_type);
+ p->surv_bonus.addr = PLL_REG_ADDR(PLL_SURV_BONUS_ADDR, pll_type);
+}
+
+static void compute_ssc(struct lt_phy_params *p, u32 ana_cfg)
+{
+ int ssc_stepsize = 0;
+ int ssc_steplen = 0;
+ int ssc_steplog = 0;
+
+ p->ssc.val = (1 << 31) | (ana_cfg << 24) | (ssc_steplog << 16) |
+ (ssc_stepsize << 8) | ssc_steplen;
+}
+
+static void compute_bias2(struct lt_phy_params *p)
+{
+ u32 ssc_en_local = 0;
+ u64 dynctrl_ovrd_en = 0;
+
+ p->bias2.val = (dynctrl_ovrd_en << 31) | (ssc_en_local << 30) |
+ (1 << 23) | (1 << 24) | (32 << 16) | (1 << 8);
+}
+
+static void compute_tdc(struct lt_phy_params *p, u64 tdc_fine)
+{
+ u32 settling_time = 15;
+ u32 bias_ovr_en = 1;
+ u32 coldstart = 1;
+ u32 true_lock = 2;
+ u32 early_lock = 1;
+ u32 lock_ovr_en = 1;
+ u32 lock_thr = tdc_fine ? 3 : 5;
+ u32 unlock_thr = tdc_fine ? 5 : 11;
+
+ p->tdc.val = (u32)((2 << 30) + (settling_time << 16) + (bias_ovr_en << 15) +
+ (lock_ovr_en << 14) + (coldstart << 12) + (true_lock << 10) +
+ (early_lock << 8) + (unlock_thr << 4) + lock_thr);
+}
+
+static void compute_dco_med(struct lt_phy_params *p)
+{
+ u32 cselmed_en = 0;
+ u32 cselmed_dyn_adj = 0;
+ u32 cselmed_ratio = 39;
+ u32 cselmed_thr = 8;
+
+ p->dco_med.val = (cselmed_en << 31) + (cselmed_dyn_adj << 30) +
+ (cselmed_ratio << 24) + (cselmed_thr << 21);
+}
+
+static void compute_dco_fine(struct lt_phy_params *p, u32 dco_12g)
+{
+ u32 dco_fine0_tune_2_0 = 0;
+ u32 dco_fine1_tune_2_0 = 0;
+ u32 dco_fine2_tune_2_0 = 0;
+ u32 dco_fine3_tune_2_0 = 0;
+ u32 dco_dith0_tune_2_0 = 0;
+ u32 dco_dith1_tune_2_0 = 0;
+
+ dco_fine0_tune_2_0 = dco_12g ? 4 : 3;
+ dco_fine1_tune_2_0 = 2;
+ dco_fine2_tune_2_0 = dco_12g ? 2 : 1;
+ dco_fine3_tune_2_0 = 5;
+ dco_dith0_tune_2_0 = dco_12g ? 4 : 3;
+ dco_dith1_tune_2_0 = 2;
+
+ p->dco_fine.val = (dco_dith1_tune_2_0 << 19) +
+ (dco_dith0_tune_2_0 << 16) +
+ (dco_fine3_tune_2_0 << 11) +
+ (dco_fine2_tune_2_0 << 8) +
+ (dco_fine1_tune_2_0 << 3) +
+ dco_fine0_tune_2_0;
+}
+
+int
+intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state,
+ u32 frequency_khz)
+{
+#define DATA_ASSIGN(i, pll_reg) \
+ do { \
+ lt_state->data[i][0] = (u8)((((pll_reg).val) & 0xFF000000) >> 24); \
+ lt_state->data[i][1] = (u8)((((pll_reg).val) & 0x00FF0000) >> 16); \
+ lt_state->data[i][2] = (u8)((((pll_reg).val) & 0x0000FF00) >> 8); \
+ lt_state->data[i][3] = (u8)((((pll_reg).val) & 0x000000FF)); \
+ } while (0)
+#define ADDR_ASSIGN(i, pll_reg) \
+ do { \
+ lt_state->addr_msb[i] = ((pll_reg).addr >> 8) & 0xFF; \
+ lt_state->addr_lsb[i] = (pll_reg).addr & 0xFF; \
+ } while (0)
+
+ bool found = false;
+ struct lt_phy_params p;
+ u32 dco_fmin = DCO_MIN_FREQ_MHZ;
+ u64 refclk_khz = REF_CLK_KHZ;
+ u32 refclk_mhz_int = REF_CLK_KHZ / 1000;
+ u64 m2div = 0;
+ u64 target_dco_mhz = 0;
+ u64 tdc_fine, tdc_targetcnt;
+ u64 feedfwd_gain, feedfwd_cal_en;
+ u64 tdc_res = 30;
+ u32 prop_coeff;
+ u32 int_coeff;
+ u32 ndiv = 1;
+ u32 m1div = 1, m2div_int, m2div_frac;
+ u32 frac_en;
+ u32 ana_cfg;
+ u32 loop_cnt = 0;
+ u32 gain_ctrl = 2;
+ u32 postdiv = 0;
+ u32 dco_12g = 0;
+ u32 pll_type = 0;
+ u32 d1 = 2, d3 = 5, d4 = 0, d5 = 0;
+ u32 d6 = 0, d6_new = 0;
+ u32 d7, d8 = 0;
+ u32 bonus_7_0 = 0;
+ u32 csel2fo = 11;
+ u32 csel2fo_ovrd_en = 1;
+ u64 temp0, temp1, temp2, temp3;
+
+ p.surv_bonus.val = (bonus_7_0 << 16);
+ p.pll_reg4.val = (refclk_mhz_int << 17) +
+ (ndiv << 9) + (1 << 4);
+ p.bias_trim.val = (csel2fo_ovrd_en << 30) + (csel2fo << 24);
+ p.ssc_inj.val = 0;
+ found = calculate_target_dco_and_loop_cnt(frequency_khz, &target_dco_mhz, &loop_cnt);
+ if (!found)
+ return -EINVAL;
+
+ m2div = div64_u64(target_dco_mhz, (refclk_khz * ndiv * m1div));
+ m2div = mul_q32_u32(m2div, 1000);
+ if (Q32_TO_INT(m2div) > 511)
+ return -EINVAL;
+
+ m2div_int = (u32)Q32_TO_INT(m2div);
+ m2div_frac = (u32)(Q32_TO_FRAC(m2div));
+ frac_en = (m2div_frac > 0) ? 1 : 0;
+
+ if (frac_en > 0)
+ tdc_res = 70;
+ else
+ tdc_res = 36;
+ tdc_fine = tdc_res > 50 ? 1 : 0;
+ temp0 = tdc_res * 40 * 11;
+ temp1 = div64_u64(((4 * TDC_RES_MULTIPLIER) + temp0) * 500, temp0 * refclk_khz);
+ temp2 = div64_u64(temp0 * refclk_khz, 1000);
+ temp3 = div64_u64(((8 * TDC_RES_MULTIPLIER) + temp2), temp2);
+ tdc_targetcnt = tdc_res < 50 ? (int)(temp1) : (int)(temp3);
+ tdc_targetcnt = (int)(tdc_targetcnt / 2);
+ temp0 = mul_q32_u32(target_dco_mhz, tdc_res);
+ temp0 >>= 32;
+ feedfwd_gain = (m2div_frac > 0) ? div64_u64(m1div * TDC_RES_MULTIPLIER, temp0) : 0;
+ feedfwd_cal_en = frac_en;
+
+ temp0 = (u32)Q32_TO_INT(target_dco_mhz);
+ prop_coeff = (temp0 >= dco_fmin) ? 3 : 4;
+ int_coeff = (temp0 >= dco_fmin) ? 7 : 8;
+ ana_cfg = (temp0 >= dco_fmin) ? 8 : 6;
+ dco_12g = (temp0 >= dco_fmin) ? 0 : 1;
+
+ if (temp0 > 12960)
+ d7 = 10;
+ else
+ d7 = 8;
+
+ d8 = loop_cnt / 2;
+ d4 = d8 * 2;
+
+ /* Compute pll_reg3,5,57 & lf */
+ p.pll_reg3.val = (u32)((d4 << 21) + (d3 << 18) + (d1 << 15) + (m2div_int << 5));
+ p.pll_reg5.val = m2div_frac;
+ postdiv = (d5 == 0) ? 9 : d5;
+ d6_new = (d6 == 0) ? 40 : d6;
+ p.pll_reg57.val = (d7 << 24) + (postdiv << 15) + (d8 << 7) + d6_new;
+ p.lf.val = (u32)((frac_en << 31) + (1 << 30) + (frac_en << 29) +
+ (feedfwd_cal_en << 28) + (tdc_fine << 27) +
+ (gain_ctrl << 24) + (feedfwd_gain << 16) +
+ (int_coeff << 12) + (prop_coeff << 8) + tdc_targetcnt);
+
+ compute_ssc(&p, ana_cfg);
+ compute_bias2(&p);
+ compute_tdc(&p, tdc_fine);
+ compute_dco_med(&p);
+ compute_dco_fine(&p, dco_12g);
+
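+ /* Select which of the two VDR register banks the values are written to. */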
+ pll_type = ((frequency_khz == 10000) || (frequency_khz == 20000) ||
+ (frequency_khz == 2500) || (dco_12g == 1)) ? 0 : 1;
+ set_phy_vdr_addresses(&p, pll_type);
+
+ lt_state->config[0] = 0x84;
+ lt_state->config[1] = 0x2d;
+ ADDR_ASSIGN(0, p.pll_reg4);
+ ADDR_ASSIGN(1, p.pll_reg3);
+ ADDR_ASSIGN(2, p.pll_reg5);
+ ADDR_ASSIGN(3, p.pll_reg57);
+ ADDR_ASSIGN(4, p.lf);
+ ADDR_ASSIGN(5, p.tdc);
+ ADDR_ASSIGN(6, p.ssc);
+ ADDR_ASSIGN(7, p.bias2);
+ ADDR_ASSIGN(8, p.bias_trim);
+ ADDR_ASSIGN(9, p.dco_med);
+ ADDR_ASSIGN(10, p.dco_fine);
+ ADDR_ASSIGN(11, p.ssc_inj);
+ ADDR_ASSIGN(12, p.surv_bonus);
+ DATA_ASSIGN(0, p.pll_reg4);
+ DATA_ASSIGN(1, p.pll_reg3);
+ DATA_ASSIGN(2, p.pll_reg5);
+ DATA_ASSIGN(3, p.pll_reg57);
+ DATA_ASSIGN(4, p.lf);
+ DATA_ASSIGN(5, p.tdc);
+ DATA_ASSIGN(6, p.ssc);
+ DATA_ASSIGN(7, p.bias2);
+ DATA_ASSIGN(8, p.bias_trim);
+ DATA_ASSIGN(9, p.dco_med);
+ DATA_ASSIGN(10, p.dco_fine);
+ DATA_ASSIGN(11, p.ssc_inj);
+ DATA_ASSIGN(12, p.surv_bonus);
+
+ return 0;
+}
+
static int
-intel_lt_phy_calc_hdmi_port_clock(const struct intel_lt_phy_pll_state *lt_state)
+intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
{
-#define REF_CLK_KHZ 38400
#define REGVAL(i) ( \
(lt_state->data[i][3]) | \
(lt_state->data[i][2] << 8) | \
@@ -1367,6 +1688,9 @@ intel_lt_phy_calc_hdmi_port_clock(const struct intel_lt_phy_pll_state *lt_state)
(lt_state->data[i][0] << 24) \
)
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct intel_lt_phy_pll_state *lt_state =
+ &crtc_state->dpll_hw_state.ltpll;
int clk = 0;
u32 d8, pll_reg_5, pll_reg_3, pll_reg_57, m2div_frac, m2div_int;
u64 temp0, temp1;
@@ -1409,11 +1733,14 @@ intel_lt_phy_calc_hdmi_port_clock(const struct intel_lt_phy_pll_state *lt_state)
* frequency = (m2div * refclk_khz / (d8 * 10))
*/
d8 = (pll_reg_57 & REG_GENMASK(14, 7)) >> 7;
+ if (d8 == 0) {
+ /* Avoid a division by zero; fall back to the lowest HDMI clock. */
+ drm_warn(display->drm,
+ "Invalid port clock, using lowest HDMI port clock\n");
+ return xe3plpd_lt_hdmi_252.clock;
+ }
m2div_int = (pll_reg_3 & REG_GENMASK(14, 5)) >> 5;
temp0 = ((u64)m2div_frac * REF_CLK_KHZ) >> 32;
temp1 = (u64)m2div_int * REF_CLK_KHZ;
- if (d8 == 0)
- return 0;
clk = div_u64((temp1 + temp0), d8 * 10);
@@ -1442,7 +1769,7 @@ intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
lt_state->config[0]);
clk = intel_lt_phy_get_dp_clock(rate);
} else {
- clk = intel_lt_phy_calc_hdmi_port_clock(lt_state);
+ clk = intel_lt_phy_calc_hdmi_port_clock(crtc_state);
}
return clk;
@@ -1472,7 +1799,10 @@ intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
}
}
- /* TODO: Add a function to compute the data for HDMI TMDS*/
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_lt_phy_calculate_hdmi_state(&crtc_state->dpll_hw_state.ltpll,
+ crtc_state->port_clock);
return -EINVAL;
}
@@ -1651,11 +1981,11 @@ void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
XELPDP_LANE_PCLK_PLL_REQUEST(0), 0);
/* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
- XELPDP_LANE_PCLK_PLL_ACK(0), 0,
- XE3PLPD_MACCLK_TURNOFF_LATENCY_US, 0, NULL))
- drm_warn(display->drm, "PHY %c PLL MacCLK Ack deassertion Timeout after %dus.\n",
- phy_name(phy), XE3PLPD_MACCLK_TURNOFF_LATENCY_US);
+ if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNOFF_LATENCY_US))
+ drm_warn(display->drm, "PHY %c PLL MacCLK ack deassertion timeout\n",
+ phy_name(phy));
/*
* 9. Follow the Display Voltage Frequency Switching - Sequence Before Frequency
@@ -1671,12 +2001,11 @@ void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
XELPDP_LANE_PCLK_PLL_REQUEST(0));
/* 12. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 1. */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
- XELPDP_LANE_PCLK_PLL_ACK(0),
- XELPDP_LANE_PCLK_PLL_ACK(0),
- XE3PLPD_MACCLK_TURNON_LATENCY_US, 2, NULL))
- drm_warn(display->drm, "PHY %c PLL MacCLK Ack assertion Timeout after %dus.\n",
- phy_name(phy), XE3PLPD_MACCLK_TURNON_LATENCY_US);
+ if (intel_de_wait_for_set_ms(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNON_LATENCY_MS))
+ drm_warn(display->drm, "PHY %c PLL MacCLK ack assertion timeout\n",
+ phy_name(phy));
/*
* 13. Ungate the forward clock by setting
@@ -1700,11 +2029,11 @@ void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
rate_update, MB_WRITE_COMMITTED);
/* 16. Poll for PORT_BUF_CTL2 register PHY Pulse Status = 1 for Owned PHY Lanes. */
- if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
- lane_phy_pulse_status, lane_phy_pulse_status,
- XE3PLPD_RATE_CALIB_DONE_LATENCY_US, 2, NULL))
- drm_warn(display->drm, "PHY %c PLL rate not changed after %dus.\n",
- phy_name(phy), XE3PLPD_RATE_CALIB_DONE_LATENCY_US);
+ if (intel_de_wait_for_set_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ XE3PLPD_RATE_CALIB_DONE_LATENCY_MS))
+ drm_warn(display->drm, "PHY %c PLL rate not changed\n",
+ phy_name(phy));
/* 17. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
@@ -1758,13 +2087,11 @@ void intel_lt_phy_pll_disable(struct intel_encoder *encoder)
lane_pipe_reset);
/* 3. Poll for PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> PHY Current Status == 1. */
- if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
- lane_phy_current_status,
- lane_phy_current_status,
- XE3PLPD_RESET_START_LATENCY_US, 0, NULL))
- drm_warn(display->drm,
- "PHY %c failed to reset Lane after %dms.\n",
- phy_name(phy), XE3PLPD_RESET_START_LATENCY_US);
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ XE3PLPD_RESET_START_LATENCY_US))
+ drm_warn(display->drm, "PHY %c failed to reset lane\n",
+ phy_name(phy));
/* 4. Clear for PHY pulse status on owned PHY lanes. */
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
@@ -1783,11 +2110,11 @@ void intel_lt_phy_pll_disable(struct intel_encoder *encoder)
intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0);
/* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
- XELPDP_LANE_PCLK_PLL_ACK(0), 0,
- XE3PLPD_MACCLK_TURNOFF_LATENCY_US, 0, NULL))
- drm_warn(display->drm, "PHY %c PLL MacCLK Ack deassertion Timeout after %dus.\n",
- phy_name(phy), XE3PLPD_MACCLK_TURNOFF_LATENCY_US);
+ if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNOFF_LATENCY_US))
+ drm_warn(display->drm, "PHY %c PLL MacCLK ack deassertion timeout\n",
+ phy_name(phy));
/*
* 9. Follow the Display Voltage Frequency Switching -
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.h b/drivers/gpu/drm/i915/display/intel_lt_phy.h
index a538d4c69210..b7911acd7dcd 100644
--- a/drivers/gpu/drm/i915/display/intel_lt_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.h
@@ -35,6 +35,9 @@ void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_lt_phy_pll_state *pll_state);
void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state,
+ u32 frequency_khz);
void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_xe3plpd_pll_disable(struct intel_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
index 9223487d764e..98ccc069a69b 100644
--- a/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
@@ -7,13 +7,12 @@
#define __INTEL_LT_PHY_REGS_H__
#define XE3PLPD_MSGBUS_TIMEOUT_FAST_US 500
-#define XE3PLPD_MACCLK_TURNON_LATENCY_MS 1
-#define XE3PLPD_MACCLK_TURNON_LATENCY_US 21
+#define XE3PLPD_MACCLK_TURNON_LATENCY_MS 2
#define XE3PLPD_MACCLK_TURNOFF_LATENCY_US 1
-#define XE3PLPD_RATE_CALIB_DONE_LATENCY_US 50
+#define XE3PLPD_RATE_CALIB_DONE_LATENCY_MS 1
#define XE3PLPD_RESET_START_LATENCY_US 10
#define XE3PLPD_PWRDN_TO_RDY_LATENCY_US 4
-#define XE3PLPD_RESET_END_LATENCY_US 200
+#define XE3PLPD_RESET_END_LATENCY_MS 2
/* LT Phy MAC Register */
#define LT_PHY_MAC_VDR _MMIO(0xC00)
@@ -72,4 +71,20 @@
#define XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(port, lane) _XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(__xe2lpd_port_idx(port), \
lane)
#define XE3LPD_PORT_P2M_ADDR_MASK REG_GENMASK(11, 0)
+
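+/*
+ * VDR addresses of the PLL configuration registers programmed for HDMI.
+ * The alternate PLL type uses a second bank at PLL_TYPE_OFFSET.
+ */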
+#define PLL_REG4_ADDR 0x8510
+#define PLL_REG3_ADDR 0x850C
+#define PLL_REG5_ADDR 0x8514
+#define PLL_REG57_ADDR 0x85E4
+#define PLL_LF_ADDR 0x860C
+#define PLL_TDC_ADDR 0x8610
+#define PLL_SSC_ADDR 0x8614
+#define PLL_BIAS2_ADDR 0x8618
+#define PLL_BIAS_TRIM_ADDR 0x8648
+#define PLL_DCO_MED_ADDR 0x8640
+#define PLL_DCO_FINE_ADDR 0x864C
+#define PLL_SSC_INJ_ADDR 0x8624
+#define PLL_SURV_BONUS_ADDR 0x8644
+#define PLL_TYPE_OFFSET 0x200
+#define PLL_REG_ADDR(base, pll_type) ((pll_type) ? (base) + PLL_TYPE_OFFSET : (base))
#endif /* __INTEL_LT_PHY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 48f4d8ed4f15..89aeb4fb340e 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -329,7 +329,7 @@ static void intel_enable_lvds(struct intel_atomic_state *state,
intel_de_rmw(display, PP_CONTROL(display, 0), 0, PANEL_POWER_ON);
intel_de_posting_read(display, lvds_encoder->reg);
- if (intel_de_wait_for_set(display, PP_STATUS(display, 0), PP_ON, 5000))
+ if (intel_de_wait_for_set_ms(display, PP_STATUS(display, 0), PP_ON, 5000))
drm_err(display->drm,
"timed out waiting for panel to power on\n");
@@ -345,7 +345,7 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_de_rmw(display, PP_CONTROL(display, 0), PANEL_POWER_ON, 0);
- if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_ON, 1000))
+ if (intel_de_wait_for_clear_ms(display, PP_STATUS(display, 0), PP_ON, 1000))
drm_err(display->drm,
"timed out waiting for panel to power off\n");
@@ -384,7 +384,7 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
+ if (intel_de_wait_for_clear_ms(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
drm_err(display->drm,
"timed out waiting for panel power cycle delay\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 231b1632d64a..88eb7ae5765c 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -308,8 +308,6 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
intel_frontbuffer_put(overlay->frontbuffer);
overlay->frontbuffer = frontbuffer;
- intel_frontbuffer_flip_prepare(display, INTEL_FRONTBUFFER_OVERLAY(pipe));
-
overlay->old_vma = overlay->vma;
if (vma)
overlay->vma = i915_vma_get(vma);
@@ -366,7 +364,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
if (drm_WARN_ON(display->drm, !vma))
return;
- intel_frontbuffer_flip_complete(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_vma_unpin(vma);
i915_vma_put(vma);
@@ -822,8 +820,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_pin_section;
}
- i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
-
if (!overlay->active) {
const struct intel_crtc_state *crtc_state =
overlay->crtc->config;
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 3456c794e0e7..16619f7be5f8 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -305,7 +305,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
}
intel_de_write(display, reg, val | TRANS_ENABLE);
- if (intel_de_wait_for_set(display, reg, TRANS_STATE_ENABLE, 100))
+ if (intel_de_wait_for_set_ms(display, reg, TRANS_STATE_ENABLE, 100))
drm_err(display->drm, "failed to enable transcoder %c\n",
pipe_name(pipe));
}
@@ -326,7 +326,7 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
reg = PCH_TRANSCONF(pipe);
intel_de_rmw(display, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(display, reg, TRANS_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear_ms(display, reg, TRANS_STATE_ENABLE, 50))
drm_err(display->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
@@ -572,8 +572,8 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_INTERLACE_PROGRESSIVE;
intel_de_write(display, LPT_TRANSCONF, val);
- if (intel_de_wait_for_set(display, LPT_TRANSCONF,
- TRANS_STATE_ENABLE, 100))
+ if (intel_de_wait_for_set_ms(display, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 100))
drm_err(display->drm, "Failed to enable PCH transcoder\n");
}
@@ -581,8 +581,8 @@ static void lpt_disable_pch_transcoder(struct intel_display *display)
{
intel_de_rmw(display, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(display, LPT_TRANSCONF,
- TRANS_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear_ms(display, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 50))
drm_err(display->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index cca880c7eed4..9a89bb6dcf65 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -21,17 +21,15 @@ static void lpt_fdi_reset_mphy(struct intel_display *display)
intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
- ret = intel_de_wait_custom(display, SOUTH_CHICKEN2,
- FDI_MPHY_IOSFSB_RESET_STATUS, FDI_MPHY_IOSFSB_RESET_STATUS,
- 100, 0, NULL);
+ ret = intel_de_wait_for_set_us(display, SOUTH_CHICKEN2,
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100);
if (ret)
drm_err(display->drm, "FDI mPHY reset assert timeout\n");
intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
- ret = intel_de_wait_custom(display, SOUTH_CHICKEN2,
- FDI_MPHY_IOSFSB_RESET_STATUS, 0,
- 100, 0, NULL);
+ ret = intel_de_wait_for_clear_us(display, SOUTH_CHICKEN2,
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100);
if (ret)
drm_err(display->drm, "FDI mPHY reset de-assert timeout\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c
index 505c776c0585..5105e3278bc4 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_plane.c
@@ -1051,6 +1051,9 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
DISPLAY_VERx100(display) == 3002) &&
src_x % 2 != 0)
hsub = 2;
+
+ if (DISPLAY_VER(display) == 35)
+ vsub = 2;
} else {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 81444e0b3c41..a1de1ec564d1 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -133,6 +133,7 @@ initial_plane_vma(struct intel_display *display,
struct drm_mm_node orig_mm = {};
struct i915_vma *vma;
resource_size_t phys_base;
+ unsigned int tiling;
u32 base, size;
u64 pinctl;
@@ -179,17 +180,19 @@ initial_plane_vma(struct intel_display *display,
i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
I915_CACHE_WT : I915_CACHE_NONE);
- switch (plane_config->tiling) {
+ tiling = intel_fb_modifier_to_tiling(plane_config->fb->base.modifier);
+
+ switch (tiling) {
case I915_TILING_NONE:
break;
case I915_TILING_X:
case I915_TILING_Y:
obj->tiling_and_stride =
plane_config->fb->base.pitches[0] |
- plane_config->tiling;
+ tiling;
break;
default:
- MISSING_CASE(plane_config->tiling);
+ MISSING_CASE(tiling);
goto err_obj;
}
@@ -374,7 +377,7 @@ valid_fb:
plane_state->uapi.crtc_w = fb->width;
plane_state->uapi.crtc_h = fb->height;
- if (plane_config->tiling)
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
dev_priv->preserve_bios_swizzle = true;
plane_state->uapi.fb = fb;
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index f52abd4e2eb0..dc44a7a169c1 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -390,12 +390,12 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
- return !(intel_de_wait_for_clear(display,
- XELPDP_INITIATE_PMDEMAND_REQUEST(1),
- XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
- intel_de_wait_for_clear(display,
- GEN12_DCPR_STATUS_1,
- XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
+ return !(intel_de_wait_for_clear_ms(display,
+ XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
+ intel_de_wait_for_clear_ms(display,
+ GEN12_DCPR_STATUS_1,
+ XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}
void
@@ -462,9 +462,9 @@ static void intel_pmdemand_poll(struct intel_display *display)
u32 status;
int ret;
- ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
- XELPDP_PMDEMAND_REQ_ENABLE, 0,
- 50, timeout_ms, &status);
+ ret = intel_de_wait_ms(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ XELPDP_PMDEMAND_REQ_ENABLE, 0,
+ timeout_ms, &status);
if (ret == -ETIMEDOUT)
drm_err(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 05014ffe3ce1..00ac652809cc 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -626,6 +626,10 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
int ret;
+ /* TODO: Enable Panel Replay on MST once it's properly implemented. */
+ if (intel_dp->mst_detect == DRM_DP_MST)
+ return;
+
ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
&intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
if (ret < 0)
@@ -932,7 +936,8 @@ static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
u32 current_dc_state = intel_display_power_get_current_dc_state(display);
- struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe);
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
current_dc_state != DC_STATE_EN_UPTO_DC6) ||
@@ -2272,8 +2277,8 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
}
/* Wait till PSR is idle */
- if (intel_de_wait_for_clear(display, psr_status,
- psr_status_mask, 2000))
+ if (intel_de_wait_for_clear_ms(display, psr_status,
+ psr_status_mask, 2000))
drm_err(display->drm, "Timed out waiting PSR idle state\n");
}
@@ -3161,7 +3166,7 @@ _psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state
return true;
}
- return intel_de_wait_for_clear(display,
+ return intel_de_wait_for_clear_ms(display,
EDP_PSR2_STATUS(display, cpu_transcoder),
EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
PSR_IDLE_TIMEOUT_MS);
@@ -3181,7 +3186,7 @@ _psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state
return true;
}
- return intel_de_wait_for_clear(display,
+ return intel_de_wait_for_clear_ms(display,
psr_status_reg(display, cpu_transcoder),
EDP_PSR_STATUS_STATE_MASK,
PSR_IDLE_TIMEOUT_MS);
@@ -3259,7 +3264,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
mutex_unlock(&intel_dp->psr.lock);
- err = intel_de_wait_for_clear(display, reg, mask, 50);
+ err = intel_de_wait_for_clear_ms(display, reg, mask, 50);
if (err)
drm_err(display->drm,
"Timed out waiting for PSR Idle for re-enable\n");
diff --git a/drivers/gpu/drm/i915/display/intel_sbi.c b/drivers/gpu/drm/i915/display/intel_sbi.c
index dfcff924f0ed..b636a0060d39 100644
--- a/drivers/gpu/drm/i915/display/intel_sbi.c
+++ b/drivers/gpu/drm/i915/display/intel_sbi.c
@@ -21,7 +21,8 @@ static int intel_sbi_rw(struct intel_display *display, u16 reg,
lockdep_assert_held(&display->sbi.lock);
- if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, NULL)) {
+ if (intel_de_wait_fw_ms(display, SBI_CTL_STAT,
+ SBI_STATUS_MASK, SBI_STATUS_READY, 100, NULL)) {
drm_err(display->drm, "timeout waiting for SBI to become ready\n");
return -EBUSY;
}
@@ -37,7 +38,8 @@ static int intel_sbi_rw(struct intel_display *display, u16 reg,
cmd |= SBI_CTL_OP_WR;
intel_de_write_fw(display, SBI_CTL_STAT, cmd | SBI_STATUS_BUSY);
- if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, &cmd)) {
+ if (intel_de_wait_fw_ms(display, SBI_CTL_STAT,
+ SBI_STATUS_MASK, SBI_STATUS_READY, 100, &cmd)) {
drm_err(display->drm, "timeout waiting for SBI to complete read\n");
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 4f028e6a91cd..295030742294 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -42,8 +42,8 @@ void intel_snps_phy_wait_for_calibration(struct intel_display *display)
* which phy was affected and skip setup of the corresponding
* output later.
*/
- if (intel_de_wait_for_clear(display, DG2_PHY_MISC(phy),
- DG2_PHY_DP_TX_ACK_MASK, 25))
+ if (intel_de_wait_for_clear_ms(display, DG2_PHY_MISC(phy),
+ DG2_PHY_DP_TX_ACK_MASK, 25))
display->snps.phy_failed_calibration |= BIT(phy);
}
}
@@ -1863,7 +1863,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
* is locked at new settings. This register bit is sampling PHY
* dp_mpllb_state interface signal.
*/
- if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 5))
+ if (intel_de_wait_for_set_ms(display, enable_reg, PLL_LOCK, 5))
drm_dbg_kms(display->drm, "Port %c PLL not locked\n", phy_name(phy));
/*
@@ -1903,7 +1903,7 @@ void intel_mpllb_disable(struct intel_encoder *encoder)
* 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment
* (dp_txX_ack) that the new transmitter setting request is completed.
*/
- if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 5))
+ if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_LOCK, 5))
drm_err(display->drm, "Port %c PLL not locked\n", phy_name(phy));
/*
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 60f1d9ed181e..69b6873a6044 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -958,10 +958,9 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
static unsigned int
g4x_sprite_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
@@ -973,10 +972,9 @@ g4x_sprite_max_stride(struct intel_plane *plane,
static unsigned int
hsw_sprite_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 7e17ca018748..1e21fd02685d 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -1076,8 +1076,8 @@ xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable)
{
/* check if mailbox is running busy */
- if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
- TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
+ if (intel_de_wait_for_clear_ms(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
drm_dbg_kms(display->drm,
"Timeout waiting for TCSS mailbox run/busy bit to clear\n");
return;
@@ -1089,8 +1089,8 @@ static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enabl
TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));
/* wait to clear mailbox running busy bit before continuing */
- if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
- TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
+ if (intel_de_wait_for_clear_ms(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
drm_dbg_kms(display->drm,
"Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n");
return;
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 00cbc126fb36..b92c42fde937 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -716,9 +716,9 @@ static void intel_vrr_tg_disable(const struct intel_crtc_state *old_crtc_state)
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
trans_vrr_ctl(old_crtc_state));
- if (intel_de_wait_for_clear(display,
- TRANS_VRR_STATUS(display, cpu_transcoder),
- VRR_STATUS_VRR_EN_LIVE, 1000))
+ if (intel_de_wait_for_clear_ms(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000))
drm_err(display->drm, "Timed out waiting for VRR live status to clear\n");
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index ba1bf0bd4c55..89c8003ccfe7 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -465,12 +465,11 @@ static int icl_plane_max_height(const struct drm_framebuffer *fb,
static unsigned int
plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation,
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation,
unsigned int max_pixels,
unsigned int max_bytes)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
if (drm_rotation_90_or_270(rotation))
@@ -481,26 +480,26 @@ plane_max_stride(struct intel_plane *plane,
static unsigned int
adl_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
unsigned int max_pixels = 65536; /* PLANE_OFFSET limit */
unsigned int max_bytes = 128 * 1024;
- return plane_max_stride(plane, pixel_format,
+ return plane_max_stride(plane, info,
modifier, rotation,
max_pixels, max_bytes);
}
static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
unsigned int max_pixels = 8192; /* PLANE_OFFSET limit */
unsigned int max_bytes = 32 * 1024;
- return plane_max_stride(plane, pixel_format,
+ return plane_max_stride(plane, info,
modifier, rotation,
max_pixels, max_bytes);
}
@@ -1748,7 +1747,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
if (rotation & DRM_MODE_REFLECT_X &&
- fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ fb->modifier == DRM_FORMAT_MOD_LINEAR &&
+ DISPLAY_VER(display) < 35) {
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] horizontal flip is not supported with linear surface formats\n",
plane->base.base.id, plane->base.name);
@@ -3083,7 +3083,6 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
fourcc = skl_format_to_fourcc(pixel_format,
val & PLANE_CTL_ORDER_RGBX, alpha);
- fb->format = drm_format_info(fourcc);
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
@@ -3091,11 +3090,9 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
fb->modifier = DRM_FORMAT_MOD_LINEAR;
break;
case PLANE_CTL_TILED_X:
- plane_config->tiling = I915_TILING_X;
fb->modifier = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
- plane_config->tiling = I915_TILING_Y;
if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
if (DISPLAY_VER(display) >= 14)
fb->modifier = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS;
@@ -3136,6 +3133,8 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
+ fb->format = drm_get_format_info(display->drm, fourcc, fb->modifier);
+
if (!display->params.enable_dpt &&
intel_fb_modifier_uses_dpt(display, fb->modifier)) {
drm_dbg_kms(display->drm, "DPT disabled, skipping initial FB\n");
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
index 7c944d3ca855..6f815b231340 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
@@ -324,7 +324,7 @@
#define PLANE_WM_IGNORE_LINES REG_BIT(30)
#define PLANE_WM_AUTO_MIN_ALLOC_EN REG_BIT(29)
#define PLANE_WM_LINES_MASK REG_GENMASK(26, 14)
-#define PLANE_WM_BLOCKS_MASK REG_GENMASK(11, 0)
+#define PLANE_WM_BLOCKS_MASK REG_GENMASK(12, 0)
#define _PLANE_WM_SAGV_1_A 0x70258
#define _PLANE_WM_SAGV_1_B 0x71258
@@ -375,10 +375,10 @@
_PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B, \
_PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
-/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */
-#define PLANE_BUF_END_MASK REG_GENMASK(27, 16)
+/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits, xe3p_lpd 13 bits */
+#define PLANE_BUF_END_MASK REG_GENMASK(28, 16)
#define PLANE_BUF_END(end) REG_FIELD_PREP(PLANE_BUF_END_MASK, (end))
-#define PLANE_BUF_START_MASK REG_GENMASK(11, 0)
+#define PLANE_BUF_START_MASK REG_GENMASK(12, 0)
#define PLANE_BUF_START(start) REG_FIELD_PREP(PLANE_BUF_START_MASK, (start))
#define _PLANE_MIN_BUF_CFG_1_A 0x70274
@@ -389,9 +389,9 @@
_PLANE_MIN_BUF_CFG_1_A, _PLANE_MIN_BUF_CFG_1_B, \
_PLANE_MIN_BUF_CFG_2_A, _PLANE_MIN_BUF_CFG_2_B)
#define PLANE_AUTO_MIN_DBUF_EN REG_BIT(31)
-#define PLANE_MIN_DBUF_BLOCKS_MASK REG_GENMASK(27, 16)
+#define PLANE_MIN_DBUF_BLOCKS_MASK REG_GENMASK(28, 16)
#define PLANE_MIN_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_MIN_DBUF_BLOCKS_MASK, (val))
-#define PLANE_INTERIM_DBUF_BLOCKS_MASK REG_GENMASK(11, 0)
+#define PLANE_INTERIM_DBUF_BLOCKS_MASK REG_GENMASK(12, 0)
#define PLANE_INTERIM_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_INTERIM_DBUF_BLOCKS_MASK, (val))
/* tgl+ */
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 6d050408618c..54e9e0be019d 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -635,15 +635,22 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
+ const struct drm_mode_config *mode_config = &display->drm->mode_config;
+ const struct drm_format_info *info;
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
+ u64 modifier;
+ u32 format;
int level;
- ret = skl_compute_wm_params(crtc_state, 256,
- drm_format_info(DRM_FORMAT_ARGB8888),
- DRM_FORMAT_MOD_LINEAR,
- DRM_MODE_ROTATE_0,
+ format = DRM_FORMAT_ARGB8888;
+ modifier = DRM_FORMAT_MOD_LINEAR;
+
+ info = drm_get_format_info(display->drm, format, modifier);
+
+ ret = skl_compute_wm_params(crtc_state, mode_config->cursor_width,
+ info, modifier, DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0, 0);
drm_WARN_ON(display->drm, ret);
@@ -1812,6 +1819,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
if (wp->y_tiled) {
selected_result = max_fixed16(method2, wp->y_tile_minimum);
+ } else if (DISPLAY_VER(display) >= 35) {
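+ /* Xe3p+ (display ver 35) always uses method 2 for non-Y-tiled planes. */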
+ selected_result = method2;
} else {
if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
wp->dbuf_block_size < 1) &&
@@ -3183,6 +3192,13 @@ static void sanitize_wm_latency(struct intel_display *display)
int level, num_levels = display->wm.num_levels;
/*
+ * Xe3p and beyond should ignore level 0's reported latency and
+ * always apply WaWmMemoryReadLatency logic.
+ */
+ if (DISPLAY_VER(display) >= 35)
+ wm[0] = 0;
+
+ /*
* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
* need to be disabled. We make sure to sanitize the values out
* of the punit to satisfy this requirement.
@@ -3477,7 +3493,10 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
if (!HAS_MBUS_JOINING(display))
return;
- if (DISPLAY_VER(display) >= 20)
+ if (DISPLAY_VER(display) >= 35)
+ intel_de_rmw(display, MBUS_CTL, XE3P_MBUS_TRANSLATION_THROTTLE_MIN_MASK,
+ XE3P_MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
+ else if (DISPLAY_VER(display) >= 20)
intel_de_rmw(display, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
@@ -3488,9 +3507,14 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
ratio, str_yes_no(joined_mbus));
for_each_dbuf_slice(display, slice)
- intel_de_rmw(display, DBUF_CTL_S(slice),
- DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
- DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
+ if (DISPLAY_VER(display) >= 35)
+ intel_de_rmw(display, DBUF_CTL_S(slice),
+ XE3P_DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ XE3P_DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
+ else
+ intel_de_rmw(display, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
}
static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark_regs.h b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
index c5572fc0e847..abf56ac31105 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
@@ -32,16 +32,18 @@
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
-#define MBUS_CTL _MMIO(0x4438C)
-#define MBUS_JOIN REG_BIT(31)
-#define MBUS_HASHING_MODE_MASK REG_BIT(30)
-#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
-#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
-#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
-#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
-#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
-#define MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(15, 13)
-#define MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)
+#define MBUS_CTL _MMIO(0x4438C)
+#define MBUS_JOIN REG_BIT(31)
+#define MBUS_HASHING_MODE_MASK REG_BIT(30)
+#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
+#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
+#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
+#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
+#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+#define XE3P_MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(16, 13)
+#define XE3P_MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(XE3P_MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)
+#define MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(15, 13)
+#define MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)
/*
* The below are numbered starting from "S1" on gen11/gen12, but starting
@@ -51,20 +53,22 @@
* way things will be named by the hardware team going forward, plus it's more
* consistent with how most of the rest of our registers are named.
*/
-#define _DBUF_CTL_S0 0x45008
-#define _DBUF_CTL_S1 0x44FE8
-#define _DBUF_CTL_S2 0x44300
-#define _DBUF_CTL_S3 0x44304
-#define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \
- _DBUF_CTL_S0, \
- _DBUF_CTL_S1, \
- _DBUF_CTL_S2, \
- _DBUF_CTL_S3))
-#define DBUF_POWER_REQUEST REG_BIT(31)
-#define DBUF_POWER_STATE REG_BIT(30)
-#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19)
-#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x)
-#define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */
+#define _DBUF_CTL_S0 0x45008
+#define _DBUF_CTL_S1 0x44FE8
+#define _DBUF_CTL_S2 0x44300
+#define _DBUF_CTL_S3 0x44304
+#define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \
+ _DBUF_CTL_S0, \
+ _DBUF_CTL_S1, \
+ _DBUF_CTL_S2, \
+ _DBUF_CTL_S3))
+#define DBUF_POWER_REQUEST REG_BIT(31)
+#define DBUF_POWER_STATE REG_BIT(30)
+#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19)
+#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x)
+#define XE3P_DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(20, 16)
+#define XE3P_DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(XE3P_DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x)
+#define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */
#define DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x) /* ADL-P+ */
#define MTL_LATENCY_LP0_LP1 _MMIO(0x45780)
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 444682995658..19bdd8662359 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -94,8 +94,8 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_de_wait_for_set(display, MIPI_GEN_FIFO_STAT(display, port),
- mask, 100))
+ if (intel_de_wait_for_set_ms(display, MIPI_GEN_FIFO_STAT(display, port),
+ mask, 100))
drm_err(display->drm, "DPI FIFOs are not empty\n");
}
@@ -162,8 +162,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port),
- data_mask, 50))
+ if (intel_de_wait_for_clear_ms(display, MIPI_GEN_FIFO_STAT(display, port),
+ data_mask, 50))
drm_err(display->drm,
"Timeout waiting for HS/LP DATA FIFO !full\n");
@@ -176,8 +176,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
GEN_READ_DATA_AVAIL);
}
- if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port),
- ctrl_mask, 50)) {
+ if (intel_de_wait_for_clear_ms(display, MIPI_GEN_FIFO_STAT(display, port),
+ ctrl_mask, 50)) {
drm_err(display->drm,
"Timeout waiting for HS/LP CTRL FIFO !full\n");
}
@@ -188,8 +188,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port),
- data_mask, 50))
+ if (intel_de_wait_for_set_ms(display, MIPI_INTR_STAT(display, port),
+ data_mask, 50))
drm_err(display->drm,
"Timeout waiting for read data.\n");
@@ -246,7 +246,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
intel_de_write(display, MIPI_DPI_CONTROL(display, port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port), mask, 100))
+ if (intel_de_wait_for_set_ms(display, MIPI_INTR_STAT(display, port), mask, 100))
drm_err(display->drm,
"Video mode command 0x%08x send failed.\n", cmd);
@@ -352,8 +352,8 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
- GLK_MIPIIO_PORT_POWERED, 20))
+ if (intel_de_wait_for_set_ms(display, MIPI_CTRL(display, port),
+ GLK_MIPIIO_PORT_POWERED, 20))
drm_err(display->drm, "MIPIO port is powergated\n");
}
@@ -374,8 +374,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
- GLK_PHY_STATUS_PORT_READY, 20))
+ if (intel_de_wait_for_set_ms(display, MIPI_CTRL(display, port),
+ GLK_PHY_STATUS_PORT_READY, 20))
drm_err(display->drm, "PHY is not ON\n");
}
@@ -394,8 +394,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for ULPS active */
- if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
- GLK_ULPS_NOT_ACTIVE, 20))
+ if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port),
+ GLK_ULPS_NOT_ACTIVE, 20))
drm_err(display->drm, "ULPS not active\n");
/* Exit ULPS */
@@ -413,16 +413,16 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
- GLK_DATA_LANE_STOP_STATE, 20))
+ if (intel_de_wait_for_set_ms(display, MIPI_CTRL(display, port),
+ GLK_DATA_LANE_STOP_STATE, 20))
drm_err(display->drm,
"Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(display, BXT_MIPI_PORT_CTRL(port),
- AFE_LATCHOUT, 20))
+ if (intel_de_wait_for_set_ms(display, BXT_MIPI_PORT_CTRL(port),
+ AFE_LATCHOUT, 20))
drm_err(display->drm,
"D-PHY not entering LP-11 state\n");
}
@@ -519,15 +519,15 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
- GLK_PHY_STATUS_PORT_READY, 20))
+ if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port),
+ GLK_PHY_STATUS_PORT_READY, 20))
drm_err(display->drm, "PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
- GLK_MIPIIO_PORT_POWERED, 20))
+ if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port),
+ GLK_MIPIIO_PORT_POWERED, 20))
drm_err(display->drm,
"MIPI IO Port is not powergated\n");
}
@@ -544,8 +544,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
- GLK_PHY_STATUS_PORT_READY, 20))
+ if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port),
+ GLK_PHY_STATUS_PORT_READY, 20))
drm_err(display->drm, "PHY is not turning OFF\n");
}
@@ -595,8 +595,8 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((display->platform.broxton || port == PORT_A) &&
- intel_de_wait_for_clear(display, port_ctrl,
- AFE_LATCHOUT, 30))
+ intel_de_wait_for_clear_ms(display, port_ctrl,
+ AFE_LATCHOUT, 30))
drm_err(display->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index f078b9cda96c..a2da6285890b 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -319,8 +319,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_de_wait_for_clear(display, BXT_DSI_PLL_ENABLE,
- BXT_DSI_PLL_LOCKED, 1))
+ if (intel_de_wait_for_clear_ms(display, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1))
drm_err(display->drm,
"Timeout waiting for PLL lock deassertion\n");
}
@@ -568,8 +568,8 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
intel_de_rmw(display, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
- if (intel_de_wait_for_set(display, BXT_DSI_PLL_ENABLE,
- BXT_DSI_PLL_LOCKED, 1)) {
+ if (intel_de_wait_for_set_ms(display, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1)) {
drm_err(display->drm,
"Timed out waiting for DSI PLL to lock\n");
return;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index cd9686a7ded2..189ecdd0a9c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -194,8 +194,8 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * cpp, 64);
/* align stride to page size so that we can remap */
- if (args->pitch > intel_plane_fb_max_stride(dev, format,
- DRM_FORMAT_MOD_LINEAR))
+ if (args->pitch > intel_dumb_fb_max_stride(dev, format,
+ DRM_FORMAT_MOD_LINEAR))
args->pitch = ALIGN(args->pitch, 4096);
if (args->pitch < args->width)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index e1348a0043db..3f6f040c359d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -477,24 +477,24 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
- struct intel_frontbuffer *front;
+ struct i915_frontbuffer *front;
- front = i915_gem_object_get_frontbuffer(obj);
+ front = i915_gem_object_frontbuffer_lookup(obj);
if (front) {
- intel_frontbuffer_flush(front, origin);
- intel_frontbuffer_put(front);
+ intel_frontbuffer_flush(&front->base, origin);
+ i915_gem_object_frontbuffer_put(front);
}
}
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
- struct intel_frontbuffer *front;
+ struct i915_frontbuffer *front;
- front = i915_gem_object_get_frontbuffer(obj);
+ front = i915_gem_object_frontbuffer_lookup(obj);
if (front) {
- intel_frontbuffer_invalidate(front, origin);
- intel_frontbuffer_put(front);
+ intel_frontbuffer_invalidate(&front->base, origin);
+ i915_gem_object_frontbuffer_put(front);
}
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
new file mode 100644
index 000000000000..aaa15e7b3f17
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "i915_drv.h"
+#include "i915_gem_object_frontbuffer.h"
+
+static int frontbuffer_active(struct i915_active *ref)
+{
+ struct i915_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ kref_get(&front->ref);
+ return 0;
+}
+
+static void frontbuffer_retire(struct i915_active *ref)
+{
+ struct i915_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ intel_frontbuffer_flush(&front->base, ORIGIN_CS);
+ i915_gem_object_frontbuffer_put(front);
+}
+
+struct i915_frontbuffer *
+i915_gem_object_frontbuffer_get(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_frontbuffer *front, *cur;
+
+ front = i915_gem_object_frontbuffer_lookup(obj);
+ if (front)
+ return front;
+
+ front = kmalloc(sizeof(*front), GFP_KERNEL);
+ if (!front)
+ return NULL;
+
+ intel_frontbuffer_init(&front->base, &i915->drm);
+
+ kref_init(&front->ref);
+ i915_gem_object_get(obj);
+ front->obj = obj;
+
+ i915_active_init(&front->write,
+ frontbuffer_active,
+ frontbuffer_retire,
+ I915_ACTIVE_RETIRE_SLEEPS);
+
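+ /*
+ * Another thread may have installed a frontbuffer concurrently; if so,
+ * take a reference on the existing one and free the new allocation.
+ */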
+ spin_lock(&i915->frontbuffer_lock);
+ if (rcu_access_pointer(obj->frontbuffer)) {
+ cur = rcu_dereference_protected(obj->frontbuffer, true);
+ kref_get(&cur->ref);
+ } else {
+ cur = front;
+ rcu_assign_pointer(obj->frontbuffer, front);
+ }
+ spin_unlock(&i915->frontbuffer_lock);
+
+ if (cur != front) {
+ i915_gem_object_put(obj);
+ intel_frontbuffer_fini(&front->base);
+ kfree(front);
+ }
+
+ return cur;
+}
+
+void i915_gem_object_frontbuffer_ref(struct i915_frontbuffer *front)
+{
+ kref_get(&front->ref);
+}
+
+static void frontbuffer_release(struct kref *ref)
+ __releases(&i915->frontbuffer_lock)
+{
+ struct i915_frontbuffer *front =
+ container_of(ref, typeof(*front), ref);
+ struct drm_i915_gem_object *obj = front->obj;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
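+ /*
+ * Called with frontbuffer_lock held via kref_put_lock(); drop the lock
+ * once obj->frontbuffer has been cleared.
+ */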
+ i915_ggtt_clear_scanout(obj);
+
+ RCU_INIT_POINTER(obj->frontbuffer, NULL);
+
+ spin_unlock(&i915->frontbuffer_lock);
+
+ i915_active_fini(&front->write);
+
+ i915_gem_object_put(obj);
+
+ intel_frontbuffer_fini(&front->base);
+
+ kfree_rcu(front, rcu);
+}
+
+void i915_gem_object_frontbuffer_put(struct i915_frontbuffer *front)
+{
+ struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+ kref_put_lock(&front->ref, frontbuffer_release,
+ &i915->frontbuffer_lock);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
index b682969e3a29..2133e29047c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
@@ -12,6 +12,14 @@
#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
+struct i915_frontbuffer {
+ struct intel_frontbuffer base;
+ struct drm_i915_gem_object *obj;
+ struct i915_active write;
+ struct rcu_head rcu;
+ struct kref ref;
+};
+
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
@@ -33,19 +41,23 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
__i915_gem_object_invalidate_frontbuffer(obj, origin);
}
+struct i915_frontbuffer *i915_gem_object_frontbuffer_get(struct drm_i915_gem_object *obj);
+void i915_gem_object_frontbuffer_ref(struct i915_frontbuffer *front);
+void i915_gem_object_frontbuffer_put(struct i915_frontbuffer *front);
+
/**
- * i915_gem_object_get_frontbuffer - Get the object's frontbuffer
- * @obj: The object whose frontbuffer to get.
+ * i915_gem_object_frontbuffer_lookup - Look up the object's frontbuffer
+ * @obj: The object whose frontbuffer to look up.
*
* Get pointer to object's frontbuffer if such exists. Please note that RCU
* mechanism is used to handle e.g. ongoing removal of frontbuffer pointer.
*
- * Return: pointer to object's frontbuffer is such exists or NULL
+ * Return: pointer to object's frontbuffer if such exists or NULL
*/
-static inline struct intel_frontbuffer *
-i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
+static inline struct i915_frontbuffer *
+i915_gem_object_frontbuffer_lookup(const struct drm_i915_gem_object *obj)
{
- struct intel_frontbuffer *front;
+ struct i915_frontbuffer *front;
if (likely(!rcu_access_pointer(obj->frontbuffer)))
return NULL;
@@ -62,41 +74,11 @@ i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
if (likely(front == rcu_access_pointer(obj->frontbuffer)))
break;
- intel_frontbuffer_put(front);
+ i915_gem_object_frontbuffer_put(front);
} while (1);
rcu_read_unlock();
return front;
}
-/**
- * i915_gem_object_set_frontbuffer - Set the object's frontbuffer
- * @obj: The object whose frontbuffer to set.
- * @front: The frontbuffer to set
- *
- * Set object's frontbuffer pointer. If frontbuffer is already set for the
- * object keep it and return it's pointer to the caller. Please note that RCU
- * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. This
- * function is protected by i915->display->fb_tracking.lock
- *
- * Return: pointer to frontbuffer which was set.
- */
-static inline struct intel_frontbuffer *
-i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
- struct intel_frontbuffer *front)
-{
- struct intel_frontbuffer *cur = front;
-
- if (!front) {
- RCU_INIT_POINTER(obj->frontbuffer, NULL);
- } else if (rcu_access_pointer(obj->frontbuffer)) {
- cur = rcu_dereference_protected(obj->frontbuffer, true);
- kref_get(&cur->ref);
- } else {
- rcu_assign_pointer(obj->frontbuffer, front);
- }
-
- return cur;
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 64600aa8227f..465ce94aee76 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -574,7 +574,7 @@ struct drm_i915_gem_object {
*/
u16 write_domain;
- struct intel_frontbuffer __rcu *frontbuffer;
+ struct i915_frontbuffer __rcu *frontbuffer;
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 95f9ddf22ce4..5381a934a671 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -311,6 +311,8 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
+ spinlock_t frontbuffer_lock; /* protects obj->frontbuffer (write-side) */
+
struct intel_pxp *pxp;
struct i915_pmu pmu;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 638fe20dfe54..4c82c9544b93 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1299,6 +1299,8 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
+
+ spin_lock_init(&dev_priv->frontbuffer_lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 354ef75ef6a5..5bf3b4ab2baa 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1233,6 +1233,7 @@
#define OROM_OFFSET_MASK REG_GENMASK(20, 16)
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
+#define XE3P_ECC_IMPACTING_DE REG_BIT(12)
#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ef9c272e60e5..1ce2dac75a66 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1992,13 +1992,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
}
if (flags & EXEC_OBJECT_WRITE) {
- struct intel_frontbuffer *front;
+ struct i915_frontbuffer *front;
- front = i915_gem_object_get_frontbuffer(obj);
+ front = i915_gem_object_frontbuffer_lookup(obj);
if (unlikely(front)) {
- if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+ if (intel_frontbuffer_invalidate(&front->base, ORIGIN_CS))
i915_active_add_request(&front->write, rq);
- intel_frontbuffer_put(front);
+ i915_gem_object_frontbuffer_put(front);
}
}
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 2e16346a6cc0..3e588762709a 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -686,6 +686,7 @@ static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info *
static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
+ struct intel_display *display = i915->display;
u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
@@ -724,6 +725,9 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info
dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val);
/* PSF GV points not supported in D14+ */
+ if (DISPLAY_VER(display) >= 35)
+ dram_info->ecc_impacting_de_bw = REG_FIELD_GET(XE3P_ECC_IMPACTING_DE, val);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h
index 03a973f1c941..8475ee379daa 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.h
+++ b/drivers/gpu/drm/i915/soc/intel_dram.h
@@ -30,6 +30,7 @@ struct dram_info {
u8 num_channels;
u8 num_qgv_points;
u8 num_psf_gv_points;
+ bool ecc_impacting_de_bw; /* Only valid from Xe3p_LPD onward. */
bool symmetric_memory;
bool has_16gb_dimms;
};
diff --git a/drivers/gpu/drm/i915/soc/intel_rom.c b/drivers/gpu/drm/i915/soc/intel_rom.c
index 243d98cab8c3..2f17dc856e7f 100644
--- a/drivers/gpu/drm/i915/soc/intel_rom.c
+++ b/drivers/gpu/drm/i915/soc/intel_rom.c
@@ -39,8 +39,9 @@ static u16 spi_read16(struct intel_rom *rom, loff_t offset)
return spi_read32(rom, offset) & 0xffff;
}
-struct intel_rom *intel_rom_spi(struct drm_i915_private *i915)
+struct intel_rom *intel_rom_spi(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
struct intel_rom *rom;
u32 static_region;
@@ -85,7 +86,7 @@ static void pci_free(struct intel_rom *rom)
pci_unmap_rom(rom->pdev, rom->oprom);
}
-struct intel_rom *intel_rom_pci(struct drm_i915_private *i915)
+struct intel_rom *intel_rom_pci(struct drm_device *drm)
{
struct intel_rom *rom;
@@ -93,7 +94,7 @@ struct intel_rom *intel_rom_pci(struct drm_i915_private *i915)
if (!rom)
return NULL;
- rom->pdev = to_pci_dev(i915->drm.dev);
+ rom->pdev = to_pci_dev(drm->dev);
rom->oprom = pci_map_rom(rom->pdev, &rom->size);
if (!rom->oprom) {
diff --git a/drivers/gpu/drm/i915/soc/intel_rom.h b/drivers/gpu/drm/i915/soc/intel_rom.h
index fb2979c8ef7f..4e59a375787e 100644
--- a/drivers/gpu/drm/i915/soc/intel_rom.h
+++ b/drivers/gpu/drm/i915/soc/intel_rom.h
@@ -8,11 +8,11 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct drm_device;
struct intel_rom;
-struct intel_rom *intel_rom_spi(struct drm_i915_private *i915);
-struct intel_rom *intel_rom_pci(struct drm_i915_private *i915);
+struct intel_rom *intel_rom_spi(struct drm_device *drm);
+struct intel_rom *intel_rom_pci(struct drm_device *drm);
u32 intel_rom_read32(struct intel_rom *rom, loff_t offset);
u16 intel_rom_read16(struct intel_rom *rom, loff_t offset);
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
index 4465c40f8134..b17e3bab23d5 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
@@ -26,8 +26,6 @@ struct i915_vma {
struct xe_ggtt_node *node;
};
-#define i915_ggtt_clear_scanout(bo) do { } while (0)
-
#define i915_vma_fence_id(vma) -1
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index d012f02bc84f..d93ddacdf743 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -91,27 +91,6 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
}
-static inline int intel_wait_for_register(struct intel_uncore *uncore,
- i915_reg_t i915_reg, u32 mask,
- u32 value, unsigned int timeout)
-{
- struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
-
- return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
- timeout * USEC_PER_MSEC, NULL, false);
-}
-
-static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
- i915_reg_t i915_reg, u32 mask,
- u32 value, unsigned int timeout,
- u32 *out_value)
-{
- struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
-
- return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
- timeout * USEC_PER_MSEC, out_value, false);
-}
-
static inline int
__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
u32 mask, u32 value, unsigned int fast_timeout_us,
@@ -133,6 +112,16 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
out_value, atomic);
}
+static inline int
+__intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t i915_reg,
+ u32 mask, u32 value, unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms, u32 *out_value)
+{
+ return __intel_wait_for_register(uncore, i915_reg, mask, value,
+ fast_timeout_us, slow_timeout_ms,
+ out_value);
+}
+
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index 27437c22bd70..bad2243b9114 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -5,6 +5,7 @@
#include "xe_bo.h"
#include "intel_bo.h"
+#include "intel_frontbuffer.h"
bool intel_bo_is_tiled(struct drm_gem_object *obj)
{
@@ -28,10 +29,6 @@ bool intel_bo_is_protected(struct drm_gem_object *obj)
return xe_bo_is_protected(gem_to_xe_bo(obj));
}
-void intel_bo_flush_if_display(struct drm_gem_object *obj)
-{
-}
-
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
return drm_gem_prime_mmap(obj, vma);
@@ -44,15 +41,60 @@ int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, i
return xe_bo_read(bo, offset, dst, size);
}
-struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
+struct xe_frontbuffer {
+ struct intel_frontbuffer base;
+ struct drm_gem_object *obj;
+ struct kref ref;
+};
+
+struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj)
+{
+ struct xe_frontbuffer *front;
+
+ front = kmalloc(sizeof(*front), GFP_KERNEL);
+ if (!front)
+ return NULL;
+
+ intel_frontbuffer_init(&front->base, obj->dev);
+
+ kref_init(&front->ref);
+
+ drm_gem_object_get(obj);
+ front->obj = obj;
+
+ return &front->base;
+}
+
+void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
{
- return NULL;
+ struct xe_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ kref_get(&front->ref);
+}
+
+static void frontbuffer_release(struct kref *ref)
+{
+ struct xe_frontbuffer *front =
+ container_of(ref, typeof(*front), ref);
+
+ intel_frontbuffer_fini(&front->base);
+
+ drm_gem_object_put(front->obj);
+
+ kfree(front);
+}
+
+void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
+{
+ struct xe_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ kref_put(&front->ref, frontbuffer_release);
}
-struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
- struct intel_frontbuffer *front)
+void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front)
{
- return front;
}
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 7baf15f51575..0b2fa7c56d38 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -656,9 +656,6 @@ struct xe_device {
*/
u32 edram_size_mb;
- /* To shut up runtime pm macros.. */
- struct xe_runtime_pm {} runtime_pm;
-
struct intel_uncore {
spinlock_t lock;
} uncore;