Diffstat (limited to 'drivers/gpu/drm/i915/display/skl_watermark.c')
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c  | 1313
1 file changed, 787 insertions(+), 526 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 56588d6e24ae..f4458d1185b3 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -3,26 +3,37 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_blend.h>
#include "i915_drv.h"
-#include "i915_fixed.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
+#include "intel_cdclk.h"
#include "intel_crtc.h"
+#include "intel_cursor_regs.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_fb.h"
+#include "intel_fixed.h"
#include "intel_pcode.h"
#include "intel_wm.h"
+#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
+/*
+ * It is expected that DSB can do posted writes to every register in
+ * the pipe and planes within 100us. For the flip queue use case, the
+ * recommended DSB execution time is 100us + one SAGV block time.
+ */
+#define DSB_EXE_TIME 100
+
static void skl_sagv_disable(struct drm_i915_private *i915);
/* Stores plane specific WM parameters */
@@ -63,23 +74,26 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
return DISPLAY_VER(i915) == 9;
}
-static bool
+bool
intel_has_sagv(struct drm_i915_private *i915)
{
- return HAS_SAGV(i915) &&
- i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
+ struct intel_display *display = &i915->display;
+
+ return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED;
}
static u32
intel_sagv_block_time(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 14) {
+ struct intel_display *display = &i915->display;
+
+ if (DISPLAY_VER(display) >= 14) {
u32 val;
- val = intel_de_read(i915, MTL_LATENCY_SAGV);
+ val = intel_de_read(display, MTL_LATENCY_SAGV);
return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
u32 val = 0;
int ret;
@@ -87,14 +101,14 @@ intel_sagv_block_time(struct drm_i915_private *i915)
GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
&val, NULL);
if (ret) {
- drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
+ drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n");
return 0;
}
return val;
- } else if (DISPLAY_VER(i915) == 11) {
+ } else if (DISPLAY_VER(display) == 11) {
return 10;
- } else if (HAS_SAGV(i915)) {
+ } else if (HAS_SAGV(display)) {
return 30;
} else {
return 0;
@@ -103,31 +117,33 @@ intel_sagv_block_time(struct drm_i915_private *i915)
static void intel_sagv_init(struct drm_i915_private *i915)
{
- if (!HAS_SAGV(i915))
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ struct intel_display *display = &i915->display;
+
+ if (!HAS_SAGV(display))
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
/*
* Probe to see if we have working SAGV control.
* For icl+ this was already determined by intel_bw_init_hw().
*/
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
skl_sagv_disable(i915);
- drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
+ drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN);
- i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
+ display->sagv.block_time_us = intel_sagv_block_time(i915);
- drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
+ drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
+ str_yes_no(intel_has_sagv(i915)), display->sagv.block_time_us);
/* avoid overflow when adding with wm0 latency/etc. */
- if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
+ if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
"Excessive SAGV block time %u, ignoring\n",
- i915->display.sagv.block_time_us))
- i915->display.sagv.block_time_us = 0;
+ display->sagv.block_time_us))
+ display->sagv.block_time_us = 0;
if (!intel_has_sagv(i915))
- i915->display.sagv.block_time_us = 0;
+ display->sagv.block_time_us = 0;
}
/*
@@ -433,6 +449,7 @@ bool intel_can_enable_sagv(struct drm_i915_private *i915,
static int intel_compute_sagv_mask(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
int ret;
struct intel_crtc *crtc;
@@ -443,12 +460,35 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc,
new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case).
+ *
+ * drm_atomic_check_only() gets upset if we pull more crtcs
+ * into the state, so we have to calculate this based on the
+ * individual intel_crtc_can_enable_sagv() rather than
+ * the overall intel_can_enable_sagv(). Otherwise the
+ * crtcs not included in the commit would not switch to the
+ * SAGV watermarks when we are about to enable SAGV, and that
+ * would lead to underruns. This does mean extra power draw
+ * when only a subset of the crtcs are blocking SAGV as the
+ * other crtcs can't be allowed to use the more optimal
+	 * normal (i.e. non-SAGV) watermarks.
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
+ DISPLAY_VER(i915) >= 12 &&
+ intel_crtc_can_enable_sagv(new_crtc_state);
+
if (intel_crtc_can_enable_sagv(new_crtc_state))
new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
else
@@ -478,21 +518,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return ret;
}
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case)
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
- DISPLAY_VER(i915) >= 12 &&
- intel_can_enable_sagv(i915, new_bw_state);
- }
-
return 0;
}
@@ -699,7 +724,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int width, const struct drm_format_info *format,
u64 modifier, unsigned int rotation,
u32 plane_pixel_rate, struct skl_wm_params *wp,
- int color_plane);
+ int color_plane, unsigned int pan_x);
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane,
@@ -746,7 +771,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
drm_format_info(DRM_FORMAT_ARGB8888),
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
- crtc_state->pixel_rate, &wp, 0);
+ crtc_state->pixel_rate, &wp, 0, 0);
drm_WARN_ON(&i915->drm, ret);
for (level = 0; level < i915->display.wm.num_levels; level++) {
@@ -776,30 +801,40 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
const enum pipe pipe,
const enum plane_id plane_id,
struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
+ struct skl_ddb_entry *ddb_y,
+ u16 *min_ddb, u16 *interim_ddb)
{
+ struct intel_display *display = &i915->display;
u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
- val = intel_de_read(i915, CUR_BUF_CFG(pipe));
+ val = intel_de_read(display, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(ddb, val);
return;
}
- val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(display, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb, val);
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 30) {
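+		/* Xe3+ tracks a minimum and an interim (SAGV WM0) DDB allocation per plane */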
+ val = intel_de_read(display, PLANE_MIN_BUF_CFG(pipe, plane_id));
+
+ *min_ddb = REG_FIELD_GET(PLANE_MIN_DBUF_BLOCKS_MASK, val);
+ *interim_ddb = REG_FIELD_GET(PLANE_INTERIM_DBUF_BLOCKS_MASK, val);
+ }
+
+ if (DISPLAY_VER(display) >= 11)
return;
- val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(display, PLANE_NV12_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb_y, val);
}
static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
+ struct skl_ddb_entry *ddb_y,
+ u16 *min_ddb, u16 *interim_ddb)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
@@ -816,7 +851,9 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
skl_ddb_get_hw_plane_state(i915, pipe,
plane_id,
&ddb[plane_id],
- &ddb_y[plane_id]);
+ &ddb_y[plane_id],
+ &min_ddb[plane_id],
+ &interim_ddb[plane_id]);
intel_display_power_put(i915, power_domain, wakeref);
}
@@ -1351,13 +1388,30 @@ static bool
use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- return DISPLAY_VER(i915) >= 13 &&
+	/* Xe3+ is auto minimum DDB capable, so don't force minimal wm0 */
+ return IS_DISPLAY_VER(display, 13, 20) &&
crtc_state->uapi.async_flip &&
plane->async_flip;
}
+unsigned int
+skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane, int width, int height,
+ int cpp)
+{
+	/*
+	 * We calculate extra ddb based on the ratio of plane data rate to
+	 * total data rate. In some cases no extra ddb should be allocated
+	 * for the plane at all; don't count its data rate then.
+	 */
+ if (use_minimal_wm0_only(crtc_state, plane))
+ return 0;
+
+ return width * height * cpp;
+}
+
static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
@@ -1367,7 +1421,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
u64 data_rate = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20)
+ if (plane_id == PLANE_CURSOR)
continue;
data_rate += crtc_state->rel_data_rate[plane_id];
@@ -1379,7 +1433,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
return data_rate;
}
-static const struct skl_wm_level *
+const struct skl_wm_level *
skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id,
int level)
@@ -1392,7 +1446,7 @@ skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
return &wm->wm[level];
}
-static const struct skl_wm_level *
+const struct skl_wm_level *
skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id)
{
@@ -1494,6 +1548,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct intel_dbuf_state *dbuf_state =
intel_atomic_get_new_dbuf_state(state);
const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ struct intel_display *display = to_intel_display(state);
int num_active = hweight8(dbuf_state->active_pipes);
struct skl_plane_ddb_iter iter;
enum plane_id plane_id;
@@ -1504,6 +1559,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
/* Clear the partitioning for disabled planes. */
memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+ memset(crtc_state->wm.skl.plane_min_ddb, 0,
+ sizeof(crtc_state->wm.skl.plane_min_ddb));
+ memset(crtc_state->wm.skl.plane_interim_ddb, 0,
+ sizeof(crtc_state->wm.skl.plane_interim_ddb));
if (!crtc_state->hw.active)
return 0;
@@ -1514,12 +1573,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
return 0;
/* Allocate fixed number of blocks for cursor. */
- if (DISPLAY_VER(i915) < 20) {
- cursor_size = skl_cursor_allocation(crtc_state, num_active);
- iter.size -= cursor_size;
- skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
- alloc->end - cursor_size, alloc->end);
- }
+ cursor_size = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= cursor_size;
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
+ alloc->end - cursor_size, alloc->end);
iter.data_rate = skl_total_relative_data_rate(crtc_state);
@@ -1533,7 +1590,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20) {
+ if (plane_id == PLANE_CURSOR) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1578,10 +1635,13 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *min_ddb = &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20)
+ if (plane_id == PLANE_CURSOR)
continue;
if (DISPLAY_VER(i915) < 11 &&
@@ -1594,6 +1654,11 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
crtc_state->rel_data_rate[plane_id]);
}
+
+ if (DISPLAY_VER(display) >= 30) {
+ *min_ddb = wm->wm[0].min_ddb_alloc;
+ *interim_ddb = wm->sagv.wm0.min_ddb_alloc;
+ }
}
drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
@@ -1637,6 +1702,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -1650,6 +1717,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
}
skl_check_wm_level(&wm->sagv.wm0, ddb);
+ if (DISPLAY_VER(display) >= 30)
+ *interim_ddb = wm->sagv.wm0.min_ddb_alloc;
+
skl_check_wm_level(&wm->sagv.trans_wm, ddb);
}
@@ -1725,9 +1795,10 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int width, const struct drm_format_info *format,
u64 modifier, unsigned int rotation,
u32 plane_pixel_rate, struct skl_wm_params *wp,
- int color_plane)
+ int color_plane, unsigned int pan_x)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 interm_pbpl;
@@ -1786,7 +1857,9 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines,
wp->dbuf_block_size);
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 30)
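+		/* a non-zero x pan can make each line straddle one extra block */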
+ interm_pbpl += (pan_x != 0);
+ else if (DISPLAY_VER(i915) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
@@ -1828,7 +1901,8 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
fb->format, fb->modifier,
plane_state->hw.rotation,
intel_plane_pixel_rate(crtc_state, plane_state),
- wp, color_plane);
+ wp, color_plane,
+ plane_state->uapi.src.x1);
}
static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
@@ -1848,6 +1922,13 @@ static int skl_wm_max_lines(struct drm_i915_private *i915)
return 31;
}
+static bool xe3_auto_min_alloc_capable(struct intel_plane *plane, int level)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ return DISPLAY_VER(display) >= 30 && level == 0 && plane->id != PLANE_CURSOR;
+}
+
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane,
int level,
@@ -1892,7 +1973,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- blocks = fixed16_to_u32_round_up(selected_result) + 1;
+ blocks = fixed16_to_u32_round_up(selected_result);
+ if (DISPLAY_VER(i915) < 30)
+ blocks++;
+
/*
* Lets have blocks at minimum equivalent to plane_blocks_per_line
* as there will be at minimum one line for lines configuration. This
@@ -1977,6 +2061,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
result->enable = true;
+ result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
result->can_sagv = latency >= i915->display.sagv.block_time_us;
@@ -2350,117 +2435,24 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
return skl_wm_check_vblank(crtc_state);
}
-static void skl_ddb_entry_write(struct drm_i915_private *i915,
- i915_reg_t reg,
- const struct skl_ddb_entry *entry)
-{
- if (entry->end)
- intel_de_write_fw(i915, reg,
- PLANE_BUF_END(entry->end - 1) |
- PLANE_BUF_START(entry->start));
- else
- intel_de_write_fw(i915, reg, 0);
-}
-
-static void skl_write_wm_level(struct drm_i915_private *i915,
- i915_reg_t reg,
- const struct skl_wm_level *level)
-{
- u32 val = 0;
-
- if (level->enable)
- val |= PLANE_WM_EN;
- if (level->ignore_lines)
- val |= PLANE_WM_IGNORE_LINES;
- val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
- val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
-
- intel_de_write_fw(i915, reg, val);
-}
-
-void skl_write_plane_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- int level;
-
- for (level = 0; level < i915->display.wm.num_levels; level++)
- skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
- skl_plane_wm_level(pipe_wm, plane_id, level));
-
- skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
- skl_plane_trans_wm(pipe_wm, plane_id));
-
- if (HAS_HW_SAGV_WM(i915)) {
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id),
- &wm->sagv.wm0);
- skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
- &wm->sagv.trans_wm);
- }
-
- skl_ddb_entry_write(i915,
- PLANE_BUF_CFG(pipe, plane_id), ddb);
-
- if (DISPLAY_VER(i915) < 11)
- skl_ddb_entry_write(i915,
- PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
-}
-
-void skl_write_cursor_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- int level;
-
- for (level = 0; level < i915->display.wm.num_levels; level++)
- skl_write_wm_level(i915, CUR_WM(pipe, level),
- skl_plane_wm_level(pipe_wm, plane_id, level));
-
- skl_write_wm_level(i915, CUR_WM_TRANS(pipe),
- skl_plane_trans_wm(pipe_wm, plane_id));
-
- if (HAS_HW_SAGV_WM(i915)) {
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- skl_write_wm_level(i915, CUR_WM_SAGV(pipe),
- &wm->sagv.wm0);
- skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe),
- &wm->sagv.trans_wm);
- }
-
- skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
-}
-
static bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
return l1->enable == l2->enable &&
l1->ignore_lines == l2->ignore_lines &&
l1->lines == l2->lines &&
- l1->blocks == l2->blocks;
+ l1->blocks == l2->blocks &&
+ l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable;
}
static bool skl_plane_wm_equals(struct drm_i915_private *i915,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
+ struct intel_display *display = &i915->display;
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2509,12 +2501,14 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
}
static int
-skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+skl_ddb_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
@@ -2527,6 +2521,12 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
&new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
continue;
+ if (new_crtc_state->do_async_flip) {
+ drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
@@ -2560,10 +2560,10 @@ static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
struct intel_dbuf_state *new_dbuf_state = NULL;
- const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
@@ -2589,10 +2589,17 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- if (HAS_MBUS_JOINING(i915))
+ if (HAS_MBUS_JOINING(display)) {
new_dbuf_state->joined_mbus =
adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+ if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ ret = intel_cdclk_state_set_joined_mbus(state, new_dbuf_state->joined_mbus);
+ if (ret)
+ return ret;
+ }
+ }
+
for_each_intel_crtc(&i915->drm, crtc) {
enum pipe pipe = crtc->pipe;
@@ -2616,13 +2623,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
if (ret)
return ret;
- if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
- /* TODO: Implement vblank synchronized MBUS joining changes */
- ret = intel_modeset_all_pipes_late(state, "MBUS joining change");
- if (ret)
- return ret;
- }
-
drm_dbg_kms(&i915->drm,
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
@@ -2651,14 +2651,12 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_crtc_allocate_plane_ddb(state, crtc);
if (ret)
return ret;
- ret = skl_ddb_add_affected_planes(old_crtc_state,
- new_crtc_state);
+ ret = skl_ddb_add_affected_planes(state, crtc);
if (ret)
return ret;
}
@@ -2809,10 +2807,10 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
const struct skl_pipe_wm *old_pipe_wm,
const struct skl_pipe_wm *new_pipe_wm)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2823,7 +2821,7 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
return false;
}
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
@@ -2886,6 +2884,12 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
&new_crtc_state->wm.skl.optimal))
continue;
+ if (new_crtc_state->do_async_flip) {
+ drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
@@ -2898,6 +2902,70 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
return 0;
}
+/*
+ * For a fixed refresh rate, or the VRR case where Vmin == Vmax == Flipline:
+ * Program DEEP PKG_C_LATENCY Pkg C with the highest valid latency from
+ * watermark level 1 and above. If watermark level 1 is invalid,
+ * program it with all 1's.
+ * Program PKG_C_LATENCY Added Wake Time = DSB execution time.
+ * For the variable refresh rate case where Vmin != Vmax != Flipline:
+ * Program DEEP PKG_C_LATENCY Pkg C with all 1's.
+ * Program PKG_C_LATENCY Added Wake Time = 0.
+ */
+void
+intel_program_dpkgc_latency(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ u32 latency = LNL_PKG_C_LATENCY_MASK;
+ u32 added_wake_time = 0;
+ u32 max_linetime = 0;
+ u32 clear, val;
+ bool fixed_refresh_rate = false;
+ int i;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
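+		/* VRR with Vmin == Vmax == Flipline behaves like a fixed refresh rate */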
+ if (!new_crtc_state->vrr.enable ||
+ (new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
+ new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline))
+ fixed_refresh_rate = true;
+
+ max_linetime = max(new_crtc_state->linetime, max_linetime);
+ }
+
+ if (fixed_refresh_rate) {
+ added_wake_time = DSB_EXE_TIME +
+ display->sagv.block_time_us;
+
+ latency = skl_watermark_max_latency(i915, 1);
+
+ /* Wa_22020432604 */
+ if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
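+			/* fold the wake time into the latency value instead */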
+ latency += added_wake_time;
+ added_wake_time = 0;
+ }
+
+ /* Wa_22020299601 */
+ if ((latency && max_linetime) &&
+ (DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30)) {
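+			/* round the latency up to a multiple of the longest line time */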
+ latency = max_linetime * DIV_ROUND_UP(latency, max_linetime);
+ } else if (!latency) {
+ latency = LNL_PKG_C_LATENCY_MASK;
+ }
+ }
+
+ clear = LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
+ val = REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency) |
+ REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
+
+ intel_de_rmw(display, LNL_PKG_C_LATENCY, clear, val);
+}
+
static int
skl_compute_wm(struct intel_atomic_state *state)
{
@@ -2935,18 +3003,21 @@ skl_compute_wm(struct intel_atomic_state *state)
return 0;
}
-static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
+static void skl_wm_level_from_reg_val(struct intel_display *display,
+ u32 val, struct skl_wm_level *level)
{
level->enable = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
+ level->auto_min_alloc_wm_enable = DISPLAY_VER(display) >= 30 ?
+ val & PLANE_WM_AUTO_MIN_ALLOC_EN : 0;
}
static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
int level;
@@ -2955,37 +3026,37 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
+ val = intel_de_read(display, PLANE_WM(pipe, plane_id, level));
else
- val = intel_de_read(i915, CUR_WM(pipe, level));
+ val = intel_de_read(display, CUR_WM(pipe, level));
- skl_wm_level_from_reg_val(val, &wm->wm[level]);
+ skl_wm_level_from_reg_val(display, val, &wm->wm[level]);
}
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_TRANS(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_TRANS(pipe));
+ val = intel_de_read(display, CUR_WM_TRANS(pipe));
- skl_wm_level_from_reg_val(val, &wm->trans_wm);
+ skl_wm_level_from_reg_val(display, val, &wm->trans_wm);
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_SAGV(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_SAGV(pipe));
+ val = intel_de_read(display, CUR_WM_SAGV(pipe));
- skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
+ skl_wm_level_from_reg_val(display, val, &wm->sagv.wm0);
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_SAGV_TRANS(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));
+ val = intel_de_read(display, CUR_WM_SAGV_TRANS(pipe));
- skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
- } else if (DISPLAY_VER(i915) >= 12) {
+ skl_wm_level_from_reg_val(display, val, &wm->sagv.trans_wm);
+ } else if (DISPLAY_VER(display) >= 12) {
wm->sagv.wm0 = wm->wm[0];
wm->sagv.trans_wm = wm->trans_wm;
}
@@ -2994,14 +3065,17 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
static void skl_wm_get_hw_state(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_crtc *crtc;
- if (HAS_MBUS_JOINING(i915))
- dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+ if (HAS_MBUS_JOINING(display))
+ dbuf_state->joined_mbus = intel_de_read(display, MBUS_CTL) & MBUS_JOIN;
- for_each_intel_crtc(&i915->drm, crtc) {
+ dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(display, &display->cdclk.hw);
+
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -3022,12 +3096,17 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
&crtc_state->wm.skl.plane_ddb[plane_id];
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *min_ddb =
+ &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
if (!crtc_state->hw.active)
continue;
skl_ddb_get_hw_plane_state(i915, crtc->pipe,
- plane_id, ddb, ddb_y);
+ plane_id, ddb, ddb_y,
+ min_ddb, interim_ddb);
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
@@ -3049,7 +3128,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->slices[pipe] =
skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
crtc->base.base.id, crtc->base.name,
dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
@@ -3057,203 +3136,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
str_yes_no(dbuf_state->joined_mbus));
}
- dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
-}
-
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
-{
- const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
- struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- entries[crtc->pipe] = crtc_state->wm.skl.ddb;
- }
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- u8 slices;
-
- slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
- dbuf_state->joined_mbus);
- if (dbuf_state->slices[crtc->pipe] & ~slices)
- return true;
-
- if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
- I915_MAX_PIPES, crtc->pipe))
- return true;
- }
-
- return false;
-}
-
-static void skl_wm_sanitize(struct drm_i915_private *i915)
-{
- struct intel_crtc *crtc;
-
- /*
- * On TGL/RKL (at least) the BIOS likes to assign the planes
- * to the wrong DBUF slices. This will cause an infinite loop
- * in skl_commit_modeset_enables() as it can't find a way to
- * transition between the old bogus DBUF layout to the new
- * proper DBUF layout without DBUF allocation overlaps between
- * the planes (which cannot be allowed or else the hardware
- * may hang). If we detect a bogus DBUF layout just turn off
- * all the planes so that skl_commit_modeset_enables() can
- * simply ignore them.
- */
- if (!skl_dbuf_is_misconfigured(i915))
- return;
-
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (plane_state->uapi.visible)
- intel_plane_disable_noatomic(crtc, plane);
-
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
-
- memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
- }
-}
-
-static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
-{
- skl_wm_get_hw_state(i915);
- skl_wm_sanitize(i915);
-}
-
-void intel_wm_state_verify(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_hw_state {
- struct skl_ddb_entry ddb[I915_MAX_PLANES];
- struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
- struct skl_pipe_wm wm;
- } *hw;
- const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- struct intel_plane *plane;
- u8 hw_enabled_slices;
- int level;
-
- if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
- return;
-
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return;
-
- skl_pipe_wm_get_hw_state(crtc, &hw->wm);
-
- skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
-
- hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
-
- if (DISPLAY_VER(i915) >= 11 &&
- hw_enabled_slices != i915->display.dbuf.enabled_slices)
- drm_err(&i915->drm,
- "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- i915->display.dbuf.enabled_slices,
- hw_enabled_slices);
-
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
- const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- const struct skl_wm_level *hw_wm_level, *sw_wm_level;
-
- /* Watermarks */
- for (level = 0; level < i915->display.wm.num_levels; level++) {
- hw_wm_level = &hw->wm.planes[plane->id].wm[level];
- sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
-
- if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
- continue;
-
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name, level,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
- sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
-
- if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
-
- if (HAS_HW_SAGV_WM(i915) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
-
- if (HAS_HW_SAGV_WM(i915) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
- plane->base.base.id, plane->base.name,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- kfree(hw);
+ dbuf_state->enabled_slices = display->dbuf.enabled_slices;
}
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
@@ -3398,31 +3281,19 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
static void skl_setup_wm_latency(struct drm_i915_private *i915)
{
- if (HAS_HW_SAGV_WM(i915))
- i915->display.wm.num_levels = 6;
- else
- i915->display.wm.num_levels = 8;
+ struct intel_display *display = &i915->display;
- if (DISPLAY_VER(i915) >= 14)
- mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
+ if (HAS_HW_SAGV_WM(display))
+ display->wm.num_levels = 6;
else
- skl_read_wm_latency(i915, i915->display.wm.skl_latency);
-
- intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
-}
-
-static const struct intel_wm_funcs skl_wm_funcs = {
- .compute_global_watermarks = skl_compute_wm,
- .get_hw_state = skl_wm_get_hw_state_and_sanitize,
-};
-
-void skl_wm_init(struct drm_i915_private *i915)
-{
- intel_sagv_init(i915);
+ display->wm.num_levels = 8;
- skl_setup_wm_latency(i915);
+ if (DISPLAY_VER(display) >= 14)
+ mtl_read_wm_latency(i915, display->wm.skl_latency);
+ else
+ skl_read_wm_latency(i915, display->wm.skl_latency);
- i915->display.funcs.wm = &skl_wm_funcs;
+ intel_print_wm_latency(i915, "Gen9 Plane", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
@@ -3462,55 +3333,287 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
int intel_dbuf_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state;
dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
if (!dbuf_state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
+ intel_atomic_global_obj_init(display, &display->dbuf.obj,
&dbuf_state->base, &intel_dbuf_funcs);
return 0;
}
-/*
- * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before
- * update the request state of all DBUS slices.
- */
-static void update_mbus_pre_enable(struct intel_atomic_state *state)
+static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
+{
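+	/*
+	 * Pipes A and D share one DBUF bank, pipes B and C the other;
+	 * a pipe has its bank to itself only if it is the sole active
+	 * pipe of its pair.
+	 */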
+ switch (pipe) {
+ case PIPE_A:
+ case PIPE_D:
+ active_pipes &= BIT(PIPE_A) | BIT(PIPE_D);
+ break;
+ case PIPE_B:
+ case PIPE_C:
+ active_pipes &= BIT(PIPE_B) | BIT(PIPE_C);
+ break;
+ default: /* to suppress compiler warning */
+ MISSING_CASE(pipe);
+ return false;
+ }
+
+ return is_power_of_2(active_pipes);
+}
+
+static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
+ const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u32 val = 0;
+
+ if (DISPLAY_VER(i915) >= 14)
+ val |= MBUS_DBOX_I_CREDIT(2);
+
+ if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
+ val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
+ }
+
+ if (DISPLAY_VER(i915) >= 14)
+ val |= dbuf_state->joined_mbus ?
+ MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
+ else if (IS_ALDERLAKE_P(i915))
+ /* Wa_22010947358:adl-p */
+ val |= dbuf_state->joined_mbus ?
+ MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
+ else
+ val |= MBUS_DBOX_A_CREDIT(2);
+
+ if (DISPLAY_VER(i915) >= 14) {
+ val |= MBUS_DBOX_B_CREDIT(0xA);
+ } else if (IS_ALDERLAKE_P(i915)) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(12);
+ } else {
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ }
+
+ if (DISPLAY_VERx100(i915) == 1400) {
+ if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes))
+ val |= MBUS_DBOX_BW_8CREDITS_MTL;
+ else
+ val |= MBUS_DBOX_BW_4CREDITS_MTL;
+ }
+
+ return val;
+}
+
+static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915,
+ const struct intel_dbuf_state *dbuf_state)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes)
+ intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe),
+ pipe_mbus_dbox_ctl(crtc, dbuf_state));
+}
+
+static void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
- u32 mbus_ctl, dbuf_min_tracker_val;
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (!new_dbuf_state ||
+ (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
+ new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ return;
+
+ pipe_mbus_dbox_ctl_update(i915, new_dbuf_state);
+}
+
+int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
+ int ratio)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(dbuf_state))
+ return PTR_ERR(dbuf_state);
+
+ dbuf_state->mdclk_cdclk_ratio = ratio;
+
+ return intel_atomic_lock_global_state(&dbuf_state->base);
+}
+
+void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+ int ratio, bool joined_mbus)
+{
+ struct intel_display *display = &i915->display;
enum dbuf_slice slice;
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- if (!HAS_MBUS_JOINING(i915))
+ if (!HAS_MBUS_JOINING(display))
return;
- /*
- * TODO: Implement vblank synchronized MBUS joining changes.
- * Must be properly coordinated with dbuf reprogramming.
- */
- if (dbuf_state->joined_mbus) {
- mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_rmw(display, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
+ MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
+
+ if (joined_mbus)
+ ratio *= 2;
+
+ drm_dbg_kms(display->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
+ ratio, str_yes_no(joined_mbus));
+
+ for_each_dbuf_slice(display, slice)
+ intel_de_rmw(display, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
+}
+
+static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ int mdclk_cdclk_ratio;
+
+ if (intel_cdclk_is_decreasing_later(state)) {
+ /* cdclk/mdclk will be changed later by intel_set_cdclk_post_plane_update() */
+ mdclk_cdclk_ratio = old_dbuf_state->mdclk_cdclk_ratio;
} else {
- mbus_ctl = MBUS_HASHING_MODE_2x2 |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
+ /* cdclk/mdclk already changed by intel_set_cdclk_pre_plane_update() */
+ mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
}
+ intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
+ new_dbuf_state->joined_mbus);
+}
+
+static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
+ const struct intel_dbuf_state *dbuf_state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+
+ drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
+ drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
+
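+	/*
+	 * MBUS joining serves a single pipe. Steer the join to that pipe
+	 * unless it is part of a full modeset, in which case select no pipe.
+	 */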
+ crtc = intel_crtc_for_pipe(display, pipe);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state))
+ return pipe;
+ else
+ return INVALID_PIPE;
+}
+
+static void mbus_ctl_join_update(struct drm_i915_private *i915,
+ const struct intel_dbuf_state *dbuf_state,
+ enum pipe pipe)
+{
+ u32 mbus_ctl;
+
+ if (dbuf_state->joined_mbus)
+ mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
+ else
+ mbus_ctl = MBUS_HASHING_MODE_2x2;
+
+ if (pipe != INVALID_PIPE)
+ mbus_ctl |= MBUS_JOIN_PIPE_SELECT(pipe);
+ else
+ mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
+
intel_de_rmw(i915, MBUS_CTL,
MBUS_HASHING_MODE_MASK | MBUS_JOIN |
MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+}
+
+static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
+ enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+
+ drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus),
+ pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
+
+ mbus_ctl_join_update(i915, new_dbuf_state, pipe);
+}
+
+void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
+{
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state)
+ return;
+
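+	/* enabling MBUS joining has to happen before the DDB entries are reallocated */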
+ if (!old_dbuf_state->joined_mbus && new_dbuf_state->joined_mbus) {
+ enum pipe pipe = intel_mbus_joined_pipe(state, new_dbuf_state);
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mbus_join_update(state, pipe);
+ intel_mbus_dbox_update(state);
+ intel_dbuf_mdclk_min_tracker_update(state);
+ }
+}
+
+void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state)
+ return;
+
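+	/* disabling MBUS joining has to happen after the DDB entries have been reallocated */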
+ if (old_dbuf_state->joined_mbus && !new_dbuf_state->joined_mbus) {
+ enum pipe pipe = intel_mbus_joined_pipe(state, old_dbuf_state);
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mdclk_min_tracker_update(state);
+ intel_mbus_dbox_update(state);
+ intel_dbuf_mbus_join_update(state, pipe);
+
+ if (pipe != INVALID_PIPE) {
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
+
+ intel_crtc_wait_for_next_vblank(crtc);
+ }
+ } else if (old_dbuf_state->joined_mbus == new_dbuf_state->joined_mbus &&
+ old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ intel_dbuf_mdclk_min_tracker_update(state);
+ intel_mbus_dbox_update(state);
+ }
- for_each_dbuf_slice(i915, slice)
- intel_de_rmw(i915, DBUF_CTL_S(slice),
- DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
- dbuf_min_tracker_val);
}
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
@@ -3520,18 +3623,20 @@ void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_new_dbuf_state(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
+ u8 old_slices, new_slices;
- if (!new_dbuf_state ||
- (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
- new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
+ if (!new_dbuf_state)
+ return;
+
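+	/*
+	 * Keep the union of the old and new slices enabled across the
+	 * plane update; intel_dbuf_post_plane_update() turns off the
+	 * ones that are no longer needed.
+	 */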
+ old_slices = old_dbuf_state->enabled_slices;
+ new_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
+
+ if (old_slices == new_slices)
return;
WARN_ON(!new_dbuf_state->base.changed);
- update_mbus_pre_enable(state);
- gen9_dbuf_slices_update(i915,
- old_dbuf_state->enabled_slices |
- new_dbuf_state->enabled_slices);
+ gen9_dbuf_slices_update(i915, new_slices);
}
void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
@@ -3541,104 +3646,259 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_new_dbuf_state(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
+ u8 old_slices, new_slices;
- if (!new_dbuf_state ||
- (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
- new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
+ if (!new_dbuf_state)
+ return;
+
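+	/* the planes have switched over; drop the slices only the old state needed */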
+ old_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
+ new_slices = new_dbuf_state->enabled_slices;
+
+ if (old_slices == new_slices)
return;
WARN_ON(!new_dbuf_state->base.changed);
- gen9_dbuf_slices_update(i915,
- new_dbuf_state->enabled_slices);
+ gen9_dbuf_slices_update(i915, new_slices);
}
-static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
+static void skl_mbus_sanitize(struct drm_i915_private *i915)
{
- switch (pipe) {
- case PIPE_A:
- return !(active_pipes & BIT(PIPE_D));
- case PIPE_D:
- return !(active_pipes & BIT(PIPE_A));
- case PIPE_B:
- return !(active_pipes & BIT(PIPE_C));
- case PIPE_C:
- return !(active_pipes & BIT(PIPE_B));
- default: /* to suppress compiler warning */
- MISSING_CASE(pipe);
- break;
+ struct intel_display *display = &i915->display;
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(display->dbuf.obj.state);
+
+ if (!HAS_MBUS_JOINING(display))
+ return;
+
+ if (!dbuf_state->joined_mbus ||
+ adlp_check_mbus_joined(dbuf_state->active_pipes))
+ return;
+
+ drm_dbg_kms(display->drm, "Disabling redundant MBUS joining (active pipes 0x%x)\n",
+ dbuf_state->active_pipes);
+
+ dbuf_state->joined_mbus = false;
+ intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ dbuf_state->mdclk_cdclk_ratio,
+ dbuf_state->joined_mbus);
+ pipe_mbus_dbox_ctl_update(i915, dbuf_state);
+ mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE);
+}
+
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
}
return false;
}
-void intel_mbus_dbox_update(struct intel_atomic_state *state)
+static void skl_dbuf_sanitize(struct drm_i915_private *i915)
{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+	 * transition from the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
+static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ skl_wm_get_hw_state(i915);
+
+ skl_mbus_sanitize(i915);
+ skl_dbuf_sanitize(i915);
+}
+
+void intel_wm_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- const struct intel_crtc_state *new_crtc_state;
- const struct intel_crtc *crtc;
- u32 val = 0;
- int i;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ u16 min_ddb[I915_MAX_PLANES];
+ u16 interim_ddb[I915_MAX_PLANES];
+ struct skl_pipe_wm wm;
+ } *hw;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ struct intel_plane *plane;
+ u8 hw_enabled_slices;
+ int level;
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
return;
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- if (!new_dbuf_state ||
- (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
- new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
return;
- if (DISPLAY_VER(i915) >= 14)
- val |= MBUS_DBOX_I_CREDIT(2);
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
- if (DISPLAY_VER(i915) >= 12) {
- val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
- val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
- val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
- }
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y, hw->min_ddb, hw->interim_ddb);
- if (DISPLAY_VER(i915) >= 14)
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
- MBUS_DBOX_A_CREDIT(8);
- else if (IS_ALDERLAKE_P(i915))
- /* Wa_22010947358:adl-p */
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
- MBUS_DBOX_A_CREDIT(4);
- else
- val |= MBUS_DBOX_A_CREDIT(2);
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
- if (DISPLAY_VER(i915) >= 14) {
- val |= MBUS_DBOX_B_CREDIT(0xA);
- } else if (IS_ALDERLAKE_P(i915)) {
- val |= MBUS_DBOX_BW_CREDIT(2);
- val |= MBUS_DBOX_B_CREDIT(8);
- } else if (DISPLAY_VER(i915) >= 12) {
- val |= MBUS_DBOX_BW_CREDIT(2);
- val |= MBUS_DBOX_B_CREDIT(12);
- } else {
- val |= MBUS_DBOX_BW_CREDIT(1);
- val |= MBUS_DBOX_B_CREDIT(8);
- }
+ if (DISPLAY_VER(i915) >= 11 &&
+ hw_enabled_slices != i915->display.dbuf.enabled_slices)
+ drm_err(&i915->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ i915->display.dbuf.enabled_slices,
+ hw_enabled_slices);
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- u32 pipe_val = val;
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
- if (!new_crtc_state->hw.active)
- continue;
+ /* Watermarks */
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
- if (DISPLAY_VER(i915) >= 14) {
- if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
- new_dbuf_state->active_pipes))
- pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
- else
- pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
+ continue;
+
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
}
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(display) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(display) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
}
+
+ kfree(hw);
+}
+
+static const struct intel_wm_funcs skl_wm_funcs = {
+ .compute_global_watermarks = skl_compute_wm,
+ .get_hw_state = skl_wm_get_hw_state_and_sanitize,
+};
+
+void skl_wm_init(struct drm_i915_private *i915)
+{
+ intel_sagv_init(i915);
+
+ skl_setup_wm_latency(i915);
+
+ i915->display.funcs.wm = &skl_wm_funcs;
}
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
@@ -3714,22 +3974,23 @@ DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
void skl_watermark_debugfs_register(struct drm_i915_private *i915)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct intel_display *display = &i915->display;
+ struct drm_minor *minor = display->drm->primary;
- if (HAS_IPC(i915))
+ if (HAS_IPC(display))
debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
&skl_watermark_ipc_status_fops);
- if (HAS_SAGV(i915))
+ if (HAS_SAGV(display))
debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
&intel_sagv_status_fops);
}
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915)
+unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
{
int level;
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
unsigned int latency = skl_wm_latency(i915, level, NULL);
if (latency)