Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_bw.c')
 drivers/gpu/drm/i915/display/intel_bw.c | 228 ++++++++++++++++++++++---------
 1 file changed, 184 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 7f2a50b4f494..23edc81741de 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -22,6 +22,8 @@ struct intel_qgv_point {
u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};
+#define DEPROGBWPCLIMIT 60
+
struct intel_psf_gv_point {
u8 clk; /* clock in multiples of 16.6666 MHz */
};
@@ -162,7 +164,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
1);
if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to disable qgv points (%d) points: 0x%x\n", ret, points_mask);
+ drm_err(&dev_priv->drm,
+ "Failed to disable qgv points (0x%x) points: 0x%x\n",
+ ret, points_mask);
return ret;
}
@@ -239,6 +243,9 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->channel_width = 16;
qi->deinterleave = 4;
break;
+ case INTEL_DRAM_GDDR:
+ qi->channel_width = 32;
+ break;
default:
MISSING_CASE(dram_info->type);
return -EINVAL;
@@ -290,8 +297,10 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp = &qi->points[i];
ret = intel_read_qgv_point_info(dev_priv, sp, i);
- if (ret)
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
return ret;
+ }
drm_dbg_kms(&dev_priv->drm,
"QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
@@ -383,6 +392,12 @@ static const struct intel_sa_info mtl_sa_info = {
.derating = 10,
};
+static const struct intel_sa_info xe2_hpd_sa_info = {
+ .derating = 30,
+ .deprogbwlimit = 53,
+ /* Other values not used by simplified algorithm */
+};
+
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@@ -489,7 +504,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
dclk_max = icl_sagv_max_dclk(&qi);
peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
- maxdebw = min(sa->deprogbwlimit * 1000, peakbw * 6 / 10); /* 60% */
+ maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);
ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
/*
@@ -594,6 +609,54 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
}
+static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
+ const struct intel_sa_info *sa)
+{
+ struct intel_qgv_info qi = {};
+ int num_channels = i915->dram_info.num_channels;
+ int peakbw, maxdebw;
+ int ret, i;
+
+ ret = icl_get_qgv_points(i915, &qi, true);
+ if (ret) {
+ drm_dbg_kms(&i915->drm,
+ "Failed to get memory subsystem information, ignoring bandwidth limits");
+ return ret;
+ }
+
+ peakbw = num_channels * qi.channel_width / 8 * icl_sagv_max_dclk(&qi);
+ maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);
+
+ for (i = 0; i < qi.num_points; i++) {
+ const struct intel_qgv_point *point = &qi.points[i];
+ int bw = num_channels * (qi.channel_width / 8) * point->dclk;
+
+ i915->display.bw.max[0].deratedbw[i] =
+ min(maxdebw, (100 - sa->derating) * bw / 100);
+ i915->display.bw.max[0].peakbw[i] = bw;
+
+ drm_dbg_kms(&i915->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
+ i, i915->display.bw.max[0].deratedbw[i],
+ i915->display.bw.max[0].peakbw[i]);
+ }
+
+ /* Bandwidth does not depend on # of planes; set all groups the same */
+ i915->display.bw.max[0].num_planes = 1;
+ i915->display.bw.max[0].num_qgv_points = qi.num_points;
+ for (i = 1; i < ARRAY_SIZE(i915->display.bw.max); i++)
+ memcpy(&i915->display.bw.max[i], &i915->display.bw.max[0],
+ sizeof(i915->display.bw.max[0]));
+
+ /*
+ * Xe2_HPD should always have exactly two QGV points representing
+ * battery and plugged-in operation.
+ */
+ drm_WARN_ON(&i915->drm, qi.num_points != 2);
+ i915->display.sagv.status = I915_SAGV_ENABLED;
+
+ return 0;
+}
+
static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
int num_planes, int qgv_point)
{
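For reference, the simplified Xe2_HPD path added above reduces each QGV point to two clamps: a derating percentage and the deprogbwlimit cap scaled by DEPROGBWPCLIMIT. A minimal standalone sketch of that arithmetic, reusing the xe2_hpd_sa_info values from this patch but with hypothetical channel count, width and DCLK figures:

/*
 * Standalone sketch of the Xe2_HPD derated-bandwidth math above.  The sa
 * values mirror xe2_hpd_sa_info; the memory parameters are hypothetical
 * and only serve to illustrate the formula.
 */
#include <stdio.h>

#define DEPROGBWPCLIMIT 60 /* percent, as defined in this patch */

int main(void)
{
	const int derating = 30, deprogbwlimit = 53;    /* xe2_hpd_sa_info */
	const int num_channels = 2, channel_width = 32; /* hypothetical GDDR setup */
	const int dclk = 2000, max_dclk = 2500;         /* hypothetical, in MHz */

	int peakbw = num_channels * channel_width / 8 * max_dclk;
	int maxdebw = peakbw * DEPROGBWPCLIMIT / 100;   /* 60% of peak bandwidth... */
	if (deprogbwlimit * 1000 < maxdebw)             /* ...capped by the SA limit */
		maxdebw = deprogbwlimit * 1000;

	int bw = num_channels * (channel_width / 8) * dclk;
	int deratedbw = (100 - derating) * bw / 100;    /* apply derating... */
	if (maxdebw < deratedbw)                        /* ...then clamp to maxdebw */
		deratedbw = maxdebw;

	printf("peakbw=%d maxdebw=%d deratedbw=%d\n", peakbw, maxdebw, deratedbw);
	return 0;
}

Note that the divisor is 100, so DEPROGBWPCLIMIT still expresses the 60% cap that the old "/* 60% */" comment in tgl_get_bw_info() spelled out.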
@@ -659,12 +722,30 @@ static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
return bi->psf_bw[psf_gv_point];
}
+static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
+ int num_active_planes, int qgv_point)
+{
+ unsigned int idx;
+
+ if (DISPLAY_VER(i915) >= 12)
+ idx = tgl_max_bw_index(i915, num_active_planes, qgv_point);
+ else
+ idx = icl_max_bw_index(i915, num_active_planes, qgv_point);
+
+ if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ return 0;
+
+ return i915->display.bw.max[idx].deratedbw[qgv_point];
+}
+
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
if (!HAS_DISPLAY(dev_priv))
return;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
+ xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
+ else if (DISPLAY_VER(dev_priv) >= 14)
tgl_get_bw_info(dev_priv, &mtl_sa_info);
else if (IS_DG2(dev_priv))
dg2_get_bw_info(dev_priv);
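The new dispatch in intel_bw_init_hw() keys off DISPLAY_VERx100(), which encodes the display IP version as major * 100 + minor, so the 1401 check combined with IS_DGFX() selects discrete display 14.01 (Xe2_HPD) parts before falling back to the existing MTL/DG2/TGL/ICL paths. A small illustrative encoder, not the i915 macro itself:

/*
 * Illustration only: DISPLAY_VERx100() in i915 packs the display IP version
 * as major * 100 + minor; this helper mimics that encoding so the 1401
 * comparison above reads as "display 14.01 or newer".
 */
#include <stdio.h>

static int display_verx100(int major, int minor)
{
	return major * 100 + minor;
}

int main(void)
{
	/* display 14.01 (Xe2_HPD) satisfies the >= 1401 gate, 14.00 does not */
	printf("14.01 -> %d, 14.00 -> %d\n",
	       display_verx100(14, 1), display_verx100(14, 0));
	return 0;
}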
@@ -735,6 +816,7 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
+ bw_state->force_check_qgv = true;
drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
@@ -804,6 +886,80 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
+ int num_active_planes)
+{
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int max_bw_point = 0;
+ unsigned int max_bw = 0;
+ int i;
+
+ for (i = 0; i < num_qgv_points; i++) {
+ unsigned int max_data_rate =
+ icl_qgv_bw(i915, num_active_planes, i);
+
+ /*
+ * We need to know which qgv point gives us
+ * maximum bandwidth in order to disable SAGV
+ * if we find that we exceed SAGV block time
+ * with watermarks. By that moment we already
+ * have those, as they are calculated earlier in
+ * intel_atomic_check.
+ */
+ if (max_data_rate > max_bw) {
+ max_bw_point = BIT(i);
+ max_bw = max_data_rate;
+ }
+ }
+
+ return max_bw_point;
+}
+
+static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915,
+ unsigned int qgv_points,
+ unsigned int psf_points)
+{
+ return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915);
+}
+
+static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
+{
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int max_bw_point_mask = 0;
+ unsigned int max_bw = 0;
+ int i;
+
+ for (i = 0; i < num_psf_gv_points; i++) {
+ unsigned int max_data_rate = adl_psf_bw(i915, i);
+
+ if (max_data_rate > max_bw) {
+ max_bw_point_mask = BIT(i);
+ max_bw = max_data_rate;
+ } else if (max_data_rate == max_bw) {
+ max_bw_point_mask |= BIT(i);
+ }
+ }
+
+ return max_bw_point_mask;
+}
+
+static void icl_force_disable_sagv(struct drm_i915_private *i915,
+ struct intel_bw_state *bw_state)
+{
+ unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0);
+ unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915);
+
+ bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ qgv_points,
+ psf_points);
+
+ drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n",
+ bw_state->qgv_points_mask);
+
+ icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask);
+}
+
static int mtl_find_qgv_points(struct drm_i915_private *i915,
unsigned int data_rate,
unsigned int num_active_planes,
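icl_prepare_qgv_points_mask() captures the convention that PCode is told which points to disable, not which to keep: the allowed QGV and PSF bitmasks are combined, inverted, and clipped to the points that actually exist. A hedged sketch of that bit manipulation, with hypothetical point counts and bit layout standing in for icl_qgv_points_mask() and the ICL/ADLS request macros:

/*
 * Sketch of the icl_prepare_qgv_points_mask() logic above.  The point counts
 * and the field layout (QGV points in the low bits, PSF points above them)
 * are assumptions made only for this example.
 */
#include <stdio.h>

#define NUM_QGV_POINTS 3 /* hypothetical */
#define NUM_PSF_POINTS 2 /* hypothetical */

#define REQ_QGV_PT(x) ((x) & ((1u << NUM_QGV_POINTS) - 1))
#define REQ_PSF_PT(x) (((x) & ((1u << NUM_PSF_POINTS) - 1)) << NUM_QGV_POINTS)

static unsigned int qgv_points_mask(void)
{
	/* every point that exists, in the same layout as the request fields */
	return (1u << (NUM_QGV_POINTS + NUM_PSF_POINTS)) - 1;
}

int main(void)
{
	unsigned int allowed_qgv = 0x3; /* QGV points 0 and 1 may stay enabled */
	unsigned int allowed_psf = 0x1; /* PSF point 0 may stay enabled */

	unsigned int restrict_mask =
		~(REQ_QGV_PT(allowed_qgv) | REQ_PSF_PT(allowed_psf)) &
		qgv_points_mask();

	printf("restrict mask: 0x%x\n", restrict_mask); /* 0x14: QGV 2 and PSF 1 */
	return 0;
}

With these example inputs the result is 0x14, i.e. QGV point 2 and PSF point 1 get masked off; icl_force_disable_sagv() relies on exactly this helper to restrict operation to the maximum-bandwidth points at init time.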
@@ -881,8 +1037,6 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int max_bw_point = 0;
- unsigned int max_bw = 0;
unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
u16 psf_points = 0;
@@ -895,31 +1049,8 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return ret;
for (i = 0; i < num_qgv_points; i++) {
- unsigned int idx;
- unsigned int max_data_rate;
-
- if (DISPLAY_VER(i915) >= 12)
- idx = tgl_max_bw_index(i915, num_active_planes, i);
- else
- idx = icl_max_bw_index(i915, num_active_planes, i);
-
- if (idx >= ARRAY_SIZE(i915->display.bw.max))
- continue;
-
- max_data_rate = i915->display.bw.max[idx].deratedbw[i];
-
- /*
- * We need to know which qgv point gives us
- * maximum bandwidth in order to disable SAGV
- * if we find that we exceed SAGV block time
- * with watermarks. By that moment we already
- * have those, as it is calculated earlier in
- * intel_atomic_check,
- */
- if (max_data_rate > max_bw) {
- max_bw_point = i;
- max_bw = max_data_rate;
- }
+ unsigned int max_data_rate = icl_qgv_bw(i915,
+ num_active_planes, i);
if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
@@ -963,20 +1094,18 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* cause.
*/
if (!intel_can_enable_sagv(i915, new_bw_state)) {
- qgv_points = BIT(max_bw_point);
- drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point %d\n",
- max_bw_point);
+ qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes);
+ drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n",
+ qgv_points);
}
/*
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask =
- ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) &
- icl_qgv_points_mask(i915);
-
+ new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ qgv_points,
+ psf_points);
/*
* If the actual mask had changed we need to make sure that
* the commits are serialized(in case this is a nomodeset, nonblocking)
@@ -1127,7 +1256,7 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915,
min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
for_each_pipe(i915, pipe)
- min_cdclk = max(bw_state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]);
return min_cdclk;
}
@@ -1272,8 +1401,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- intel_can_enable_sagv(i915, old_bw_state) !=
- intel_can_enable_sagv(i915, new_bw_state))
+ (intel_can_enable_sagv(i915, old_bw_state) !=
+ intel_can_enable_sagv(i915, new_bw_state) ||
+ new_bw_state->force_check_qgv))
changed = true;
/*
@@ -1287,6 +1417,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (ret)
return ret;
+ new_bw_state->force_check_qgv = false;
+
return 0;
}
@@ -1313,16 +1445,24 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_destroy_state = intel_bw_destroy_state,
};
-int intel_bw_init(struct drm_i915_private *dev_priv)
+int intel_bw_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_bw_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj,
+ intel_atomic_global_obj_init(display, &display->bw.obj,
&state->base, &intel_bw_funcs);
+ /*
+ * Limit this only if we have SAGV. For display version 14 onwards,
+ * SAGV is handled through pmdemand requests.
+ */
+ if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13))
+ icl_force_disable_sagv(i915, state);
+
return 0;
}