author     Zhenyu Wang <zhenyuw@linux.intel.com>	2019-04-16 16:50:34 +0800
committer  Zhenyu Wang <zhenyuw@linux.intel.com>	2019-04-16 16:50:34 +0800
commit     95d002e0a34cb0f238abb39987f9980f325d8332 (patch)
tree       b8ed70572a55b4e67410f906159f86e8aabb0f6a /drivers/gpu/drm/i915/intel_psr.c
parent     d57b39e3ee3cdb4b00452090e386d197980cefc9 (diff)
parent     28d618e9ab86f26a31af0b235ced55beb3e343c8 (diff)
Merge tag 'drm-intel-next-2019-04-04' into gvt-next
Merge back drm-intel-next for the engine name definition refinement and for 54939ea0bd85 ("drm/i915: Switch to use HWS indices rather than addresses"), which pending gvt fixes depend on.

Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_psr.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 386
1 file changed, 240 insertions(+), 146 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 419e56342523..ec874d802d48 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -51,7 +51,7 @@
* must be correctly synchronized/cancelled when shutting down the pipe."
*/
-#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include "intel_drv.h"
#include "i915_drv.h"
@@ -71,15 +71,12 @@ static bool psr_global_enabled(u32 debug)
static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
- /* Disable PSR2 by default for all platforms */
- if (i915_modparams.enable_psr == -1)
- return false;
-
/* Cannot enable DSC and PSR2 simultaneously */
WARN_ON(crtc_state->dsc_params.compression_enable &&
crtc_state->has_psr2);
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
@@ -231,7 +228,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
- uint8_t dprx = 0;
+ u8 dprx = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
&dprx) != 1)
@@ -241,7 +238,7 @@ static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
- uint8_t alpm_caps = 0;
+ u8 alpm_caps = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
&alpm_caps) != 1)
@@ -261,6 +258,32 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
return val;
}
+static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
+{
+ u16 val;
+ ssize_t r;
+
+ /*
+	 * Return the default X granularity if granularity is not required
+	 * or if the DPCD read fails
+ */
+ if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
+ return 4;
+
+ r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
+ if (r != 2)
+ DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+
+ /*
+ * Spec says that if the value read is 0 the default granularity should
+ * be used instead.
+ */
+ if (r != 2 || val == 0)
+ val = 4;
+
+ return val;
+}
+
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv =
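An aside on the helper added above: the sink's X granularity is a two-byte DPCD value, and intel_dp_get_su_x_granulartiy() falls back to the spec default of 4 both when the sink does not set DP_PSR2_SU_GRANULARITY_REQUIRED and when the read fails or reports 0. A minimal standalone sketch of that fallback policy (hypothetical names, not kernel code):

    #include <stdint.h>
    #include <stdbool.h>

    #define DEFAULT_SU_X_GRANULARITY 4

    /* Mirrors the decision flow of the new helper; takes the raw inputs
     * instead of issuing DPCD/AUX reads. */
    static uint16_t su_x_granularity(bool granularity_required,
                                     bool read_ok, uint16_t raw_val)
    {
            if (!granularity_required)
                    return DEFAULT_SU_X_GRANULARITY;

            /* A failed read or a reported 0 both mean "use the default". */
            if (!read_ok || raw_val == 0)
                    return DEFAULT_SU_X_GRANULARITY;

            return raw_val;
    }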
@@ -274,10 +297,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+ DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+ return;
+ }
+
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
return;
}
+
dev_priv->psr.sink_support = true;
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
@@ -309,6 +338,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
+ dev_priv->psr.su_x_granularity =
+ intel_dp_get_su_x_granulartiy(intel_dp);
}
}
}
@@ -351,7 +382,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 aux_clock_divider, aux_ctl;
int i;
- static const uint8_t aux_msg[] = {
+ static const u8 aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
[2] = DP_SET_POWER & 0xff,
@@ -388,44 +419,30 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
if (dev_priv->psr.psr2_enabled) {
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE);
- dpcd_val |= DP_PSR_ENABLE_PSR2;
+ dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+ } else {
+ if (dev_priv->psr.link_standby)
+ dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ dpcd_val |= DP_PSR_CRC_VERIFICATION;
}
- if (dev_priv->psr.link_standby)
- dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
- dpcd_val |= DP_PSR_CRC_VERIFICATION;
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
-static void hsw_activate_psr1(struct intel_dp *intel_dp)
+static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 max_sleep_time = 0x1f;
- u32 val = EDP_PSR_ENABLE;
-
- /* Let's use 6 as the minimum to cover all known cases including the
- * off-by-one issue that HW has in some cases.
- */
- int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
- /* sink_sync_latency of 8 means source has to wait for more than 8
- * frames, we'll go with 9 frames for now
- */
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+ u32 val = 0;
- val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
- if (IS_HASWELL(dev_priv))
- val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
-
- if (dev_priv->psr.link_standby)
- val |= EDP_PSR_LINK_STANDBY;
+ if (INTEL_GEN(dev_priv) >= 11)
+ val |= EDP_PSR_TP4_TIME_0US;
if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
- val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
@@ -434,7 +451,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
val |= EDP_PSR_TP1_TIME_2500us;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
- val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
@@ -448,6 +465,35 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP1_TP2_SEL;
+ return val;
+}
+
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
+ */
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+ /* sink_sync_latency of 8 means source has to wait for more than 8
+ * frames, we'll go with 9 frames for now
+ */
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ if (IS_HASWELL(dev_priv))
+ val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+ if (dev_priv->psr.link_standby)
+ val |= EDP_PSR_LINK_STANDBY;
+
+ val |= intel_psr1_get_tp_time(intel_dp);
+
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
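The intel_psr1_get_tp_time() helper extracted above buckets the VBT TP1/TP2/TP3 wakeup times into the few discrete values the hardware can encode: 0, 100, 500 or 2500 microseconds, always rounding up (a VBT tp1 wakeup time of 300 us, for instance, selects EDP_PSR_TP1_TIME_500us). A sketch of that rounding rule in isolation (hypothetical name, not kernel code):

    #include <stdint.h>

    /* Round a VBT wakeup time (in us) up to the nearest value the PSR
     * hardware can encode; mirrors the if/else ladders in the hunk above. */
    static uint32_t psr_tp_time_bucket_us(uint32_t vbt_us)
    {
            if (vbt_us == 0)
                    return 0;
            else if (vbt_us <= 100)
                    return 100;
            else if (vbt_us <= 500)
                    return 500;
            else
                    return 2500;
    }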
@@ -468,25 +514,30 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
- /* FIXME: selective update is probably totally broken because it doesn't
- * mesh at all with our frontbuffer tracking. And the hw alone isn't
- * good enough. */
val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
val |= EDP_Y_COORDINATE_ENABLE;
val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
- dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+ if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+ dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
val |= EDP_PSR2_TP2_TIME_50us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR2_TP2_TIME_100us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR2_TP2_TIME_500us;
else
val |= EDP_PSR2_TP2_TIME_2500us;
+ /*
+	 * FIXME: There is probably an issue in the DMC firmwares (icl_dmc_ver1_07.bin
+	 * and kbl_dmc_ver1_04.bin at least) that causes PSR2 SU to fail after
+	 * exiting DC6 if EDP_PSR_TP1_TP3_SEL is kept in PSR_CTL, so for now
+	 * let's work around the issue by clearing PSR_CTL before enabling PSR2.
+ */
+ I915_WRITE(EDP_PSR_CTL, 0);
+
I915_WRITE(EDP_PSR2_CTL, val);
}
@@ -498,11 +549,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0;
- /*
- * FIXME psr2_support is messed up. It's both computed
- * dynamically during PSR enable, and extracted from sink
- * caps during eDP detection.
- */
if (!dev_priv->psr.sink_psr2_support)
return false;
@@ -519,7 +565,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
- } else if (IS_GEN9(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 9)) {
psr_max_h = 3640;
psr_max_v = 2304;
}
@@ -531,6 +577,23 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ /*
+ * HW sends SU blocks of size four scan lines, which means the starting
+ * X coordinate and Y granularity requirements will always be met. We
+ * only need to validate the SU block width is a multiple of
+ * x granularity.
+ */
+ if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+ DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+ crtc_hdisplay, dev_priv->psr.su_x_granularity);
+ return false;
+ }
+
+ if (crtc_state->crc_enabled) {
+ DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
return true;
}
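The hdisplay check above is plain modular arithmetic: with the default X granularity of 4, a 1920- or 3840-pixel-wide mode passes (1920 % 4 == 0), while a sink reporting a granularity of, say, 16 would reject a hypothetical 1928-pixel-wide mode (1928 % 16 == 8). A standalone sketch of the validation (illustrative values only):

    #include <stdbool.h>
    #include <stdio.h>

    /* PSR2 selective-update width check: the active width must be a
     * whole number of SU blocks in X. */
    static bool su_width_ok(int crtc_hdisplay, int su_x_granularity)
    {
            return crtc_hdisplay % su_x_granularity == 0;
    }

    int main(void)
    {
            printf("%d\n", su_width_ok(1920, 4));  /* 1: default granularity */
            printf("%d\n", su_width_ok(1928, 16)); /* 0: 8 pixels left over */
            return 0;
    }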
@@ -641,17 +704,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_psr_setup_aux(intel_dp);
- if (dev_priv->psr.psr2_enabled) {
+ if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
+ !IS_GEMINILAKE(dev_priv))) {
i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
cpu_transcoder);
u32 chicken = I915_READ(reg);
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
- chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
- | PSR2_ADD_VERTICAL_LINE_COUNT);
-
- else
- chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
+ chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
+ PSR2_ADD_VERTICAL_LINE_COUNT;
I915_WRITE(reg, chicken);
}
@@ -677,8 +737,11 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
- if (dev_priv->psr.enabled)
- return;
+ WARN_ON(dev_priv->psr.enabled);
+
+ dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
DRM_DEBUG_KMS("Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
@@ -711,20 +774,13 @@ void intel_psr_enable(struct intel_dp *intel_dp,
WARN_ON(dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
- if (dev_priv->psr.prepared) {
- DRM_DEBUG_KMS("PSR already in use\n");
+
+ if (!psr_global_enabled(dev_priv->psr.debug)) {
+ DRM_DEBUG_KMS("PSR disabled by flag\n");
goto unlock;
}
- dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
- dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.prepared = true;
- dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
-
- if (psr_global_enabled(dev_priv->psr.debug))
- intel_psr_enable_locked(dev_priv, crtc_state);
- else
- DRM_DEBUG_KMS("PSR disabled by flag\n");
+ intel_psr_enable_locked(dev_priv, crtc_state);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -778,8 +834,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
}
/* Wait till PSR is idle */
- if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
- 2000))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ psr_status, psr_status_mask, 0, 2000))
DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
@@ -807,18 +863,69 @@ void intel_psr_disable(struct intel_dp *intel_dp,
return;
mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.prepared) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
intel_psr_disable_locked(intel_dp);
- dev_priv->psr.prepared = false;
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
}
+static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+{
+ /*
+ * Display WA #0884: all
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
+	 * but it makes more sense to write to the currently active
+	 * pipe.
+ */
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+}
+
+/**
+ * intel_psr_update - Update PSR state
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This function updates the PSR state, disabling, enabling or switching PSR
+ * version when executing fastsets. For full modeset, intel_psr_disable() and
+ * intel_psr_enable() should be called instead.
+ */
+void intel_psr_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_psr *psr = &dev_priv->psr;
+ bool enable, psr2_enable;
+
+ if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+ return;
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
+ psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
+
+ if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
+ /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
+ if (crtc_state->crc_enabled && psr->enabled)
+ psr_force_hw_tracking_exit(dev_priv);
+
+ goto unlock;
+ }
+
+ if (psr->enabled)
+ intel_psr_disable_locked(intel_dp);
+
+ if (enable)
+ intel_psr_enable_locked(dev_priv, crtc_state);
+
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
/**
* intel_psr_wait_for_idle - wait for PSR1 to idle
* @new_crtc_state: new CRTC state
@@ -849,7 +956,7 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* defensive enough to cover everything.
*/
- return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
+ return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
@@ -874,7 +981,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->psr.lock);
- err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+ err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
if (err)
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
@@ -883,36 +990,63 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return err == 0 && dev_priv->psr.enabled;
}
-static bool switching_psr(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state,
- u32 mode)
+static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
- /* Can't switch psr state anyway if PSR2 is not supported. */
- if (!crtc_state || !crtc_state->has_psr2)
- return false;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ int err;
- if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1)
- return true;
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
- if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1)
- return true;
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+ state->acquire_ctx = &ctx;
+
+retry:
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_crtc_state *crtc_state;
+ struct intel_crtc_state *intel_crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ err = PTR_ERR(crtc_state);
+ goto error;
+ }
+
+ intel_crtc_state = to_intel_crtc_state(crtc_state);
+
+ if (crtc_state->active && intel_crtc_state->has_psr) {
+ /* Mark mode as changed to trigger a pipe->update() */
+ crtc_state->mode_changed = true;
+ break;
+ }
+ }
+
+ err = drm_atomic_commit(state);
+
+error:
+ if (err == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ err = drm_modeset_backoff(&ctx);
+ if (!err)
+ goto retry;
+ }
- return false;
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_atomic_state_put(state);
+
+ return err;
}
-int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
- struct drm_modeset_acquire_ctx *ctx,
- u64 val)
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector_state *conn_state;
- struct intel_crtc_state *crtc_state = NULL;
- struct drm_crtc_commit *commit;
- struct drm_crtc *crtc;
- struct intel_dp *dp;
+ const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
+ u32 old_mode;
int ret;
- bool enable;
- u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_FORCE_PSR1) {
@@ -920,49 +1054,19 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
return -EINVAL;
}
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
- if (ret)
- return ret;
-
- /* dev_priv->psr.dp should be set once and then never touched again. */
- dp = READ_ONCE(dev_priv->psr.dp);
- conn_state = dp->attached_connector->base.state;
- crtc = conn_state->crtc;
- if (crtc) {
- ret = drm_modeset_lock(&crtc->mutex, ctx);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(crtc->state);
- commit = crtc_state->base.commit;
- } else {
- commit = conn_state->commit;
- }
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (ret)
- return ret;
- }
-
ret = mutex_lock_interruptible(&dev_priv->psr.lock);
if (ret)
return ret;
- enable = psr_global_enabled(val);
-
- if (!enable || switching_psr(dev_priv, crtc_state, mode))
- intel_psr_disable_locked(dev_priv->psr.dp);
-
+ old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
dev_priv->psr.debug = val;
- if (crtc)
- dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
-
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
- if (dev_priv->psr.prepared && enable)
- intel_psr_enable_locked(dev_priv, crtc_state);
-
mutex_unlock(&dev_priv->psr.lock);
+
+ if (old_mode != mode)
+ ret = intel_psr_fastset_force(dev_priv);
+
return ret;
}
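intel_psr_fastset_force() above follows the standard DRM deadlock-avoidance idiom: every lock taken under a drm_modeset_acquire_ctx may fail with -EDEADLK, in which case the atomic state is cleared, drm_modeset_backoff() drops and re-acquires the contended locks, and the whole sequence retries. Stripped to its skeleton (kernel context assumed; allocation-failure handling omitted for brevity):

    struct drm_modeset_acquire_ctx ctx;
    struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
    int err;

    drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
    state->acquire_ctx = &ctx;
retry:
    /* ... get CRTC states, set crtc_state->mode_changed, ... */
    err = drm_atomic_commit(state);
    if (err == -EDEADLK) {
            drm_atomic_state_clear(state);      /* forget partially built state */
            if (!drm_modeset_backoff(&ctx))     /* drop and re-take locks */
                    goto retry;
    }
    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);
    drm_atomic_state_put(state);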
@@ -1080,18 +1184,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
- if (frontbuffer_bits) {
- /*
- * Display WA #0884: all
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
-	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
-	 * but it makes more sense to write to the currently active
-	 * pipe.
- */
- I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
- }
+ if (frontbuffer_bits)
+ psr_force_hw_tracking_exit(dev_priv);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_work(&dev_priv->psr.work);