Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_cx0_phy.c')
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy.c | 214
1 file changed, 112 insertions(+), 102 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 80e4ec6ee403..d414f6b7f993 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -31,7 +31,7 @@
bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
{
- if (IS_METEORLAKE(i915) && (phy < PHY_C))
+ if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0) && phy < PHY_C)
return true;
return false;
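
The gating condition moves from a platform macro to a display IP version check: C10 PHYs are a property of display 14.0 rather than of Meteor Lake as a platform. The kernel's IP_VER() packs version and release into one integer so full versions compare naturally; a minimal standalone sketch of that comparison (the macro body mirrors the kernel's, everything else is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the kernel's IP_VER() packing: version in the high byte,
 * release in the low byte, so 14.0 < 14.1 < 20.0 compare naturally. */
#define IP_VER(ver, rel) ((ver) << 8 | (rel))

enum phy { PHY_A, PHY_B, PHY_C, PHY_D, PHY_E };

/* display_ver_full is an illustrative stand-in for DISPLAY_VER_FULL(i915). */
static bool is_c10phy(unsigned int display_ver_full, enum phy phy)
{
	return display_ver_full == IP_VER(14, 0) && phy < PHY_C;
}

int main(void)
{
	printf("%d\n", is_c10phy(IP_VER(14, 0), PHY_A)); /* 1: C10 */
	printf("%d\n", is_c10phy(IP_VER(14, 0), PHY_C)); /* 0: C20 on 14.0 */
	printf("%d\n", is_c10phy(IP_VER(14, 1), PHY_A)); /* 0: not display 14.0 */
	return 0;
}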
@@ -46,6 +46,22 @@ static int lane_mask_to_lane(u8 lane_mask)
return ilog2(lane_mask);
}
+static u8 intel_cx0_get_owned_lane_mask(struct drm_i915_private *i915,
+ struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (!intel_tc_port_in_dp_alt_mode(dig_port))
+ return INTEL_CX0_BOTH_LANES;
+
+ /*
+ * In DP-alt with pin assignment D, only PHY lane 0 is owned
+ * by display and lane 1 is owned by USB.
+ */
+ return intel_tc_port_max_lane_count(dig_port) > 2
+ ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
+}
+
static void
assert_dc_off(struct drm_i915_private *i915)
{
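
The new helper captures the TypeC lane-ownership rule in one place: outside DP-alt mode the display owns both PHY lanes, and in DP-alt mode it owns both only when the pin assignment leaves more than two lanes to DP. A standalone sketch of the decision, with dp_alt and max_lanes standing in for intel_tc_port_in_dp_alt_mode() and intel_tc_port_max_lane_count():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CX0_LANE0      0x1 /* BIT(0), mirrors INTEL_CX0_LANE0 */
#define CX0_LANE1      0x2 /* BIT(1), mirrors INTEL_CX0_LANE1 */
#define CX0_BOTH_LANES (CX0_LANE0 | CX0_LANE1)

static uint8_t owned_lane_mask(bool dp_alt, int max_lanes)
{
	if (!dp_alt)
		return CX0_BOTH_LANES;
	/* Pin assignment D leaves only 2 lanes for DP: lane 1 is USB's. */
	return max_lanes > 2 ? CX0_BOTH_LANES : CX0_LANE0;
}

int main(void)
{
	printf("native DP x4: 0x%x\n", owned_lane_mask(false, 4)); /* 0x3 */
	printf("DP-alt x4:    0x%x\n", owned_lane_mask(true, 4));  /* 0x3 */
	printf("DP-alt x2:    0x%x\n", owned_lane_mask(true, 2));  /* 0x1 */
	return 0;
}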
@@ -55,19 +71,38 @@ assert_dc_off(struct drm_i915_private *i915)
drm_WARN_ON(&i915->drm, !enabled);
}
+static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder)
+{
+ int lane;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane)
+ intel_de_rmw(i915,
+ XELPDP_PORT_MSGBUS_TIMER(encoder->port, lane),
+ XELPDP_PORT_MSGBUS_TIMER_VAL_MASK,
+ XELPDP_PORT_MSGBUS_TIMER_VAL);
+}
+
/*
* Prepare HW for CX0 phy transactions.
*
* It is required that PSR and DC5/6 are disabled before any CX0 message
* bus transaction is executed.
+ *
+ * We also do the msgbus timer programming here to ensure that the timer
+ * is already programmed before any access to the msgbus.
*/
static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder)
{
+ intel_wakeref_t wakeref;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_psr_pause(intel_dp);
- return intel_display_power_get(i915, POWER_DOMAIN_DC_OFF);
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_DC_OFF);
+ intel_cx0_program_msgbus_timer(encoder);
+
+ return wakeref;
}
static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
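
Every caller brackets its message-bus traffic between transaction_begin/end, so PSR is paused, the DC-off wakeref is held, and the timer threshold is in place for the whole transaction. A toy sketch of the bracket shape (stub functions, not the i915 API):

#include <stdio.h>

typedef int wakeref_t; /* stand-in for intel_wakeref_t */

/* Stubs; the real helpers pause PSR, grab the DC-off power domain and
 * program the msgbus timer, then undo it all at the end. */
static wakeref_t transaction_begin(void)
{
	puts("pause PSR, hold DC off, program msgbus timer");
	return 1;
}

static void transaction_end(wakeref_t wakeref)
{
	(void)wakeref;
	puts("release DC off, resume PSR");
}

int main(void)
{
	wakeref_t wakeref = transaction_begin();
	/* ... intel_cx0_read()/intel_cx0_rmw() msgbus traffic here ... */
	transaction_end(wakeref);
	return 0;
}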
@@ -116,6 +151,13 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
+
+ if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(port, lane)) &
+ XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT))
+ drm_dbg_kms(&i915->drm,
+ "PHY %c Hardware did not detect a timeout\n",
+ phy_name(phy));
+
intel_cx0_bus_reset(i915, port, lane);
return -ETIMEDOUT;
}
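
On an ACK timeout the code now cross-checks the PHY's own timer: if the hardware never flagged a timeout, the software deadline fired on its own, which points at a timer threshold problem rather than a stuck PHY. A small illustrative sketch of that two-level diagnosis (the bit position is an assumption for the sketch; the kernel tests XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: a TIMED_OUT status flag in the high bit, the timer
 * threshold in the low bits. */
#define TIMER_TIMED_OUT (1u << 31)

static void diagnose_ack_timeout(uint32_t msgbus_timer)
{
	if (!(msgbus_timer & TIMER_TIMED_OUT))
		puts("sw timeout, but hw never flagged one: threshold issue?");
	else
		puts("hw-confirmed timeout: PHY really is unresponsive");
}

int main(void)
{
	diagnose_ack_timeout(0xa000);                   /* hw saw nothing */
	diagnose_ack_timeout(TIMER_TIMED_OUT | 0xa000); /* hw agrees */
	return 0;
}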
@@ -359,6 +401,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(i915, encoder->port);
+ u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
intel_wakeref_t wakeref;
int n_entries, ln;
@@ -371,13 +414,13 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
}
if (intel_is_c10phy(i915, phy)) {
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CMN(3),
+ intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CMN(3),
C10_CMN3_TXVBOOST_MASK,
C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)),
MB_WRITE_UNCOMMITTED);
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_TX(1),
+ intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_TX(1),
C10_TX1_TERMCTL_MASK,
C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)),
MB_WRITE_COMMITTED);
@@ -385,32 +428,34 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
for (ln = 0; ln < crtc_state->lane_count; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- int lane, tx;
+ int lane = ln / 2;
+ int tx = ln % 2;
+ u8 lane_mask = lane == 0 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
- lane = ln / 2;
- tx = ln % 2;
+ if (!(lane_mask & owned_lane_mask))
+ continue;
- intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 0),
+ intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor),
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 1),
+ intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing),
MB_WRITE_COMMITTED);
- intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 2),
+ intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor),
MB_WRITE_COMMITTED);
}
/* Write Override enables in 0xD71 */
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_OVRD,
+ intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_OVRD,
0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2,
MB_WRITE_COMMITTED);
if (intel_is_c10phy(i915, phy))
- intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
intel_cx0_phy_transaction_end(encoder, wakeref);
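
Each DDI lane ln maps to PHY lane ln / 2, transmitter ln % 2, and the loop now skips lanes the display does not own instead of writing them unconditionally. A standalone sketch that prints the mapping:

#include <stdio.h>

int main(void)
{
	/* DDI lane -> (PHY lane, tx) mapping used by the vswing loop:
	 * ln 0 -> lane 0 tx 0, ln 1 -> lane 0 tx 1,
	 * ln 2 -> lane 1 tx 0, ln 3 -> lane 1 tx 1.
	 * With DP-alt pin assignment D (owned mask 0x1), ln 2 and 3
	 * are skipped because PHY lane 1 belongs to USB. */
	unsigned int owned_lane_mask = 0x1; /* DP-alt x2 example */

	for (int ln = 0; ln < 4; ln++) {
		unsigned int lane_mask = ln / 2 == 0 ? 0x1 : 0x2;

		printf("ln %d -> PHY lane %d, tx %d%s\n", ln, ln / 2, ln % 2,
		       lane_mask & owned_lane_mask ? "" : " (skipped, USB-owned)");
	}
	return 0;
}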
@@ -2534,17 +2579,15 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
{
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(i915, port);
- bool both_lanes = intel_tc_port_fia_max_lane_count(enc_to_dig_port(encoder)) > 2;
- u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 :
- INTEL_CX0_LANE0;
- u32 lane_pipe_reset = both_lanes ?
- XELPDP_LANE_PIPE_RESET(0) |
- XELPDP_LANE_PIPE_RESET(1) :
- XELPDP_LANE_PIPE_RESET(0);
- u32 lane_phy_current_status = both_lanes ?
- XELPDP_LANE_PHY_CURRENT_STATUS(0) |
- XELPDP_LANE_PHY_CURRENT_STATUS(1) :
- XELPDP_LANE_PHY_CURRENT_STATUS(0);
+ u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
+ u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0;
+ u32 lane_pipe_reset = owned_lane_mask == INTEL_CX0_BOTH_LANES
+ ? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
+ : XELPDP_LANE_PIPE_RESET(0);
+ u32 lane_phy_current_status = owned_lane_mask == INTEL_CX0_BOTH_LANES
+ ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
+ XELPDP_LANE_PHY_CURRENT_STATUS(1))
+ : XELPDP_LANE_PHY_CURRENT_STATUS(0);
if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(port),
XELPDP_PORT_BUF_SOC_PHY_READY,
@@ -2563,15 +2606,11 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
- intel_cx0_get_pclk_refclk_request(both_lanes ?
- INTEL_CX0_BOTH_LANES :
- INTEL_CX0_LANE0),
+ intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(port),
- intel_cx0_get_pclk_refclk_ack(both_lanes ?
- INTEL_CX0_BOTH_LANES :
- INTEL_CX0_LANE0),
+ intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
intel_cx0_get_pclk_refclk_ack(lane_mask),
XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
@@ -2593,79 +2632,43 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
struct intel_encoder *encoder, int lane_count,
bool lane_reversal)
{
- u8 l0t1, l0t2, l1t1, l1t2;
+ int i;
+ u8 disables;
bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder));
+ u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
enum port port = encoder->port;
if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
- intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_rmw(i915, port, owned_lane_mask,
PHY_C10_VDR_CONTROL(1), 0,
C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
- /* TODO: DP-alt MFD case where only one PHY lane should be programmed. */
- l0t1 = intel_cx0_read(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2));
- l0t2 = intel_cx0_read(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2));
- l1t1 = intel_cx0_read(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2));
- l1t2 = intel_cx0_read(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(2, 2));
-
- l0t1 |= CONTROL2_DISABLE_SINGLE_TX;
- l0t2 |= CONTROL2_DISABLE_SINGLE_TX;
- l1t1 |= CONTROL2_DISABLE_SINGLE_TX;
- l1t2 |= CONTROL2_DISABLE_SINGLE_TX;
-
- if (lane_reversal) {
- switch (lane_count) {
- case 4:
- l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX;
- fallthrough;
- case 3:
- l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX;
- fallthrough;
- case 2:
- l1t1 &= ~CONTROL2_DISABLE_SINGLE_TX;
- fallthrough;
- case 1:
- l1t2 &= ~CONTROL2_DISABLE_SINGLE_TX;
- break;
- default:
- MISSING_CASE(lane_count);
- }
- } else {
- switch (lane_count) {
- case 4:
- l1t2 &= ~CONTROL2_DISABLE_SINGLE_TX;
- fallthrough;
- case 3:
- l1t1 &= ~CONTROL2_DISABLE_SINGLE_TX;
- fallthrough;
- case 2:
- l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX;
- l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX;
- break;
- case 1:
- if (dp_alt_mode)
- l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX;
- else
- l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX;
- break;
- default:
- MISSING_CASE(lane_count);
- }
+ if (lane_reversal)
+ disables = REG_GENMASK8(3, 0) >> lane_count;
+ else
+ disables = REG_GENMASK8(3, 0) << lane_count;
+
+ if (dp_alt_mode && lane_count == 1) {
+ disables &= ~REG_GENMASK8(1, 0);
+ disables |= REG_FIELD_PREP8(REG_GENMASK8(1, 0), 0x1);
}
- /* disable MLs */
- intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2),
- l0t1, MB_WRITE_COMMITTED);
- intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2),
- l0t2, MB_WRITE_COMMITTED);
- intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2),
- l1t1, MB_WRITE_COMMITTED);
- intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(2, 2),
- l1t2, MB_WRITE_COMMITTED);
+ for (i = 0; i < 4; i++) {
+ int tx = i % 2 + 1;
+ u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
+
+ if (!(owned_lane_mask & lane_mask))
+ continue;
+
+ intel_cx0_rmw(i915, port, lane_mask, PHY_CX0_TX_CONTROL(tx, 2),
+ CONTROL2_DISABLE_SINGLE_TX,
+ disables & BIT(i) ? CONTROL2_DISABLE_SINGLE_TX : 0,
+ MB_WRITE_COMMITTED);
+ }
if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
- intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES,
+ intel_cx0_rmw(i915, port, owned_lane_mask,
PHY_C10_VDR_CONTROL(1), 0,
C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
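
The two switch statements collapse into one mask computation over four transmitter bits (bit 0 = lane 0 tx 1 through bit 3 = lane 1 tx 2): active transmitters occupy the low bits normally and the high bits under lane reversal, and the DP-alt x1 case moves the single active transmitter from lane 0 tx 1 to tx 2. A standalone sketch of the arithmetic, masked to the low nibble for printing (the kernel keeps a u8 and only tests BIT(0..3)):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit i disables transmitter i:
 * bit 0 = lane 0 tx 1, bit 1 = lane 0 tx 2,
 * bit 2 = lane 1 tx 1, bit 3 = lane 1 tx 2. */
static uint8_t tx_disables(int lane_count, bool lane_reversal, bool dp_alt_mode)
{
	uint8_t disables;

	/* Active TXs fill the low bits normally and the high bits when
	 * reversed; everything shifted past them stays disabled. */
	if (lane_reversal)
		disables = 0xf >> lane_count;
	else
		disables = 0xf << lane_count;

	/* DP-alt x1 drives lane 0 tx 2 instead of tx 1. */
	if (dp_alt_mode && lane_count == 1) {
		disables &= ~0x3;
		disables |= 0x1;
	}

	return disables & 0xf;
}

int main(void)
{
	for (int n = 1; n <= 4; n++)
		printf("x%d normal 0x%x reversed 0x%x\n", n,
		       tx_disables(n, false, false),
		       tx_disables(n, true, false));
	printf("x1 dp-alt 0x%x\n", tx_disables(1, false, true)); /* 0xd */
	return 0;
}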
@@ -2720,39 +2723,45 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
CX0_P2_STATE_READY);
- /* 4. Program PHY internal PLL internal registers. */
+ /*
+ * 4. Program PORT_MSGBUS_TIMER register's Message Bus Timer field to 0xA000.
+ * (This is done inside intel_cx0_phy_transaction_begin(), since we would need
+ * the right timer thresholds for readouts too.)
+ */
+
+ /* 5. Program PHY internal PLL internal registers. */
if (intel_is_c10phy(i915, phy))
intel_c10_pll_program(i915, crtc_state, encoder);
else
intel_c20_pll_program(i915, crtc_state, encoder);
/*
- * 5. Program the enabled and disabled owned PHY lane
+ * 6. Program the enabled and disabled owned PHY lane
* transmitters over message bus
*/
intel_cx0_program_phy_lane(i915, encoder, crtc_state->lane_count, lane_reversal);
/*
- * 6. Follow the Display Voltage Frequency Switching - Sequence
+ * 7. Follow the Display Voltage Frequency Switching - Sequence
* Before Frequency Change. We handle this step in bxt_set_cdclk().
*/
/*
- * 7. Program DDI_CLK_VALFREQ to match intended DDI
+ * 8. Program DDI_CLK_VALFREQ to match intended DDI
* clock frequency.
*/
intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port),
crtc_state->port_clock);
/*
- * 8. Set PORT_CLOCK_CTL register PCLK PLL Request
+ * 9. Set PORT_CLOCK_CTL register PCLK PLL Request
* LN<Lane for maxPCLK> to "1" to enable PLL.
*/
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES),
intel_cx0_get_pclk_pll_request(maxpclk_lane));
- /* 9. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
+ /* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
intel_cx0_get_pclk_pll_ack(maxpclk_lane),
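
Steps 9 and 10 follow the usual request/ack handshake: a read-modify-write clears the request field across both lanes and raises it for the maxPCLK lane, then the caller polls for the matching ack bit. A toy model of that shape (bit positions invented for the sketch; only the clear-then-set-then-poll structure mirrors the XELPDP_PORT_CLOCK_CTL flow):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PLL_REQ(lane) (1u << (lane))
#define PLL_ACK(lane) (1u << (8 + (lane)))

static uint32_t clock_ctl; /* fake register */

static void rmw(uint32_t clear, uint32_t set)
{
	clock_ctl = (clock_ctl & ~clear) | set;
}

int main(void)
{
	int maxpclk_lane = 0;

	/* Step 9: clear the request on both lanes, raise it on one. */
	rmw(PLL_REQ(0) | PLL_REQ(1), PLL_REQ(maxpclk_lane));

	clock_ctl |= PLL_ACK(maxpclk_lane); /* pretend the PHY acked */

	/* Step 10: poll until the ack field matches the request. */
	bool locked = (clock_ctl & (PLL_ACK(0) | PLL_ACK(1))) ==
		      PLL_ACK(maxpclk_lane);
	printf("PLL %s\n", locked ? "locked" : "timed out");
	return 0;
}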
@@ -2761,7 +2770,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
/*
- * 10. Follow the Display Voltage Frequency Switching Sequence After
+ * 11. Follow the Display Voltage Frequency Switching Sequence After
* Frequency Change. We handle this step in bxt_set_cdclk().
*/
@@ -2995,12 +3004,13 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
}
void intel_c10pll_state_verify(struct intel_atomic_state *state,
- struct intel_crtc_state *new_crtc_state)
+ struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
- struct intel_c10pll_state mpllb_hw_state = { 0 };
- struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10;
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_c10pll_state mpllb_hw_state = {};
+ const struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10;
struct intel_encoder *encoder;
enum phy phy;
int i;
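
The verify helper now takes the crtc and resolves the new crtc state from the atomic commit itself, instead of trusting a crtc_state handed in by the caller. A toy sketch of that lookup pattern (types are stand-ins, not the i915 structures):

#include <stdio.h>

struct crtc { int id; };
struct crtc_state { int pll_freq; };

struct atomic_state {
	struct crtc_state *new_states[4]; /* indexed by crtc id */
};

/* Mirrors the shape of intel_atomic_get_new_crtc_state(): the verifier
 * resolves the new state from the atomic state, so all callers share
 * one lookup path instead of each passing a crtc_state along. */
static const struct crtc_state *get_new_crtc_state(struct atomic_state *s,
						   struct crtc *crtc)
{
	return s->new_states[crtc->id];
}

static void pll_state_verify(struct atomic_state *s, struct crtc *crtc)
{
	const struct crtc_state *new_state = get_new_crtc_state(s, crtc);

	printf("crtc %d: expecting pll at %d kHz\n", crtc->id,
	       new_state->pll_freq);
}

int main(void)
{
	struct crtc_state state = { .pll_freq = 540000 };
	struct atomic_state s = { .new_states = { &state } };
	struct crtc crtc = { .id = 0 };

	pll_state_verify(&s, &crtc);
	return 0;
}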