Diffstat (limited to 'drivers/gpu/drm/amd/display')
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 456
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 185
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 125
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/vector.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 94
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 579
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dsc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c | 185
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c | 52
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 107
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c | 90
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 80
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 131
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h | 183
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/include/link_service_types.h | 2
96 files changed, 2234 insertions, 1114 deletions
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index bf0a655d009e..901d1961b739 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -5,7 +5,7 @@ menu "Display Engine Configuration"
config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
- depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
+ depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
select DRM_AMD_DC_FP if (X86 || LOONGARCH || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ff0a217b9d56..d96b6eda3320 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -245,51 +245,52 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
*/
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
+ struct amdgpu_crtc *acrtc = NULL;
+
if (crtc >= adev->mode_info.num_crtc)
return 0;
- else {
- struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
- if (acrtc->dm_irq_params.stream == NULL) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
- crtc);
- return 0;
- }
+ acrtc = adev->mode_info.crtcs[crtc];
- return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
}
+
+ return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
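
For reference, this is how dm_vblank_get_counter() reads once the hunk is applied; it is reconstructed directly from the +/- lines above, with the else branch flattened into early returns:

static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc = NULL;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}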
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
u32 v_blank_start, v_blank_end, h_position, v_position;
+ struct amdgpu_crtc *acrtc = NULL;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
- else {
- struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
- if (acrtc->dm_irq_params.stream == NULL) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
- crtc);
- return 0;
- }
-
- /*
- * TODO rework base driver to use values directly.
- * for now parse it back into reg-format
- */
- dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
- &v_blank_start,
- &v_blank_end,
- &h_position,
- &v_position);
+ acrtc = adev->mode_info.crtcs[crtc];
- *position = v_position | (h_position << 16);
- *vbl = v_blank_start | (v_blank_end << 16);
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
}
+ /*
+ * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format
+ */
+ dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
+
return 0;
}
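
dm_crtc_get_scanoutpos() gets the same early-return treatment. The packed "reg-format" values it produces keep the vertical component in the low 16 bits and the horizontal (or blank-end) component in the high 16 bits; a hypothetical unpacking helper, shown only to make the bit layout explicit (not part of this patch):

/* Hypothetical inverse of the packing done above, for illustration. */
static inline void dm_unpack_scanoutpos(u32 position, u32 vbl,
					u32 *h_pos, u32 *v_pos,
					u32 *vbl_start, u32 *vbl_end)
{
	*v_pos = position & 0xffff;	/* low 16 bits: v_position */
	*h_pos = position >> 16;	/* high 16 bits: h_position */
	*vbl_start = vbl & 0xffff;
	*vbl_end = vbl >> 16;
}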
@@ -424,12 +425,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
- DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
- amdgpu_crtc->pflip_status,
- AMDGPU_FLIP_SUBMITTED,
- amdgpu_crtc->crtc_id,
- amdgpu_crtc);
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id,
+ amdgpu_crtc);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return;
}
@@ -883,7 +884,7 @@ static int dm_set_powergating_state(void *handle,
}
/* Prototypes of private functions */
-static int dm_early_init(void* handle);
+static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -1282,7 +1283,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
- pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+ pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
@@ -1347,6 +1348,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
if (amdgpu_in_reset(adev))
goto skip;
+ if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ goto skip;
+ }
+
mutex_lock(&adev->dm.dc_lock);
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dc_link_dp_handle_automated_test(dc_link);
@@ -1365,8 +1375,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
DP_TEST_RESPONSE,
&test_response.raw,
sizeof(test_response));
- }
- else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
dc_link_dp_allow_hpd_rx_irq(dc_link)) {
/* offload_work->data is from handle_hpd_rx_irq->
@@ -1554,7 +1563,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
- if(amdgpu_dm_irq_init(adev)) {
+ if (amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
}
@@ -1696,9 +1705,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
- if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
adev->dm.dc->debug.disable_dsc = true;
- }
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
adev->dm.dc->debug.disable_clock_gate = true;
@@ -1942,8 +1950,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->dm.audio_lock);
mutex_destroy(&adev->dm.dc_lock);
mutex_destroy(&adev->dm.dpia_aux_lock);
-
- return;
}
static int load_dmcu_fw(struct amdgpu_device *adev)
@@ -1952,7 +1958,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
int r;
const struct dmcu_firmware_header_v1_0 *hdr;
- switch(adev->asic_type) {
+ switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@@ -2709,7 +2715,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
struct dc_scaling_info scaling_infos[MAX_SURFACES];
struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
- } * bundle;
+ } *bundle;
int k, m;
bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
@@ -2739,8 +2745,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
cleanup:
kfree(bundle);
-
- return;
}
static int dm_resume(void *handle)
@@ -2954,8 +2958,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.set_powergating_state = dm_set_powergating_state,
};
-const struct amdgpu_ip_block_version dm_ip_block =
-{
+const struct amdgpu_ip_block_version dm_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 1,
.minor = 0,
@@ -3000,9 +3003,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
- if (caps->ext_caps->bits.oled == 1 /*||
- caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
- caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
+ if (caps->ext_caps->bits.oled == 1
+ /*
+ * ||
+ * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+ */)
caps->aux_support = true;
if (amdgpu_backlight == 0)
@@ -3236,86 +3242,6 @@ static void handle_hpd_irq(void *param)
}
-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
-{
- u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
- u8 dret;
- bool new_irq_handled = false;
- int dpcd_addr;
- int dpcd_bytes_to_read;
-
- const int max_process_count = 30;
- int process_count = 0;
-
- const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
-
- if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
- dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
- /* DPCD 0x200 - 0x201 for downstream IRQ */
- dpcd_addr = DP_SINK_COUNT;
- } else {
- dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
- /* DPCD 0x2002 - 0x2005 for downstream IRQ */
- dpcd_addr = DP_SINK_COUNT_ESI;
- }
-
- dret = drm_dp_dpcd_read(
- &aconnector->dm_dp_aux.aux,
- dpcd_addr,
- esi,
- dpcd_bytes_to_read);
-
- while (dret == dpcd_bytes_to_read &&
- process_count < max_process_count) {
- u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
- u8 retry;
- dret = 0;
-
- process_count++;
-
- DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
- /* handle HPD short pulse irq */
- if (aconnector->mst_mgr.mst_state)
- drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
- esi,
- ack,
- &new_irq_handled);
-
- if (new_irq_handled) {
- /* ACK at DPCD to notify down stream */
- for (retry = 0; retry < 3; retry++) {
- ssize_t wret;
-
- wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
- dpcd_addr + 1,
- ack[1]);
- if (wret == 1)
- break;
- }
-
- if (retry == 3) {
- DRM_ERROR("Failed to ack MST event.\n");
- return;
- }
-
- drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
- /* check if there is new irq to be handled */
- dret = drm_dp_dpcd_read(
- &aconnector->dm_dp_aux.aux,
- dpcd_addr,
- esi,
- dpcd_bytes_to_read);
-
- new_irq_handled = false;
- } else {
- break;
- }
- }
-
- if (process_count == max_process_count)
- DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
-}
-
static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
@@ -3377,7 +3303,23 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
- dm_handle_mst_sideband_msg(aconnector);
+ bool skip = false;
+
+ /*
+ * DOWN_REP_MSG_RDY is also handled by polling method
+ * mgr->cbs->poll_hpd_irq()
+ */
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+ if (!skip)
+ offload_wq->is_handling_mst_msg_rdy_event = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
goto out;
}
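
Together with the worker-side hunk in dm_handle_hpd_rx_offload_work() above, this forms a test-and-set handoff around is_handling_mst_msg_rdy_event: the ESI read/ack loop deleted from dm_handle_mst_sideband_msg() moves to dm_handle_mst_sideband_msg_ready_event() in amdgpu_dm_mst_types.c (see the diffstat), and the IRQ handler now only claims the event and defers. A condensed sketch, with names taken from the hunks:

/* IRQ side: claim the ready-event, then defer to the workqueue. */
spin_lock(&offload_wq->offload_lock);
skip = offload_wq->is_handling_mst_msg_rdy_event;
if (!skip)
	offload_wq->is_handling_mst_msg_rdy_event = true;
spin_unlock(&offload_wq->offload_lock);

if (!skip)
	schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

/* Worker side: drain the sideband messages, then release the claim. */
dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr,
				       DOWN_OR_UP_MSG_RDY_EVENT);
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);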
@@ -3468,7 +3410,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
aconnector = to_amdgpu_dm_connector(connector);
dc_link = aconnector->dc_link;
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
int_params.irq_source = dc_link->irq_source_hpd;
@@ -3477,7 +3419,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
(void *) aconnector);
}
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
/* Also register for DP short pulse (hpd_rx). */
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
@@ -3486,11 +3428,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);
-
- if (adev->dm.hpd_rx_offload_wq)
- adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
- aconnector;
}
+
+ if (adev->dm.hpd_rx_offload_wq)
+ adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+ aconnector;
}
}
@@ -3503,7 +3445,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
- unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -3517,11 +3459,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
- * for acknowledging and handling. */
+ * for acknowledging and handling.
+ */
/* Use VBLANK interrupt */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
DRM_ERROR("Failed to add crtc irq id!\n");
return r;
@@ -3529,7 +3472,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
- dc_interrupt_to_irq_source(dc, i+1 , 0);
+ dc_interrupt_to_irq_source(dc, i + 1, 0);
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
@@ -3585,7 +3528,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
- unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
if (adev->family >= AMDGPU_FAMILY_AI)
client_id = SOC15_IH_CLIENTID_DCE;
@@ -3602,7 +3545,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
- * for acknowledging and handling. */
+ * for acknowledging and handling.
+ */
/* Use VBLANK interrupt */
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
@@ -4049,7 +3993,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
}
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
- unsigned *min, unsigned *max)
+ unsigned int *min, unsigned int *max)
{
if (!caps)
return 0;
@@ -4069,7 +4013,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
- unsigned min, max;
+ unsigned int min, max;
if (!get_brightness_range(caps, &min, &max))
return brightness;
@@ -4082,7 +4026,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
- unsigned min, max;
+ unsigned int min, max;
if (!get_brightness_range(caps, &min, &max))
return brightness;
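
These two converters map between the 0..255 user scale and the panel range reported by get_brightness_range(); only their signatures change in this hunk (unsigned to unsigned int). For orientation, the from-user direction is a plain linear rescale. This is a sketch only; the body and the AMDGPU_MAX_BL_LEVEL constant are assumed, not shown in this diff:

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned int min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	/* Rescale 0..AMDGPU_MAX_BL_LEVEL onto the panel's min..max. */
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}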
@@ -4148,6 +4092,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
int bl_idx)
{
+ int ret;
struct amdgpu_dm_backlight_caps caps;
struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
@@ -4162,13 +4107,14 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
if (!rc)
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
- } else {
- int ret = dc_link_get_backlight_level(link);
-
- if (ret == DC_ERROR_UNEXPECTED)
- return dm->brightness[bl_idx];
- return convert_brightness_to_user(&caps, ret);
}
+
+ ret = dc_link_get_backlight_level(link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return dm->brightness[bl_idx];
+
+ return convert_brightness_to_user(&caps, ret);
}
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@ -4562,7 +4508,6 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
drm_atomic_private_obj_fini(&dm->atomic_obj);
- return;
}
/******************************************************************************
@@ -5394,6 +5339,7 @@ static bool adjust_colour_depth_from_display_info(
{
enum dc_color_depth depth = timing_out->display_color_depth;
int normalized_clk;
+
do {
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
@@ -5609,6 +5555,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
+
sink_init_data.link = aconnector->dc_link;
sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
@@ -5732,7 +5679,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
return &aconnector->freesync_vid_base;
/* Find the preferred mode */
- list_for_each_entry (m, list_head, head) {
+ list_for_each_entry(m, list_head, head) {
if (m->type & DRM_MODE_TYPE_PREFERRED) {
m_pref = m;
break;
@@ -5756,7 +5703,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
* For some monitors, preferred mode is not the mode with highest
* supported refresh rate.
*/
- list_for_each_entry (m, list_head, head) {
+ list_for_each_entry(m, list_head, head) {
current_refresh = drm_mode_vrefresh(m);
if (m->hdisplay == m_pref->hdisplay &&
@@ -5849,6 +5796,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
edp_min_bpp_x16, edp_max_bpp_x16,
dsc_caps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&bw_range)) {
if (bw_range.max_kbps < link_bw_in_kbps) {
@@ -5857,6 +5805,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
&dsc_options,
0,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&dsc_cfg)) {
stream->timing.dsc_cfg = dsc_cfg;
stream->timing.flags.DSC = 1;
@@ -5871,6 +5820,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
&dsc_options,
link_bw_in_kbps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&dsc_cfg)) {
stream->timing.dsc_cfg = dsc_cfg;
stream->timing.flags.DSC = 1;
@@ -5914,12 +5864,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
&dsc_options,
link_bandwidth_kbps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
- timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link));
max_supported_bw_in_kbps = link_bandwidth_kbps;
dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
@@ -5931,6 +5883,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
&dsc_options,
dsc_max_supported_bw_in_kbps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
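
The recurring new argument in these DSC hunks threads the link's channel coding into the bandwidth math: dc_bandwidth_in_kbps_from_timing() and the DSC config helpers now take the result of dc_link_get_highest_encoding_format(), presumably so the kbps figures account for 8b/10b versus 128b/132b links. The post-patch call shape, copied from the hunk:

timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
		dc_link_get_highest_encoding_format(aconnector->dc_link));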
@@ -6028,7 +5981,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
* This may not be an error, the use case is when we have no
* usermode calls to reset and set mode upon hotplug. In this
* case, we call set mode ourselves to restore the previous mode
- * and the modelist may not be filled in in time.
+ * and the modelist may not be filled in time.
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
@@ -6051,9 +6004,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
drm_mode_set_crtcinfo(&mode, 0);
/*
- * If scaling is enabled and refresh rate didn't change
- * we copy the vic and polarities of the old timings
- */
+ * If scaling is enabled and refresh rate didn't change
+ * we copy the vic and polarities of the old timings
+ */
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(
stream, &mode, &aconnector->base, con_state, NULL,
@@ -6817,6 +6770,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
+
is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
aconnector->force_yuv420_output;
color_depth = convert_color_depth_from_display_info(connector,
@@ -7135,7 +7089,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
{
struct drm_display_mode *m;
- list_for_each_entry (m, &aconnector->base.probed_modes, head) {
+ list_for_each_entry(m, &aconnector->base.probed_modes, head) {
if (drm_mode_equal(m, mode))
return true;
}
@@ -7295,6 +7249,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
mutex_init(&aconnector->hpd_lock);
+ mutex_init(&aconnector->handle_mst_msg_ready);
/*
* configure support HPD hot plug connector_>polled default value is 0
@@ -7454,7 +7409,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link->priv = aconnector;
- DRM_DEBUG_DRIVER("%s()\n", __func__);
i2c = create_i2c(link->ddc, link->link_index, &res);
if (!i2c) {
@@ -7982,7 +7936,6 @@ static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
- struct dc_state *dc_state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
struct drm_crtc *pcrtc,
@@ -8125,7 +8078,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* Only allow immediate flips for fast updates that don't
* change memory domain, FB pitch, DCC state, rotation or
* mirroring.
+ *
+ * dm_crtc_helper_atomic_check() only accepts async flips with
+ * fast updates.
*/
+ if (crtc->state->async_flip &&
+ acrtc_state->update_type != UPDATE_TYPE_FAST)
+ drm_warn_once(state->dev,
+ "[PLANE:%d:%s] async flip with non-fast update\n",
+ plane->base.id, plane->name);
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8168,8 +8129,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* DRI3/Present extension with defined target_msc.
*/
last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
- }
- else {
+ } else {
/* For variable refresh rate mode only:
* Get vblank of last completed flip to avoid > 1 vrr
* flips per video frame by use of throttling, but allow
@@ -8455,55 +8415,20 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
- u32 i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
- struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
bool mode_set_reset_required = false;
- int r;
-
- trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
- r = drm_atomic_helper_wait_for_fences(dev, state, false);
- if (unlikely(r))
- DRM_ERROR("Waiting for fences timed out!");
-
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_dp_mst_atomic_wait_for_dependencies(state);
-
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
- } else {
- /* No state changes, retain current state. */
- dc_state_temp = dc_create_state(dm->dc);
- ASSERT(dc_state_temp);
- dc_state = dc_state_temp;
- dc_resource_state_copy_construct_current(dm->dc, dc_state);
- }
+ u32 i;
- for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -8526,9 +8451,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
drm_dbg_state(state->dev,
- "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
- "planes_changed:%d, mode_changed:%d,active_changed:%d,"
- "connectors_changed:%d\n",
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
acrtc->crtc_id,
new_crtc_state->enable,
new_crtc_state->active,
@@ -8601,24 +8524,22 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
} /* for_each_crtc_in_state() */
- if (dc_state) {
- /* if there mode set or reset, disable eDP PSR */
- if (mode_set_reset_required) {
- if (dm->vblank_control_workqueue)
- flush_workqueue(dm->vblank_control_workqueue);
+ /* if there mode set or reset, disable eDP PSR */
+ if (mode_set_reset_required) {
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
- amdgpu_dm_psr_disable_all(dm);
- }
+ amdgpu_dm_psr_disable_all(dm);
+ }
- dm_enable_per_frame_crtc_master_sync(dc_state);
- mutex_lock(&dm->dc_lock);
- WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
- /* Allow idle optimization when vblank count is 0 for display off */
- if (dm->active_vblank_irq_count == 0)
- dc_allow_idle_optimizations(dm->dc, true);
- mutex_unlock(&dm->dc_lock);
- }
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
+ mutex_unlock(&dm->dc_lock);
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -8638,6 +8559,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failures here imply a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
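
Net effect of this split: stream-level programming moves into the new amdgpu_dm_commit_streams() helper, and the old fallback that copy-constructed a temporary dc_state (dc_create_state() / dc_resource_state_copy_construct_current(), released at the end of commit_tail) is gone; with no new DC context, the stream commit is simply skipped. A condensed post-patch flow, names from this diff, bodies elided:

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	/* ...declarations as in the hunk... */
	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);
	drm_dp_mst_atomic_wait_for_dependencies(state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context)
		amdgpu_dm_commit_streams(state, dm_state->context);

	/* per-connector HDR/scaling updates, plane commits, audio and
	 * PM-runtime bookkeeping follow, as in the remaining hunks */
}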
@@ -8760,13 +8719,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dc_surface_update dummy_updates[MAX_SURFACES];
+ struct dc_surface_update *dummy_updates;
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
bool abm_changed, hdr_changed, scaling_changed;
- memset(&dummy_updates, 0, sizeof(dummy_updates));
memset(&stream_update, 0, sizeof(stream_update));
if (acrtc) {
@@ -8825,6 +8783,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* Here we create an empty update on each plane.
* To fix this, DC should permit updating only stream properties.
*/
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
@@ -8836,6 +8795,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state->stream,
&stream_update);
mutex_unlock(&dm->dc_lock);
+ kfree(dummy_updates);
}
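
Moving dummy_updates off the stack trims sizeof(struct dc_surface_update) * MAX_SURFACES bytes from an already large frame, at the cost of a GFP_ATOMIC allocation per connector update. As shown, the hunk does not test the allocation; the obvious hardening (hypothetical, not part of this patch) would be:

dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES,
			GFP_ATOMIC);
if (!dummy_updates) {
	DRM_ERROR("Failed to allocate surface updates\n"); /* hypothetical */
	continue; /* skip this connector's stream update */
}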
/**
@@ -8914,8 +8874,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream)
- amdgpu_dm_commit_planes(state, dc_state, dev,
- dm, crtc, wait_for_vblank);
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
}
/* Update audio instances for each connector. */
@@ -8970,9 +8929,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (i = 0; i < crtc_disable_count; i++)
pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
-
- if (dc_state_temp)
- dc_release_state(dc_state_temp);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
@@ -9104,8 +9060,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
- "timed out\n", crtc->base.id, crtc->name);
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ crtc->base.id, crtc->name);
drm_crtc_commit_put(commit);
}
@@ -9190,7 +9146,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
return false;
}
-static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
u64 num, den, res;
struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
@@ -9312,9 +9269,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
goto skip_modeset;
drm_dbg_state(state->dev,
- "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
- "planes_changed:%d, mode_changed:%d,active_changed:%d,"
- "connectors_changed:%d\n",
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
acrtc->crtc_id,
new_crtc_state->enable,
new_crtc_state->active,
@@ -9343,8 +9298,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
DRM_DEBUG_DRIVER(
- "Mode change not required for front porch change, "
- "setting mode_changed to %d",
+ "Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
set_freesync_fixed_config(dm_new_crtc_state);
@@ -9356,9 +9310,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct drm_display_mode *high_mode;
high_mode = get_highest_refresh_rate_mode(aconnector, false);
- if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+ if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
set_freesync_fixed_config(dm_new_crtc_state);
- }
}
ret = dm_atomic_get_state(state, &dm_state);
@@ -9526,6 +9479,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
*/
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
struct amdgpu_framebuffer *old_afb, *new_afb;
+
if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -9624,11 +9578,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
}
/* Core DRM takes care of checking FB modifiers, so we only need to
- * check tiling flags when the FB doesn't have a modifier. */
+ * check tiling flags when the FB doesn't have a modifier.
+ */
if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
if (adev->family < AMDGPU_FAMILY_AI) {
linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
- AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
} else {
linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
@@ -9762,8 +9717,8 @@ static int dm_update_plane_state(struct dc *dc,
if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
return -EINVAL;
- else
- *is_top_most_overlay = false;
+
+ *is_top_most_overlay = false;
}
DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
@@ -9850,12 +9805,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
* cursor per pipe but it's going to inherit the scaling and
* positioning from the underlying pipe. Check the cursor plane's
- * blending properties match the underlying planes'. */
+ * blending properties match the underlying planes'.
+ */
new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
- if (!new_cursor_state || !new_cursor_state->fb) {
+ if (!new_cursor_state || !new_cursor_state->fb)
return 0;
- }
dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
@@ -9900,6 +9855,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
struct drm_connector_state *conn_state, *old_conn_state;
struct amdgpu_dm_connector *aconnector = NULL;
int i;
+
for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
if (!conn_state->crtc)
conn_state = old_conn_state;
@@ -10334,7 +10290,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
/* Store the overall update type for use later in atomic check. */
- for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct dm_crtc_state *dm_new_crtc_state =
to_dm_crtc_state(new_crtc_state);
@@ -10356,7 +10312,7 @@ fail:
else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
else
- DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
trace_amdgpu_dm_atomic_check_finish(state, ret);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 4561f55afa99..9fb5bb3a75a7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -195,6 +195,11 @@ struct hpd_rx_irq_offload_work_queue {
*/
bool is_handling_link_loss;
/**
+ * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
+ * ready event when we're already handling mst message ready event
+ */
+ bool is_handling_mst_msg_rdy_event;
+ /**
* @aconnector: The aconnector that this work queue is attached to
*/
struct amdgpu_dm_connector *aconnector;
@@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ struct mutex handle_mst_msg_ready;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 0802f8e8fac5..52ecfa746b54 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -123,9 +123,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
crtc = secure_display_ctx->crtc;
- if (!crtc) {
+ if (!crtc)
return;
- }
psp = &drm_to_adev(crtc->dev)->psp;
@@ -151,9 +150,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
if (!ret) {
- if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
- }
}
mutex_unlock(&psp->securedisplay_context.mutex);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 440fc0869a34..30d4c6fd95f5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ /*
+ * Only allow async flips for fast updates that don't change the FB
+ * pitch, the DCC state, rotation, etc.
+ */
+ if (crtc_state->async_flip &&
+ dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+ crtc->base.id, crtc->name);
+ return -EINVAL;
+ }
+
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
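
This is the enforcement half of the async-flip change: atomic check now fails the commit outright, while the drm_warn_once() added in amdgpu_dm_commit_planes() only flags requests that still reach the commit path. From userspace the rejection surfaces as -EINVAL on the flip; a hypothetical libdrm-level fallback might look like:

#include <xf86drmMode.h>

int flip_with_fallback(int fd, uint32_t crtc_id, uint32_t fb_id, void *user_data)
{
	int ret = drmModePageFlip(fd, crtc_id, fb_id,
				  DRM_MODE_PAGE_FLIP_ASYNC |
				  DRM_MODE_PAGE_FLIP_EVENT, user_data);
	if (ret == -EINVAL) /* async flip refused: not a fast update */
		ret = drmModePageFlip(fd, crtc_id, fb_id,
				      DRM_MODE_PAGE_FLIP_EVENT, user_data);
	return ret;
}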
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index d63ee636483b..7c21e21bcc51 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1075,24 +1075,24 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
switch (dm_crtc_state->stream->output_color_space) {
case COLOR_SPACE_SRGB:
- seq_printf(m, "sRGB");
+ seq_puts(m, "sRGB");
break;
case COLOR_SPACE_YCBCR601:
case COLOR_SPACE_YCBCR601_LIMITED:
- seq_printf(m, "BT601_YCC");
+ seq_puts(m, "BT601_YCC");
break;
case COLOR_SPACE_YCBCR709:
case COLOR_SPACE_YCBCR709_LIMITED:
- seq_printf(m, "BT709_YCC");
+ seq_puts(m, "BT709_YCC");
break;
case COLOR_SPACE_ADOBERGB:
- seq_printf(m, "opRGB");
+ seq_puts(m, "opRGB");
break;
case COLOR_SPACE_2020_RGB_FULLRANGE:
- seq_printf(m, "BT2020_RGB");
+ seq_puts(m, "BT2020_RGB");
break;
case COLOR_SPACE_2020_YCBCR:
- seq_printf(m, "BT2020_YCC");
+ seq_puts(m, "BT2020_YCC");
break;
default:
goto unlock;
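
These conversions are the usual checkpatch cleanup: seq_puts() skips format-string parsing, so it is cheaper for constant strings and immune to a stray '%' in the literal. The pattern in one line:

seq_puts(m, "sRGB"); /* preferred over seq_printf(m, "sRGB") for plain strings */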
@@ -3022,7 +3022,7 @@ static int edp_ilr_show(struct seq_file *m, void *unused)
seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz);
}
} else {
- seq_printf(m, "ILR is not supported by this eDP panel.\n");
+ seq_puts(m, "ILR is not supported by this eDP panel.\n");
}
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 5536d17306d0..8db47f66eac0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -39,10 +39,10 @@
static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
{
-
struct dc_link *link = handle;
struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
- struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+ struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW,
+ link->dc->caps.i2c_speed_in_khz};
return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}
@@ -52,8 +52,10 @@ lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint3
{
struct dc_link *link = handle;
- struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} };
- struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+ struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset},
+ {false, address, size, data} };
+ struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW,
+ link->dc->caps.i2c_speed_in_khz};
return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}
@@ -76,7 +78,6 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
-
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.context.initialized) {
@@ -96,13 +97,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint
*srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
*srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;
-
return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
}
-static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
+static int psp_set_srm(struct psp_context *psp,
+ u8 *srm, uint32_t srm_size, uint32_t *srm_version)
{
-
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.context.initialized) {
@@ -119,7 +119,8 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size,
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+ hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
return -EINVAL;
@@ -150,7 +151,6 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
static void link_lock(struct hdcp_workqueue *work, bool lock)
{
-
int i = 0;
for (i = 0; i < work->max_link; i++) {
@@ -160,10 +160,11 @@ static void link_lock(struct hdcp_workqueue *work, bool lock)
mutex_unlock(&work[i].mutex);
}
}
+
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int link_index,
struct amdgpu_dm_connector *aconnector,
- uint8_t content_type,
+ u8 content_type,
bool enable_encryption)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -178,18 +179,19 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
query.display = NULL;
mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);
- if (query.display != NULL) {
+ if (query.display) {
memcpy(display, query.display, sizeof(struct mod_hdcp_display));
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
if (enable_encryption) {
- /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp
- * (s3 resume case)
+ /* Explicitly set the saved SRM as sysfs call will be after
+ * we already enabled hdcp (s3 resume case)
*/
if (hdcp_work->srm_size > 0)
- psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
+ psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm,
+ hdcp_work->srm_size,
&hdcp_work->srm_version);
display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
@@ -219,7 +221,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
}
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
- unsigned int link_index,
+ unsigned int link_index,
struct amdgpu_dm_connector *aconnector)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -238,7 +240,8 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n",
- aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms);
+ aconnector->base.index, conn_state->hdcp_content_type,
+ aconnector->base.dpms);
}
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
@@ -246,6 +249,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
process_output(hdcp_w);
mutex_unlock(&hdcp_w->mutex);
}
+
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -274,15 +278,12 @@ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index
schedule_work(&hdcp_w->cpirq_work);
}
-
-
-
static void event_callback(struct work_struct *work)
{
struct hdcp_workqueue *hdcp_work;
hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
- callback_dwork);
+ callback_dwork);
mutex_lock(&hdcp_work->mutex);
@@ -294,13 +295,12 @@ static void event_callback(struct work_struct *work)
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
-
-
}
static void event_property_update(struct work_struct *work)
{
- struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
+ struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue,
+ property_update_work);
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_device *dev;
long ret;
@@ -334,11 +334,10 @@ static void event_property_update(struct work_struct *work)
mutex_lock(&hdcp_work->mutex);
if (conn_state->commit) {
- ret = wait_for_completion_interruptible_timeout(
- &conn_state->commit->hw_done, 10 * HZ);
+ ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
+ 10 * HZ);
if (ret == 0) {
- DRM_ERROR(
- "HDCP state unknown! Setting it to DESIRED");
+ DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n");
hdcp_work->encryption_status[conn_index] =
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
}
@@ -349,24 +348,20 @@ static void event_property_update(struct work_struct *work)
DRM_MODE_HDCP_CONTENT_TYPE0 &&
hdcp_work->encryption_status[conn_index] <=
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) {
-
DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n");
- drm_hdcp_update_content_protection(
- connector,
- DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ drm_hdcp_update_content_protection(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED);
} else if (conn_state->hdcp_content_type ==
DRM_MODE_HDCP_CONTENT_TYPE1 &&
hdcp_work->encryption_status[conn_index] ==
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) {
- drm_hdcp_update_content_protection(
- connector,
- DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ drm_hdcp_update_content_protection(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED);
}
} else {
DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n");
- drm_hdcp_update_content_protection(
- connector, DRM_MODE_CONTENT_PROTECTION_DESIRED);
-
+ drm_hdcp_update_content_protection(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
mutex_unlock(&hdcp_work->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -402,7 +397,7 @@ static void event_property_validate(struct work_struct *work)
&query);
DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n",
- aconnector->base.index,
+ aconnector->base.index,
aconnector->base.state->content_protection,
query.encryption_status,
hdcp_work->encryption_status[conn_index]);
@@ -410,7 +405,8 @@ static void event_property_validate(struct work_struct *work)
if (query.encryption_status !=
hdcp_work->encryption_status[conn_index]) {
DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n",
- hdcp_work->encryption_status[conn_index], query.encryption_status);
+ hdcp_work->encryption_status[conn_index],
+ query.encryption_status);
hdcp_work->encryption_status[conn_index] =
query.encryption_status;
@@ -429,7 +425,7 @@ static void event_watchdog_timer(struct work_struct *work)
struct hdcp_workqueue *hdcp_work;
hdcp_work = container_of(to_delayed_work(work),
- struct hdcp_workqueue,
+ struct hdcp_workqueue,
watchdog_timer_dwork);
mutex_lock(&hdcp_work->mutex);
@@ -443,7 +439,6 @@ static void event_watchdog_timer(struct work_struct *work)
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
-
}
static void event_cpirq(struct work_struct *work)
@@ -459,10 +454,8 @@ static void event_cpirq(struct work_struct *work)
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
-
}
-
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
{
int i = 0;
@@ -478,10 +471,8 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
kfree(hdcp_work);
}
-
static bool enable_assr(void *handle, struct dc_link *link)
{
-
struct hdcp_workqueue *hdcp_work = handle;
struct mod_hdcp hdcp = hdcp_work->hdcp;
struct psp_context *psp = hdcp.config.psp.handle;
@@ -499,7 +490,8 @@ static bool enable_assr(void *handle, struct dc_link *link)
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
- dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst;
+ dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index =
+ link->link_enc_hw_inst;
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
@@ -541,7 +533,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
else if (aconnector->dc_em_sink)
sink = aconnector->dc_em_sink;
- if (sink != NULL)
+ if (sink)
link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal);
display->controller = CONTROLLER_ID_D0 + config->otg_inst;
@@ -567,16 +559,20 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
conn_state = aconnector->base.state;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
- (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1,
- (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1);
+ (!!aconnector->base.state) ?
+ aconnector->base.state->content_protection : -1,
+ (!!aconnector->base.state) ?
+ aconnector->base.state->hdcp_content_type : -1);
if (conn_state)
hdcp_update_display(hdcp_work, link_index, aconnector,
- conn_state->hdcp_content_type, false);
+ conn_state->hdcp_content_type, false);
}
-
-/* NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel
+/**
+ * DOC: Add sysfs interface for set/get srm
+ *
+ * NOTE: From the usermode's perspective you only need to call write *ONCE*; the kernel
 * will automatically call the write handler once or twice depending on the size
*
* call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
@@ -587,23 +583,23 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
* sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on
* the last call we will send the full SRM. PSP will fail on every call before the last.
*
- * This means we don't know if the SRM is good until the last call. And because of this limitation we
- * cannot throw errors early as it will stop the kernel from writing to sysfs
+ * This means we don't know if the SRM is good until the last call. And because of this
+ * limitation we cannot throw errors early as it will stop the kernel from writing to sysfs
*
* Example 1:
- * Good SRM size = 5096
- * first call to write 4096 -> PSP fails
- * Second call to write 1000 -> PSP Pass -> SRM is set
+ * Good SRM size = 5096
+ * first call to write 4096 -> PSP fails
+ * Second call to write 1000 -> PSP Pass -> SRM is set
*
* Example 2:
- * Bad SRM size = 4096
- * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
- * is the last call)
+ * Bad SRM size = 4096
+ * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
+ * is the last call)
*
* Solution?:
- * 1: Parse the SRM? -> It is signed so we don't know the EOF
- * 2: We can have another sysfs that passes the size before calling set. -> simpler solution
- * below
+ * 1: Parse the SRM? -> It is signed so we don't know the EOF
+ * 2: We can have another sysfs that passes the size before calling set. -> simpler solution
+ * below
*
* Easy Solution:
* Always call get after Set to verify if set was successful.
@@ -612,20 +608,21 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
* +----------------------+
 * PSP will only update its SRM if it is older than the one we are trying to load.
 * Always do set first, then get.
- * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer
- * version and save it
+ * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer
+ * version and save it
*
- * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the
- * same(newer) version back and save it
+ * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the
+ * same(newer) version back and save it
*
- * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is
- * incorrect/corrupted and we should correct our SRM by getting it from PSP
+ * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is
+ * incorrect/corrupted and we should correct our SRM by getting it from PSP
*/
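
To make the "always call get after set" rule above concrete, here is a minimal user-space sketch (a hypothetical helper, not part of this patch; it assumes the hdcp_srm sysfs path documented above and elides partial-write handling):

    /* Hypothetical user-space helper: SET the SRM, then GET it back to
     * verify that PSP accepted it, per the comment above.
     */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    static unsigned char readback[64 * 1024];

    static int srm_set_and_verify(const unsigned char *srm, size_t len)
    {
    	int fd = open("/sys/class/drm/card0/device/hdcp_srm", O_RDWR);
    	ssize_t got;

    	if (fd < 0)
    		return -1;

    	/* 1. SET: a single write from user space; the kernel may split
    	 * it into one or two calls to the bin_attr handler.
    	 */
    	if (write(fd, srm, len) != (ssize_t)len) {
    		close(fd);
    		return -1;
    	}

    	/* 2. GET: read back and compare to learn whether PSP kept it */
    	lseek(fd, 0, SEEK_SET);
    	got = read(fd, readback, sizeof(readback));
    	close(fd);

    	if (got == (ssize_t)len && memcmp(srm, readback, len) == 0)
    		return 0;	/* PSP accepted our SRM */
    	return -1;		/* PSP kept a different (e.g. newer) SRM */
    }
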
-static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
- uint32_t srm_version = 0;
+ u32 srm_version = 0;
work = container_of(bin_attr, struct hdcp_workqueue, attr);
link_lock(work, true);
@@ -639,19 +636,19 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bi
work->srm_version = srm_version;
}
-
link_lock(work, false);
return count;
}
-static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+static ssize_t srm_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
- uint8_t *srm = NULL;
- uint32_t srm_version;
- uint32_t srm_size;
+ u8 *srm = NULL;
+ u32 srm_version;
+ u32 srm_size;
size_t ret = count;
work = container_of(bin_attr, struct hdcp_workqueue, attr);
@@ -684,12 +681,12 @@ ret:
/* From the hdcp spec (5.Renewability) SRM needs to be stored in a non-volatile memory.
*
* For example,
- * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B"
- * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent
- * across boot/reboots/suspend/resume/shutdown
+ * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B"
+ * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent
+ * across boot/reboots/suspend/resume/shutdown
*
- * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need
- * to make the SRM persistent.
+ * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP
+ * we need to make the SRM persistent.
*
* -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory.
* -The kernel cannot write to the file systems.
@@ -699,8 +696,8 @@ ret:
*
* Usermode can read/write to/from PSP using the sysfs interface
* For example:
- * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile
- * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
+ * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile
+ * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
*/
static const struct bin_attribute data_attr = {
.attr = {.name = "hdcp_srm", .mode = 0664},
@@ -709,10 +706,9 @@ static const struct bin_attribute data_attr = {
.read = srm_data_read,
};
-
-struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
+ struct cp_psp *cp_psp, struct dc *dc)
{
-
int max_caps = dc->caps.max_links;
struct hdcp_workqueue *hdcp_work;
int i = 0;
@@ -721,14 +717,16 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
if (ZERO_OR_NULL_PTR(hdcp_work))
return NULL;
- hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
+ hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
+ sizeof(*hdcp_work->srm), GFP_KERNEL);
- if (hdcp_work->srm == NULL)
+ if (!hdcp_work->srm)
goto fail_alloc_context;
- hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
+ hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
+ sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
- if (hdcp_work->srm_temp == NULL)
+ if (!hdcp_work->srm_temp)
goto fail_alloc_context;
hdcp_work->max_link = max_caps;
@@ -781,10 +779,5 @@ fail_alloc_context:
kfree(hdcp_work);
return NULL;
-
-
-
}
-
-
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index d9a482908380..e94eeeb97688 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -68,15 +68,15 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
}
}
-/* dm_helpers_parse_edid_caps
- *
- * Parse edid caps
+/**
+ * dm_helpers_parse_edid_caps() - Parse edid caps
*
+ * @link: current detected link
* @edid: [in] pointer to edid
- * edid_caps: [in] pointer to edid caps
- * @return
- * void
- * */
+ * @edid_caps: [in] pointer to edid caps
+ *
+ * Return: enum dc_edid_status describing the EDID parse result
+ */
enum dc_edid_status dm_helpers_parse_edid_caps(
struct dc_link *link,
const struct dc_edid *edid,
@@ -255,7 +255,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
/* Accessing the connector state is required for vcpi_slots allocation
* and directly relies on behaviour in commit check
* that blocks before commit guaranteeing that the state
- * is not gonna be swapped while still in use in commit tail */
+ * is not going to be swapped while still in use in commit tail
+ */
if (!aconnector || !aconnector->mst_root)
return false;
@@ -282,7 +283,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
/* mst_mgr->payloads are VC payloads that notify the MST branch using DPCD or
* AUX messages. The sequence is the slot 1-63 allocation sequence for each
* stream. AMD ASIC stream slot allocation should follow the same
- * sequence. copy DRM MST allocation to dc */
+ * sequence. Copy the DRM MST allocation to dc.
+ */
fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
return true;
@@ -426,7 +428,7 @@ void dm_dtn_log_append_v(struct dc_context *ctx,
total = log_ctx->pos + n + 1;
if (total > log_ctx->size) {
- char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);
+ char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);
if (buf) {
memcpy(buf, log_ctx->buf, log_ctx->pos);
@@ -633,7 +635,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
if (ret < 0) {
- DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret);
+ DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
return false;
}
@@ -655,7 +657,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
}
- DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success);
+ DC_LOG_DC("%s: success = %d\n", __func__, success);
return success;
}
@@ -664,7 +666,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
{
unsigned char data[16] = {0};
- DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n");
+ DC_LOG_DC("Start %s\n", __func__);
// Step 2
data[0] = 'P';
@@ -722,7 +724,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
return;
- DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n");
+ DC_LOG_DC("Done %s\n", __func__);
}
/* MST Dock */
@@ -995,9 +997,8 @@ void dm_helpers_override_panel_settings(
struct dc_panel_config *panel_config)
{
// Feature DSC
- if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
panel_config->dsc.disable_dsc_edp = true;
- }
}
void *dm_helpers_allocate_gpu_mem(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 19f543ba7205..51467f132c26 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -120,7 +120,8 @@ static void dm_irq_work_func(struct work_struct *work)
/* Call a DAL subcomponent which registered for interrupt notification
* at INTERRUPT_LOW_IRQ_CONTEXT.
- * (The most common use is HPD interrupt) */
+ * (The most common use is HPD interrupt)
+ */
}
/*
@@ -172,7 +173,8 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
if (handler_removed == false) {
/* Not necessarily an error - caller may not
- * know the context. */
+ * know the context.
+ */
return NULL;
}
@@ -261,7 +263,7 @@ validate_irq_registration_params(struct dc_interrupt_params *int_params,
static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
irq_handler_idx handler_idx)
{
- if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
+ if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
return false;
}
@@ -343,7 +345,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
/* This pointer will be stored by code which requested interrupt
* registration.
* The same pointer will be needed in order to unregister the
- * interrupt. */
+ * interrupt.
+ */
DRM_DEBUG_KMS(
"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
@@ -390,7 +393,8 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
if (handler_list == NULL) {
/* If we got here, it means we searched all irq contexts
- * for this irq source, but the handler was not found. */
+ * for this irq source, but the handler was not found.
+ */
DRM_ERROR(
"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
ih, irq_source);
@@ -450,7 +454,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
/* The handler was removed from the table,
* it means it is safe to flush all the 'work'
- * (because no code can schedule a new one). */
+ * (because no code can schedule a new one).
+ */
lh = &adev->dm.irq_handler_list_low_tab[src];
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
@@ -494,7 +499,7 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
if (!list_empty(hnd_list_l)) {
- list_for_each_safe (entry, tmp, hnd_list_l) {
+ list_for_each_safe(entry, tmp, hnd_list_l) {
handler = list_entry(
entry,
struct amdgpu_dm_irq_handler_data,
@@ -571,7 +576,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
if (list_empty(handler_list))
return;
- list_for_each_entry (handler_data, handler_list, list) {
+ list_for_each_entry(handler_data, handler_list, list) {
if (queue_work(system_highpri_wq, &handler_data->work)) {
work_queued = true;
break;
@@ -627,7 +632,8 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
&adev->dm.irq_handler_list_high_tab[irq_source],
list) {
/* Call a subcomponent which registered for immediate
- * interrupt notification */
+ * interrupt notification
+ */
handler_data->handler(handler_data->handler_arg);
}
@@ -664,7 +670,7 @@ static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
return 0;
}
-static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
+static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
{
switch (type) {
case AMDGPU_HPD_1:
@@ -686,7 +692,7 @@ static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
@@ -698,7 +704,7 @@ static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
static inline int dm_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned crtc_id,
+ unsigned int crtc_id,
enum amdgpu_interrupt_state state,
const enum irq_type dal_irq_type,
const char *func)
@@ -729,7 +735,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned crtc_id,
+ unsigned int crtc_id,
enum amdgpu_interrupt_state state)
{
return dm_irq_state(
@@ -743,7 +749,7 @@ static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned crtc_id,
+ unsigned int crtc_id,
enum amdgpu_interrupt_state state)
{
return dm_irq_state(
@@ -893,13 +899,13 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd,
true);
}
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd_rx,
true);
@@ -928,13 +934,13 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
to_amdgpu_dm_connector(connector);
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd,
false);
}
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd_rx,
false);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 46d0a8f57e55..943959012d04 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -296,6 +296,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!aconnector->edid) {
struct edid *edid;
+
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);
if (!edid) {
@@ -619,8 +620,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
return connector;
}
+void dm_handle_mst_sideband_msg_ready_event(
+ struct drm_dp_mst_topology_mgr *mgr,
+ enum mst_msg_ready_type msg_rdy_type)
+{
+ uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ uint8_t dret;
+ bool new_irq_handled = false;
+ int dpcd_addr;
+ uint8_t dpcd_bytes_to_read;
+ const uint8_t max_process_count = 30;
+ uint8_t process_count = 0;
+ u8 retry;
+ struct amdgpu_dm_connector *aconnector =
+ container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+ const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+ if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+ dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+ /* DPCD 0x200 - 0x201 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT;
+ } else {
+ dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+ /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT_ESI;
+ }
+
+ mutex_lock(&aconnector->handle_mst_msg_ready);
+
+ while (process_count < max_process_count) {
+ u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+ process_count++;
+
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ if (dret != dpcd_bytes_to_read) {
+ DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+ break;
+ }
+
+ DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+ switch (msg_rdy_type) {
+ case DOWN_REP_MSG_RDY_EVENT:
+ /* Only handle DOWN_REP_MSG_RDY case */
+ esi[1] &= DP_DOWN_REP_MSG_RDY;
+ break;
+ case UP_REQ_MSG_RDY_EVENT:
+ /* Only handle UP_REQ_MSG_RDY case */
+ esi[1] &= DP_UP_REQ_MSG_RDY;
+ break;
+ default:
+ /* Handle both cases */
+ esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+ break;
+ }
+
+ if (!esi[1])
+ break;
+
+ /* handle MST irq */
+ if (aconnector->mst_mgr.mst_state)
+ drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+ esi,
+ ack,
+ &new_irq_handled);
+
+ if (new_irq_handled) {
+ /* ACK at DPCD to notify the downstream device */
+ for (retry = 0; retry < 3; retry++) {
+ ssize_t wret;
+
+ wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+ dpcd_addr + 1,
+ ack[1]);
+ if (wret == 1)
+ break;
+ }
+
+ if (retry == 3) {
+ DRM_ERROR("Failed to ack MST event.\n");
+ break;
+ }
+
+ drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+ new_irq_handled = false;
+ } else {
+ break;
+ }
+ }
+
+ mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+ if (process_count == max_process_count)
+ DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+ dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
+ .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
@@ -717,6 +828,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
&dsc_options,
0,
params[i].timing,
+ dc_link_get_highest_encoding_format(params[i].aconnector->dc_link),
&params[i].timing->dsc_cfg)) {
params[i].timing->flags.DSC = 1;
@@ -767,7 +879,9 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
&dsc_options,
- (int) kbps, param.timing, &dsc_config);
+ (int) kbps, param.timing,
+ dc_link_get_highest_encoding_format(param.aconnector->dc_link),
+ &dsc_config);
return dsc_config.bits_per_pixel;
}
@@ -1005,8 +1119,11 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
dsc_policy.min_target_bpp * 16,
dsc_policy.max_target_bpp * 16,
&stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, &params[count].bw_range))
- params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ &stream->timing,
+ dc_link_get_highest_encoding_format(dc_link),
+ &params[count].bw_range))
+ params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(dc_link));
count++;
}
@@ -1466,7 +1583,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
dsc_policy.min_target_bpp * 16,
dsc_policy.max_target_bpp * 16,
&stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, bw_range);
+ &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 1e4ede1e57ab..37c820ab0fdb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -49,6 +49,13 @@
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
+enum mst_msg_ready_type {
+ NONE_MSG_RDY_EVENT = 0,
+ DOWN_REP_MSG_RDY_EVENT = 1,
+ UP_REQ_MSG_RDY_EVENT = 2,
+ DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
+void dm_handle_mst_sideband_msg_ready_event(
+ struct drm_dp_mst_topology_mgr *mgr,
+ enum mst_msg_ready_type msg_rdy_type);
+
struct dsc_mst_fairness_vars {
int pbn;
bool dsc_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 322668973747..2198df96ed6f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -113,6 +113,11 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_ABGR16161616,
+ DRM_FORMAT_ARGB16161616F,
};
uint32_t format = plane_state->fb->format->format;
unsigned int i;
@@ -164,7 +169,7 @@ static bool modifier_has_dcc(uint64_t modifier)
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}
-static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@@ -581,7 +586,7 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev,
int pkrs = 0;
u32 gb_addr_config;
u8 i = 0;
- unsigned swizzle_r_x;
+ unsigned int swizzle_r_x;
uint64_t modifier_r_x;
uint64_t modifier_dcc_best;
uint64_t modifier_dcc_4k;
@@ -698,8 +703,8 @@ static int get_plane_formats(const struct drm_plane *plane,
* caps list.
*/
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
+ (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
if (num_formats >= max_formats)
break;
@@ -717,25 +722,29 @@ static int get_plane_formats(const struct drm_plane *plane,
formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
}
- break;
+ } else {
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_OVERLAY:
+ for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
- case DRM_PLANE_TYPE_OVERLAY:
- for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
- if (num_formats >= max_formats)
- break;
+ formats[num_formats++] = overlay_formats[i];
+ }
+ break;
- formats[num_formats++] = overlay_formats[i];
- }
- break;
+ case DRM_PLANE_TYPE_CURSOR:
+ for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
- case DRM_PLANE_TYPE_CURSOR:
- for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
- if (num_formats >= max_formats)
- break;
+ formats[num_formats++] = cursor_formats[i];
+ }
+ break;
- formats[num_formats++] = cursor_formats[i];
+ default:
+ break;
}
- break;
}
return num_formats;
@@ -1459,6 +1468,15 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_blend_mode_property(plane, blend_caps);
}
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ drm_plane_create_zpos_immutable_property(plane, 0);
+ } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ unsigned int zpos = 1 + drm_plane_index(plane);
+
+ drm_plane_create_zpos_property(plane, zpos, 1, 254);
+ } else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ drm_plane_create_zpos_immutable_property(plane, 255);
+ }
+
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
plane_cap &&
(plane_cap->pixel_format_support.nv12 ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 75284e2cec74..848c5b4bb301 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -334,7 +334,8 @@ bool dm_pp_get_clock_levels_by_type(
if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
/* This clock is higher than the validation clock.
* That means the previous one is the highest
- * non-boosted one. */
+ * non-boosted one.
+ */
DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
dc_clks->num_levels, i);
dc_clks->num_levels = i > 0 ? i : 1;
@@ -406,10 +407,10 @@ bool dm_pp_notify_wm_clock_changes(
* TODO: expand this to other ASICs
*/
if ((adev->asic_type >= CHIP_POLARIS10) &&
- (adev->asic_type <= CHIP_VEGAM) &&
- !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
- (void *)wm_with_clock_ranges))
- return true;
+ (adev->asic_type <= CHIP_VEGAM) &&
+ !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
+ (void *)wm_with_clock_ranges))
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 4f61d4f257cd..08ce3bb8f640 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -166,6 +166,7 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*/
if (vsync_rate_hz != 0) {
unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
+
num_frames_static = (30000 / frame_time_microsec) + 1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 352e9afb85c6..e295a839ab47 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -24,7 +24,7 @@
*/
#include "dm_services.h"
-#include "conversion.h"
+#include "basics/conversion.h"
#define DIVIDER 10000
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index 84aeccf36b4b..6d2924114a3e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -50,12 +50,11 @@ bool dal_vector_construct(
return true;
}
-static bool dal_vector_presized_costruct(
- struct vector *vector,
- struct dc_context *ctx,
- uint32_t count,
- void *initial_value,
- uint32_t struct_size)
+static bool dal_vector_presized_costruct(struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t count,
+ void *initial_value,
+ uint32_t struct_size)
{
uint32_t i;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 27af9d3c2b73..4f005ae1516c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2593,11 +2593,10 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
-static enum bp_result update_slot_layout_info(
- struct dc_bios *dcb,
- unsigned int i,
- struct slot_layout_info *slot_layout_info,
- unsigned int record_offset)
+static enum bp_result update_slot_layout_info(struct dc_bios *dcb,
+ unsigned int i,
+ struct slot_layout_info *slot_layout_info,
+ unsigned int record_offset)
{
unsigned int j;
struct bios_parser *bp;
@@ -2696,10 +2695,9 @@ static enum bp_result update_slot_layout_info(
}
-static enum bp_result get_bracket_layout_record(
- struct dc_bios *dcb,
- unsigned int bracket_layout_id,
- struct slot_layout_info *slot_layout_info)
+static enum bp_result get_bracket_layout_record(struct dc_bios *dcb,
+ unsigned int bracket_layout_id,
+ struct slot_layout_info *slot_layout_info)
{
unsigned int i;
unsigned int record_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index cce47d3f1a13..540d19efad8f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -340,9 +340,8 @@ static struct atom_display_object_path_v2 *get_bios_object(
}
/* from graphics_object_id, find display path which includes the object_id */
-static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(
- struct bios_parser *bp,
- struct graphics_object_id id)
+static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(struct bios_parser *bp,
+ struct graphics_object_id id)
{
unsigned int i;
struct graphics_object_id obj_id = {0};
@@ -521,9 +520,8 @@ static enum bp_result get_gpio_i2c_info(
return BP_RESULT_OK;
}
-static struct atom_hpd_int_record *get_hpd_record_for_path_v3(
- struct bios_parser *bp,
- struct atom_display_object_path_v3 *object)
+static struct atom_hpd_int_record *get_hpd_record_for_path_v3(struct bios_parser *bp,
+ struct atom_display_object_path_v3 *object)
{
struct atom_common_record_header *header;
uint32_t offset;
@@ -2175,9 +2173,8 @@ static struct atom_disp_connector_caps_record *get_disp_connector_caps_record(
return NULL;
}
-static struct atom_connector_caps_record *get_connector_caps_record(
- struct bios_parser *bp,
- struct atom_display_object_path_v3 *object)
+static struct atom_connector_caps_record *get_connector_caps_record(struct bios_parser *bp,
+ struct atom_display_object_path_v3 *object)
{
struct atom_common_record_header *header;
uint32_t offset;
@@ -2264,9 +2261,8 @@ static enum bp_result bios_parser_get_disp_connector_caps_info(
return BP_RESULT_OK;
}
-static struct atom_connector_speed_record *get_connector_speed_cap_record(
- struct bios_parser *bp,
- struct atom_display_object_path_v3 *object)
+static struct atom_connector_speed_record *get_connector_speed_cap_record(struct bios_parser *bp,
+ struct atom_display_object_path_v3 *object)
{
struct atom_common_record_header *header;
uint32_t offset;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 7ccd96959256..3db4ef564b99 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa(
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
+
+ /* Check stream/link detection to ensure that the PHY is active */
+ if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+ display_count++;
}
for (i = 0; i < dc->link_count; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 2f7c8996b19d..3ba2e13d691d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -87,6 +87,14 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define regCLK1_CLK2_BYPASS_CNTL 0x029c
+#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK2_BYPASS_CNTL__LK2_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__LK2_BYPASS_DIV_MASK 0x000F0000L
+
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
@@ -436,6 +444,11 @@ static DpmClocks314_t dummy_clocks;
static struct dcn314_watermarks dummy_wms = { 0 };
+static struct dcn314_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
+
static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table)
{
int i, num_valid_sets;
@@ -715,6 +728,20 @@ static struct clk_mgr_funcs dcn314_funcs = {
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
+static void dcn314_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t clock_source;
+
+ REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+}
+
void dcn314_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn314 *clk_mgr,
@@ -781,9 +808,11 @@ void dcn314_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
- dce_clock_read_ss_info(&clk_mgr->base);
+
+ dcn314_read_ss_info_from_lut(&clk_mgr->base);
/*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/
- //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz);
+ clk_mgr->base.base.dprefclk_khz =
+ dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
clk_mgr->base.base.bw_params = &dcn314_bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index 171f84340eb2..e0670dafe260 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -28,6 +28,8 @@
#define __DCN314_CLK_MGR_H__
#include "clk_mgr_internal.h"
+#define NUM_CLOCK_SOURCES 5
+
struct dcn314_watermarks;
struct dcn314_smu_watermark_set {
@@ -40,6 +42,11 @@ struct clk_mgr_dcn314 {
struct dcn314_smu_watermark_set smu_wm_set;
};
+struct dcn314_ss_info_table {
+ uint32_t ss_divider;
+ uint32_t ss_percentage[NUM_CLOCK_SOURCES];
+};
+
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index d7de756301cf..0349631991b8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -55,14 +55,6 @@ struct IP_BASE
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
-static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0 } },
- { { 0x00016E00, 0x02401C00, 0, 0, 0, 0 } },
- { { 0x00017000, 0x02402000, 0, 0, 0, 0 } },
- { { 0x00017200, 0x02402400, 0, 0, 0, 0 } },
- { { 0x0001B000, 0x0242D800, 0, 0, 0, 0 } },
- { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0 } },
- { { 0x0001B400, 0x0242E000, 0, 0, 0, 0 } } } };
-
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
@@ -73,9 +65,6 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
-#define REG(reg_name) \
- (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
-
#define TO_CLK_MGR_DCN316(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn316, base)
@@ -577,36 +566,6 @@ static struct clk_mgr_funcs dcn316_funcs = {
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
-static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
-{
- /* get FbMult value */
- struct fixed31_32 pll_req;
- unsigned int fbmult_frac_val = 0;
- unsigned int fbmult_int_val = 0;
-
- /*
- * Register value of fbmult is in 8.16 format, we are converting to 31.32
- * to leverage the fix point operations available in driver
- */
-
- REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
- REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
-
- pll_req = dc_fixpt_from_int(fbmult_int_val);
-
- /*
- * since fractional part is only 16 bit in register definition but is 32 bit
- * in our fix point definiton, need to shift left by 16 to obtain correct value
- */
- pll_req.value |= fbmult_frac_val << 16;
-
- /* multiply by REFCLK period */
- pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
-
- /* integer part is now VCO frequency in kHz */
- return dc_fixpt_floor(pll_req);
-}
-
void dcn316_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn316 *clk_mgr,
@@ -660,7 +619,8 @@ void dcn316_clk_mgr_construct(
clk_mgr->base.smu_present = true;
// Skip this for now as it did not work on DCN315, re-enable during bring-up
- clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+ //clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+ clk_mgr->base.base.dentist_vco_freq_khz = 2500000;
/* in case we don't get a value from the register, use default */
if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index cb992aca760d..0701d03b88a9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -297,7 +297,7 @@ void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz, prev_dppclk_khz;
+ int dpp_inst = 0, dppclk_khz, prev_dppclk_khz;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index d133e4186a52..7cac14f493f6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1047,8 +1047,10 @@ static void disable_all_writeback_pipes_for_stream(
stream->writeback_info[i].wb_enabled = false;
}
-static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
- struct dc_stream_state *stream, bool lock)
+static void apply_ctx_interdependent_lock(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream,
+ bool lock)
{
int i;
@@ -3582,9 +3584,9 @@ static void commit_planes_for_stream_fast(struct dc *dc,
context->block_sequence_steps);
/* Clear update flags so next flip doesn't have redundant programming
* (if there's no stream update, the update flags are not cleared).
+ * Surface updates are cleared unconditionally at the beginning of each flip,
+ * so no need to clear here.
*/
- if (top_pipe_to_program->plane_state)
- top_pipe_to_program->plane_state->update_flags.raw = 0;
if (top_pipe_to_program->stream)
top_pipe_to_program->stream->update_flags.raw = 0;
}
@@ -4088,9 +4090,9 @@ static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context = dc_create_state(dc);
- enum pipe_split_policy tmp_mpc_policy;
- bool temp_dynamic_odm_policy;
- bool temp_subvp_policy;
+ enum pipe_split_policy tmp_mpc_policy = 0;
+ bool temp_dynamic_odm_policy = 0;
+ bool temp_subvp_policy = 0;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
unsigned int pipe_in_use = 0;
@@ -4284,7 +4286,8 @@ static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_c
return false;
}
-static bool full_update_required(struct dc_surface_update *srf_updates,
+static bool full_update_required(struct dc *dc,
+ struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_update *stream_update,
struct dc_stream_state *stream)
@@ -4292,6 +4295,7 @@ static bool full_update_required(struct dc_surface_update *srf_updates,
int i;
struct dc_stream_status *stream_status;
+ const struct dc_state *context = dc->current_state;
for (i = 0; i < surface_count; i++) {
if (srf_updates &&
@@ -4302,7 +4306,11 @@ static bool full_update_required(struct dc_surface_update *srf_updates,
srf_updates[i].in_transfer_func ||
srf_updates[i].func_shaper ||
srf_updates[i].lut3d_func ||
- srf_updates[i].blend_tf))
+ srf_updates[i].blend_tf ||
+ srf_updates[i].surface->force_full_update ||
+ (srf_updates[i].flip_addr &&
+ srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
+ !is_surface_in_context(context, srf_updates[i].surface)))
return true;
}
@@ -4340,18 +4348,21 @@ static bool full_update_required(struct dc_surface_update *srf_updates,
if (stream_status == NULL || stream_status->plane_count != surface_count)
return true;
}
+ if (dc->idle_optimizations_allowed)
+ return true;
return false;
}
-static bool fast_update_only(struct dc_fast_update *fast_update,
+static bool fast_update_only(struct dc *dc,
+ struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_update *stream_update,
struct dc_stream_state *stream)
{
return fast_updates_exist(fast_update, surface_count)
- && !full_update_required(srf_updates, surface_count, stream_update, stream);
+ && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
bool dc_update_planes_and_stream(struct dc *dc,
@@ -4369,8 +4380,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
* cause underflow. Apply stream configuration with minimal pipe
* split first to avoid unsupported transitions for active pipes.
*/
- bool force_minimal_pipe_splitting;
- bool is_plane_addition;
+ bool force_minimal_pipe_splitting = 0;
+ bool is_plane_addition = 0;
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
@@ -4423,7 +4434,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
}
update_seamless_boot_flags(dc, context, surface_count, stream);
- if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) &&
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
!dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
@@ -4569,7 +4580,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
update_seamless_boot_flags(dc, context, surface_count, stream);
- if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) &&
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
!dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
@@ -5273,3 +5284,56 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
+
+/*****************************************************************************
+ * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
+ * ABM
+ * @dc: dc structure
+ * @stream: stream where vsync int state changed
+ * @pData: abm hw states
+ *
+ ****************************************************************************/
+bool dc_abm_save_restore(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct abm_save_restore *pData)
+{
+ int i;
+ int edp_num;
+ struct pipe_ctx *pipe = NULL;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+
+ /* find primary pipe associated with stream */
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg)
+ break;
+ }
+
+ if (i == MAX_PIPES) {
+ ASSERT(0);
+ return false;
+ }
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+
+ /* Determine panel inst */
+ for (i = 0; i < edp_num; i++)
+ if (edp_links[i] == link)
+ break;
+
+ if (i == edp_num)
+ return false;
+
+ if (pipe->stream_res.abm &&
+ pipe->stream_res.abm->funcs->save_restore)
+ return pipe->stream_res.abm->funcs->save_restore(
+ pipe->stream_res.abm,
+ i,
+ pData);
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index cb2bf9a466f5..f99ec1b0efaf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -187,6 +187,7 @@ static bool is_ycbcr709_limited_type(
ret = true;
return ret;
}
+
static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
{
enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 18e098568cb4..0d19d4cd1916 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -314,6 +314,24 @@ const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link)
return link->dc->link_srv->dp_get_verified_link_cap(link);
}
+enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link)
+{
+ if (dc_is_dp_signal(link->connector_signal)) {
+ if (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_DVI_DONGLE &&
+ link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE)
+ return DC_LINK_ENCODING_HDMI_TMDS;
+ else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) ==
+ DP_8b_10b_ENCODING)
+ return DC_LINK_ENCODING_DP_8b_10b;
+ else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) ==
+ DP_128b_132b_ENCODING)
+ return DC_LINK_ENCODING_DP_128b_132b;
+ } else if (dc_is_hdmi_signal(link->connector_signal)) {
+ }
+
+ return DC_LINK_ENCODING_UNSPECIFIED;
+}
+
bool dc_link_is_dp_sink_present(struct dc_link *link)
{
return link->dc->link_srv->dp_is_sink_present(link);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 2f3d9a698486..d0f4b86cadf1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -69,9 +69,16 @@
#include "../dcn32/dcn32_resource.h"
#include "../dcn321/dcn321_resource.h"
+#define VISUAL_CONFIRM_BASE_DEFAULT 3
+#define VISUAL_CONFIRM_BASE_MIN 1
+#define VISUAL_CONFIRM_BASE_MAX 10
+#define VISUAL_CONFIRM_DPP_OFFSET 3
#define DC_LOGGER_INIT(logger)
+#define HEAD_NOT_IN_ODM -2
+#define UNABLE_TO_SPLIT -1
+
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -740,7 +747,12 @@ int get_num_mpc_splits(struct pipe_ctx *pipe)
int get_num_odm_splits(struct pipe_ctx *pipe)
{
int odm_split_count = 0;
- struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+ struct pipe_ctx *next_pipe = NULL;
+
+ while (pipe->top_pipe)
+ pipe = pipe->top_pipe;
+
+ next_pipe = pipe->next_odm_pipe;
while (next_pipe) {
odm_split_count++;
next_pipe = next_pipe->next_odm_pipe;
@@ -753,32 +765,35 @@ int get_num_odm_splits(struct pipe_ctx *pipe)
return odm_split_count;
}
-static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
+static int get_odm_split_index(struct pipe_ctx *pipe_ctx)
{
- *split_count = get_num_odm_splits(pipe_ctx);
- *split_idx = 0;
- if (*split_count == 0) {
- /*Check for mpc split*/
- struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
-
- *split_count = get_num_mpc_splits(pipe_ctx);
- while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
- (*split_idx)++;
- split_pipe = split_pipe->top_pipe;
- }
+ struct pipe_ctx *split_pipe = NULL;
+ int index = 0;
- /* MPO window on right side of ODM split */
- if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe)
- (*split_idx)++;
- } else {
- /*Get odm split index*/
- struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
+ while (pipe_ctx->top_pipe)
+ pipe_ctx = pipe_ctx->top_pipe;
- while (split_pipe) {
- (*split_idx)++;
- split_pipe = split_pipe->prev_odm_pipe;
- }
+ split_pipe = pipe_ctx->prev_odm_pipe;
+
+ while (split_pipe) {
+ index++;
+ split_pipe = split_pipe->prev_odm_pipe;
}
+
+ return index;
+}
+
+static int get_mpc_split_index(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+ int index = 0;
+
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ index++;
+ split_pipe = split_pipe->top_pipe;
+ }
+
+ return index;
}
/*
@@ -800,82 +815,357 @@ static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
}
}
-static void calculate_recout(struct pipe_ctx *pipe_ctx)
+static struct rect intersect_rec(const struct rect *r0, const struct rect *r1)
{
- const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- const struct dc_stream_state *stream = pipe_ctx->stream;
- struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
- struct rect surf_clip = plane_state->clip_rect;
- bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
- int split_count, split_idx;
+ struct rect rec;
+ int r0_x_end = r0->x + r0->width;
+ int r1_x_end = r1->x + r1->width;
+ int r0_y_end = r0->y + r0->height;
+ int r1_y_end = r1->y + r1->height;
+
+ rec.x = r0->x > r1->x ? r0->x : r1->x;
+ rec.width = r0_x_end > r1_x_end ? r1_x_end - rec.x : r0_x_end - rec.x;
+ rec.y = r0->y > r1->y ? r0->y : r1->y;
+ rec.height = r0_y_end > r1_y_end ? r1_y_end - rec.y : r0_y_end - rec.y;
+
+ /* in case there is no intersection */
+ if (rec.width < 0 || rec.height < 0)
+ memset(&rec, 0, sizeof(rec));
+
+ return rec;
+}
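
As a quick sanity check of the clamping above (hypothetical coordinates): intersecting {x=100, y=0, w=200, h=100} with {x=250, y=50, w=200, h=100} yields {x=250, y=50, w=50, h=50}; fully disjoint inputs produce a negative width or height, which the memset collapses to an all-zero rect.
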
- calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
- if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
- split_idx = 0;
+static struct rect shift_rec(const struct rect *rec_in, int x, int y)
+{
+ struct rect rec_out = *rec_in;
+
+ rec_out.x += x;
+ rec_out.y += y;
+
+ return rec_out;
+}
+
+static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ int odm_slice_count = get_num_odm_splits(pipe_ctx) + 1;
+ int odm_slice_idx = get_odm_split_index(pipe_ctx);
+ bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count;
+ int h_active = stream->timing.h_addressable +
+ stream->timing.h_border_left +
+ stream->timing.h_border_right;
+ int odm_slice_width = h_active / odm_slice_count;
+ struct rect odm_rec;
+
+ odm_rec.x = odm_slice_width * odm_slice_idx;
+ odm_rec.width = is_last_odm_slice ?
+ /* last slice width is the remainder of h_active */
+ h_active - odm_slice_width * (odm_slice_count - 1) :
+ /* odm slice width is the floor of h_active / count */
+ odm_slice_width;
+ odm_rec.y = 0;
+ odm_rec.height = stream->timing.v_addressable +
+ stream->timing.v_border_bottom +
+ stream->timing.v_border_top;
+
+ return odm_rec;
+}
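
As a standalone illustration of the slice-width rule just added (hypothetical timing values, not driver code), the floor/remainder split can be checked like this:

    #include <stdio.h>

    int main(void)
    {
    	int h_active = 2560, slice_count = 3;
    	int width = h_active / slice_count;	/* floor: 853 */
    	int idx;

    	for (idx = 0; idx < slice_count; idx++) {
    		int is_last = (idx + 1) == slice_count;
    		int w = is_last ? h_active - width * (slice_count - 1)
    				: width;

    		printf("slice %d: x=%d width=%d\n", idx, width * idx, w);
    	}
    	return 0;	/* prints widths 853, 853, 854 */
    }
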
+static struct rect calculate_plane_rec_in_timing_active(
+ struct pipe_ctx *pipe_ctx,
+ const struct rect *rec_in)
+{
/*
- * Only the leftmost ODM pipe should be offset by a nonzero distance
+ * The following diagram shows an example where we map a 1920x1200
+ * desktop to a 2560x1440 timing with a plane rect in the middle
+ * of the screen. To map a plane rect from Stream Source to Timing
+ * Active space, we first multiply stream scaling ratios (i.e 2304/1920
+ * horizontal and 1440/1200 vertical) to the plane's x and y, then
+ * we add stream destination offsets (i.e 128 horizontal, 0 vertical).
+ * This will give us a plane rect's position in Timing Active. However
+ * we have to remove the fractional part. The rule is that we find left/right
+ * and top/bottom positions and round the value to the adjacent integer.
+ *
+ * Stream Source Space
+ * ------------
+ * __________________________________________________
+ * |Stream Source (1920 x 1200) ^ |
+ * | y |
+ * | <------- w --------|> |
+ * | __________________V |
+ * |<-- x -->|Plane//////////////| ^ |
+ * | |(pre scale)////////| | |
+ * | |///////////////////| | |
+ * | |///////////////////| h |
+ * | |///////////////////| | |
+ * | |///////////////////| | |
+ * | |///////////////////| V |
+ * | |
+ * | |
+ * |__________________________________________________|
+ *
+ *
+ * Timing Active Space
+ * ---------------------------------
+ *
+ * Timing Active (2560 x 1440)
+ * __________________________________________________
* |*****| Stream Destination (2304 x 1440) |*****|
+ * |*****| |*****|
+ * |<128>| |*****|
+ * |*****| __________________ |*****|
+ * |*****| |Plane/////////////| |*****|
+ * |*****| |(post scale)//////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |*****|
+ * |*****| |*****|
+ * |*****| |*****|
+ * |*****|______________________________________|*****|
+ *
+ * So the resulting formulas are shown below:
+ *
+ * recout_x = 128 + round(plane_x * 2304 / 1920)
+ * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x
* recout_y = 0 + round(plane_y * 1440 / 1200)
+ * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y
+ *
+ * NOTE: fixed point division is not error free. To reduce errors
+ * introduced by fixed point division, we divide only after
+ * multiplication is complete.
*/
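
To make the formulas concrete, plugging a hypothetical plane rect (x=320, y=300, 1280x600 inside the 1920x1200 Stream Source) into them gives:

    recout_x = 128 + round(320  * 2304 / 1920)       = 128 + 384        = 512
    recout_w = 128 + round(1600 * 2304 / 1920) - 512 = 128 + 1920 - 512 = 1536
    recout_y =   0 + round(300  * 1440 / 1200)       = 360
    recout_h =   0 + round(900  * 1440 / 1200) - 360 = 1080 - 360       = 720

i.e. the 1280x600 plane scales by exactly 1.2 in both directions to a 1536x720 recout, with no rounding error for these values.
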
- if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) {
- /* MPO window on right side of ODM split */
- data->recout.x = stream->dst.x + (surf_clip.x - stream->src.x - stream->src.width/2) *
- stream->dst.width / stream->src.width;
- } else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
- data->recout.x = stream->dst.x;
- if (stream->src.x < surf_clip.x)
- data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
- / stream->src.width;
- } else
- data->recout.x = 0;
-
- if (stream->src.x > surf_clip.x)
- surf_clip.width -= stream->src.x - surf_clip.x;
- data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
- if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
- data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
-
- data->recout.y = stream->dst.y;
- if (stream->src.y < surf_clip.y)
- data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
- / stream->src.height;
- else if (stream->src.y > surf_clip.y)
- surf_clip.height -= stream->src.y - surf_clip.y;
-
- data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
- if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
- data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
-
- /* Handle h & v split */
- if (split_tb) {
- ASSERT(data->recout.height % 2 == 0);
- data->recout.height /= 2;
- } else if (split_count) {
- if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
- /* extra pixels in the division remainder need to go to pipes after
- * the extra pixel index minus one(epimo) defined here as:
- */
- int epimo = split_count - data->recout.width % (split_count + 1);
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect rec_out = {0};
+ struct fixed31_32 temp;
- data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
- if (split_idx > epimo)
- data->recout.x += split_idx - epimo - 1;
- ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0);
- data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
- } else {
- /* odm */
- if (split_idx == split_count) {
- /* rightmost pipe is the remainder recout */
- data->recout.width -= data->h_active * split_count - data->recout.x;
-
- /* ODM combine cases with MPO we can get negative widths */
- if (data->recout.width < 0)
- data->recout.width = 0;
-
- data->recout.x = 0;
- } else
- data->recout.width = data->h_active - data->recout.x;
- }
+ temp = dc_fixpt_from_fraction(rec_in->x * stream->dst.width,
+ stream->src.width);
+ rec_out.x = stream->dst.x + dc_fixpt_round(temp);
+
+ temp = dc_fixpt_from_fraction(
+ (rec_in->x + rec_in->width) * stream->dst.width,
+ stream->src.width);
+ rec_out.width = stream->dst.x + dc_fixpt_round(temp) - rec_out.x;
+
+ temp = dc_fixpt_from_fraction(rec_in->y * stream->dst.height,
+ stream->src.height);
+ rec_out.y = stream->dst.y + dc_fixpt_round(temp);
+
+ temp = dc_fixpt_from_fraction(
+ (rec_in->y + rec_in->height) * stream->dst.height,
+ stream->src.height);
+ rec_out.height = stream->dst.y + dc_fixpt_round(temp) - rec_out.y;
+
+ return rec_out;
+}
+
+static struct rect calculate_mpc_slice_in_timing_active(
+ struct pipe_ctx *pipe_ctx,
+ struct rect *plane_clip_rec)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ int mpc_slice_count = get_num_mpc_splits(pipe_ctx) + 1;
+ int mpc_slice_idx = get_mpc_split_index(pipe_ctx);
+ int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
+ struct rect mpc_rec;
+
+ mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
+ mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+ ASSERT(mpc_slice_count == 1 ||
+ stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ mpc_rec.width % 2 == 0);
+
+	/* extra pixels in the division remainder need to go to the pipes after
+	 * the "extra pixel index minus one" (epimo) computed above.
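+	 * For example (hypothetical numbers): a width of 1000 across 3 slices
+	 * gives epimo = 3 - 1000 % 3 - 1 = 1, so slices 0 and 1 are 333 pixels
+	 * wide and slice 2 (index > epimo) takes the extra pixel, width 334.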
+ */
+ if (mpc_slice_idx > epimo) {
+ mpc_rec.x += mpc_slice_idx - epimo - 1;
+ mpc_rec.width += 1;
}
+
+ if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+ ASSERT(mpc_rec.height % 2 == 0);
+ mpc_rec.height /= 2;
+ }
+ return mpc_rec;
+}
+
+static void adjust_recout_for_visual_confirm(struct rect *recout,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ int dpp_offset, base_offset;
+
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
+ return;
+
+ dpp_offset = pipe_ctx->plane_res.dpp->inst * VISUAL_CONFIRM_DPP_OFFSET;
+
+	if (dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_BASE_MIN &&
+	    dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_BASE_MAX)
+ base_offset = dc->debug.visual_confirm_rect_height;
+ else
+ base_offset = VISUAL_CONFIRM_BASE_DEFAULT;
+
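+	/* each DPP instance shortens its recout by a different amount so the
+	 * visual confirm bars of overlapping pipes remain distinguishable
+	 */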
+ recout->height -= base_offset;
+ recout->height -= dpp_offset;
+}
+
+/*
+ * This function maps a plane clip from Stream Source Space to ODM Slice Space
+ * and calculates the rect of the overlapping area between the MPC slice of
+ * the plane clip, the ODM slice associated with the pipe context, and the
+ * stream destination rect.
+ */
+static void calculate_recout(struct pipe_ctx *pipe_ctx)
+{
+ /*
+ * A plane clip represents the desired plane size and position in Stream
+ * Source Space. Stream Source is the destination where all planes are
+ * blended (i.e. positioned, scaled and overlaid). It is a canvas where
+ * all planes associated with the current stream are drawn together.
+	 * After Stream Source is completed, we further scale and reposition
+	 * the entire canvas of the stream source to Stream Destination in
+	 * Timing Active Space. This could be due to a display overscan
+	 * adjustment, where we need to rescale and reposition all the planes
+	 * so they fit into a TV with overscan, or due to downscale/upscale
+	 * features such as GPU scaling or VSR.
+ *
+	 * This two step blending is a virtual procedure in software. In
+	 * hardware there is no such thing as Stream Source; all planes are
+	 * blended once in Timing Active Space. Software virtualizes a Stream
+	 * Source space to decouple the math complexity so the scaling
+	 * parameter calculation focuses on one step at a time.
+ *
+	 * In the following two diagrams, the user applied a 10% overscan
+	 * adjustment, so the Stream Source needs to be scaled down a little
+	 * before mapping to Timing Active Space. As a result the Plane Clip is
+	 * scaled down by the same ratio, and the Plane Clip position (i.e. x
+	 * and y) with respect to Stream Source is scaled down as well. To map
+	 * it into Timing Active Space, additional x and y offsets from Stream
+	 * Destination are added to the Plane Clip.
+ *
+ * Stream Source Space
+ * ------------
+ * __________________________________________________
+ * |Stream Source (3840 x 2160) ^ |
+ * | y |
+ * | | |
+ * | __________________V |
+ * |<-- x -->|Plane Clip/////////| |
+ * | |(pre scale)////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |
+ * | |
+ * |__________________________________________________|
+ *
+ *
+ * Timing Active Space (3840 x 2160)
+ * ---------------------------------
+ *
+ * Timing Active
+ * __________________________________________________
+ * | y_____________________________________________ |
+ * |x |Stream Destination (3456 x 1944) | |
+ * | | | |
+ * | | __________________ | |
+ * | | |Plane Clip////////| | |
+ * | | |(post scale)//////| | |
+ * | | |//////////////////| | |
+ * | | |//////////////////| | |
+ * | | |//////////////////| | |
+ * | | |//////////////////| | |
+ * | | | |
+ * | | | |
+ * | |____________________________________________| |
+ * |__________________________________________________|
+ *
+ *
+	 * In Timing Active Space a plane clip can be further sliced into
+	 * pieces called MPC slices. Each Pipe Context is responsible for
+	 * processing only one MPC slice so the plane processing workload can
+	 * be distributed to multiple DPP pipes. MPC slices can be blended
+	 * together into a single ODM slice. Each ODM slice is responsible for
+	 * processing a horizontally divided portion of Timing Active so the
+	 * output pixel processing workload can be distributed to multiple OPP
+	 * pipes. All ODM slices are mapped together in the ODM block so MPC
+	 * slices belonging to different ODM slices can be pieced together to
+	 * form a single image in Timing Active. An MPC slice must belong to a
+	 * single ODM slice; if an MPC slice crosses an ODM slice boundary, it
+	 * needs to be divided into two MPC slices, one for each ODM slice.
+ *
+	 * In the following diagram the output pixel processing workload is
+	 * divided horizontally into two ODM slices, one for each OPP blend
+	 * tree. The OPP0 blend tree is responsible for processing the left
+	 * half of Timing Active, while the OPP2 blend tree is responsible for
+	 * the right half.
+ *
+	 * The plane has two MPC slices. However, since the right MPC slice
+	 * crosses the ODM boundary, two DPP pipes are needed for it, one for
+	 * each OPP blend tree (i.e. DPP1 for the OPP0 blend tree and DPP2 for
+	 * the OPP2 blend tree).
+	 *
+	 * Assume we have a Pipe Context associated with OPP0 and DPP1 working
+	 * on processing the plane in the diagram. We want to know the width
+	 * and height of the shaded rectangle and its position relative to ODM
+	 * slice0. This is called the recout of the pipe context.
+ *
+	 * Planes can have arbitrary size and position, and there can be an
+	 * arbitrary number of MPC and ODM slices. The algorithm needs to take
+	 * all scenarios into account.
+ *
+ * Timing Active Space (3840 x 2160)
+ * ---------------------------------
+ *
+ * Timing Active
+ * __________________________________________________
+ * |OPP0(ODM slice0)^ |OPP2(ODM slice1) |
+ * | y | |
+ * | | <- w -> |
+ * | _____V________|____ |
+ * | |DPP0 ^ |DPP1 |DPP2| |
+ * |<------ x |-----|->|/////| | |
+ * | | | |/////| | |
+ * | | h |/////| | |
+ * | | | |/////| | |
+ * | |_____V__|/////|____| |
+ * | | |
+ * | | |
+ * | | |
+ * |_________________________|________________________|
+ *
+ *
+ */
+ struct rect plane_clip;
+ struct rect mpc_slice_of_plane_clip;
+ struct rect odm_slice;
+ struct rect overlapping_area;
+
+ plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx,
+ &pipe_ctx->plane_state->clip_rect);
+ /* guard plane clip from drawing beyond stream dst here */
+ plane_clip = intersect_rec(&plane_clip,
+ &pipe_ctx->stream->dst);
+ mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active(
+ pipe_ctx, &plane_clip);
+ odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx);
+ overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice);
+ /* shift the overlapping area so it is with respect to current ODM
+ * slice's position
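+	 * (e.g. with a hypothetical ODM slice at x = 1920, an overlap starting
+	 * at x = 2048 in Timing Active becomes recout.x = 128 in slice space)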
+ */
+ pipe_ctx->plane_res.scl_data.recout = shift_rec(
+ &overlapping_area,
+ -odm_slice.x, -odm_slice.y);
+
+ adjust_recout_for_visual_confirm(&pipe_ctx->plane_res.scl_data.recout,
+ pipe_ctx);
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -997,34 +1287,25 @@ static void calculate_init_and_vp(
static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- const struct dc_stream_state *stream = pipe_ctx->stream;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = plane_state->src_rect;
+ struct rect recout_dst_in_active_timing;
+ struct rect recout_clip_in_active_timing;
+ struct rect recout_clip_in_recout_dst;
+ struct rect odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx);
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
- calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
- /*
- * recout full is what the recout would have been if we didnt clip
- * the source plane at all. We only care about left(ro_lb) and top(ro_tb)
- * offsets of recout within recout full because those are the directions
- * we scan from and therefore the only ones that affect inits.
- */
- recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
- * stream->dst.width / stream->src.width;
- recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
- * stream->dst.height / stream->src.height;
- if (pipe_ctx->prev_odm_pipe && split_idx)
- ro_lb = data->h_active * split_idx - recout_full_x;
- else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe)
- ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x;
- else
- ro_lb = data->recout.x - recout_full_x;
- ro_tb = data->recout.y - recout_full_y;
- ASSERT(ro_lb >= 0 && ro_tb >= 0);
-
+ recout_clip_in_active_timing = shift_rec(
+ &data->recout, odm_slice.x, odm_slice.y);
+ recout_dst_in_active_timing = calculate_plane_rec_in_timing_active(
+ pipe_ctx, &plane_state->dst_rect);
+ recout_clip_in_recout_dst = shift_rec(&recout_clip_in_active_timing,
+ -recout_dst_in_active_timing.x,
+ -recout_dst_in_active_timing.y);
+ ASSERT(recout_clip_in_recout_dst.x >= 0 &&
+ recout_clip_in_recout_dst.y >= 0);
/*
* Work in recout rotation since that requires less transformations
*/
@@ -1042,7 +1323,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
calculate_init_and_vp(
flip_horz_scan_dir,
- ro_lb,
+ recout_clip_in_recout_dst.x,
data->recout.width,
src.width,
data->taps.h_taps,
@@ -1052,7 +1333,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
&data->viewport.width);
calculate_init_and_vp(
flip_horz_scan_dir,
- ro_lb,
+ recout_clip_in_recout_dst.x,
data->recout.width,
src.width / vpc_div,
data->taps.h_taps_c,
@@ -1062,7 +1343,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
&data->viewport_c.width);
calculate_init_and_vp(
flip_vert_scan_dir,
- ro_tb,
+ recout_clip_in_recout_dst.y,
data->recout.height,
src.height,
data->taps.v_taps,
@@ -1072,7 +1353,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
&data->viewport.height);
calculate_init_and_vp(
flip_vert_scan_dir,
- ro_tb,
+ recout_clip_in_recout_dst.y,
data->recout.height,
src.height / vpc_div,
data->taps.v_taps_c,
@@ -1097,6 +1378,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx);
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -1121,30 +1403,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->dst.y += timing->v_border_top;
/* Calculate H and V active size */
- pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
- timing->h_border_left + timing->h_border_right;
- pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
- timing->v_border_top + timing->v_border_bottom;
- if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) {
- pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
-
- DC_LOG_SCALER("%s pipe %d: next_odm_pipe:%d prev_odm_pipe:%d\n",
- __func__,
- pipe_ctx->pipe_idx,
- pipe_ctx->next_odm_pipe ? pipe_ctx->next_odm_pipe->pipe_idx : -1,
- pipe_ctx->prev_odm_pipe ? pipe_ctx->prev_odm_pipe->pipe_idx : -1);
- } /* ODM + windows MPO, where window is on either right or left ODM half */
- else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe)) {
-
- pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1;
-
- DC_LOG_SCALER("%s ODM + windows MPO: pipe:%d top_pipe:%d top_pipe->next_odm_pipe:%d top_pipe->prev_odm_pipe:%d\n",
- __func__,
- pipe_ctx->pipe_idx,
- pipe_ctx->top_pipe->pipe_idx,
- pipe_ctx->top_pipe->next_odm_pipe ? pipe_ctx->top_pipe->next_odm_pipe->pipe_idx : -1,
- pipe_ctx->top_pipe->prev_odm_pipe ? pipe_ctx->top_pipe->prev_odm_pipe->pipe_idx : -1);
- }
+ pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width;
+ pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height;
+
/* depends on h_active */
calculate_recout(pipe_ctx);
/* depends on pixel format */
@@ -1449,7 +1710,24 @@ static int acquire_first_split_pipe(
return i;
} else if (split_pipe->prev_odm_pipe &&
split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
+
+ // Fix case where ODM slice has child planes
+ // Re-attach child planes
+ struct pipe_ctx *temp_head_pipe = resource_get_head_pipe_for_stream(res_ctx, split_pipe->stream);
+
+ if (split_pipe->bottom_pipe && temp_head_pipe) {
+
+ struct pipe_ctx *temp_tail_pipe = resource_get_tail_pipe(res_ctx, temp_head_pipe);
+
+ if (temp_tail_pipe) {
+
+ split_pipe->bottom_pipe->top_pipe = temp_tail_pipe;
+ temp_tail_pipe->bottom_pipe = split_pipe->bottom_pipe;
+ }
+ }
+
split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
+
if (split_pipe->next_odm_pipe)
split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
@@ -1457,6 +1735,11 @@ static int acquire_first_split_pipe(
resource_build_scaling_params(split_pipe->prev_odm_pipe);
memset(split_pipe, 0, sizeof(*split_pipe));
+
+ // We cannot split if head pipe is not odm
+ if (temp_head_pipe && !temp_head_pipe->next_odm_pipe && !temp_head_pipe->prev_odm_pipe)
+ return HEAD_NOT_IN_ODM;
+
split_pipe->stream_res.tg = pool->timing_generators[i];
split_pipe->plane_res.hubp = pool->hubps[i];
split_pipe->plane_res.ipp = pool->ipps[i];
@@ -1469,7 +1752,7 @@ static int acquire_first_split_pipe(
return i;
}
}
- return -1;
+ return UNABLE_TO_SPLIT;
}
bool dc_add_plane_to_context(
@@ -1521,6 +1804,10 @@ bool dc_add_plane_to_context(
int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
if (pipe_idx >= 0)
free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
+ else if (pipe_idx == HEAD_NOT_IN_ODM)
+ break;
+ else
+ ASSERT(false);
}
if (!free_pipe) {
@@ -1677,12 +1964,14 @@ bool dc_add_plane_to_context(
(free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
free_pipe->stream->src.x + free_pipe->stream->src.width/2))) {
if (!free_pipe->next_odm_pipe &&
- tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
+ tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe &&
+ tail_pipe->next_odm_pipe->bottom_pipe->plane_state == free_pipe->plane_state) {
free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe;
tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe;
}
if (!free_pipe->prev_odm_pipe &&
- tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) {
+ tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe &&
+ tail_pipe->prev_odm_pipe->bottom_pipe->plane_state == free_pipe->plane_state) {
free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 6e11d2b701f8..ea3d4b328e8e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -306,6 +306,32 @@ bool dc_optimize_timing_for_fsft(
}
#endif
+static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
+{
+ uint32_t refresh_rate;
+ struct dc *dc = stream->ctx->dc;
+
+ refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
+ stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
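+	/* i.e. refresh_rate = ceil(pix_clk_hz / (h_total * v_total)); e.g. a
+	 * hypothetical 594 MHz pixel clock over a 4400 x 2250 total frame
+	 * gives 60 Hz
+	 */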
+
+ /* If there's any stream that fits the SubVP high refresh criteria,
+ * we must return true. This is because cursor updates are asynchronous
+ * with full updates, so we could transition into a SubVP config and
+ * remain in HW cursor mode if there's no cursor update which will
+ * then cause corruption.
+ */
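+	/* e.g. (hypothetical values) a single 2560 x 1440 @ 144 Hz stream with
+	 * freesync not allowed meets the criteria below
+	 */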
+ if ((refresh_rate >= 120 && refresh_rate <= 165 &&
+ stream->timing.v_addressable >= 1440 &&
+ stream->timing.v_addressable <= 2160) &&
+ (dc->current_state->stream_count > 1 ||
+ (dc->current_state->stream_count == 1 && !stream->allow_freesync)))
+ return true;
+
+ return false;
+}
+
/*
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
@@ -334,12 +360,13 @@ bool dc_stream_set_cursor_attributes(
/* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
* Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
- * 1. For single display cases, if resolution is >= 5K and refresh rate < 120hz
- * 2. For multi display cases, if resolution is >= 4K and refresh rate < 120hz
- *
- * [< 120hz is a requirement for SubVP configs]
+	 * 1. If the config is a candidate for SubVP high refresh (both single and dual display configs)
+	 * 2. If not SubVP high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
+	 * 3. If not SubVP high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
*/
if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) {
+ if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
+ return false;
if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 63948170fd6d..eadb53853131 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -40,12 +40,14 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
+struct abm_save_restore;
+
/* forward declaration */
struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.241"
+#define DC_VER "3.2.244"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -506,7 +508,7 @@ enum dcn_zstate_support_state {
DCN_ZSTATE_SUPPORT_DISALLOW,
};
-/**
+/*
* struct dc_clocks - DC pipe clocks
*
* For any clocks that may differ per pipe only the max is stored in this
@@ -728,7 +730,7 @@ struct resource_pool;
struct dce_hwseq;
struct link_service;
-/**
+/*
* struct dc_debug_options - DC debug struct
*
* This struct provides a simple mechanism for developers to change some
@@ -756,7 +758,7 @@ struct dc_debug_options {
bool use_max_lb;
enum dcc_option disable_dcc;
- /**
+ /*
* @pipe_split_policy: Define which pipe split policy is used by the
* display core.
*/
@@ -861,6 +863,7 @@ struct dc_debug_options {
bool psr_skip_crtc_disable;
union dpia_debug_options dpia_debug;
bool disable_fixed_vs_aux_timeout_wa;
+ uint32_t fixed_vs_aux_delay_config_wa;
bool force_disable_subvp;
bool force_subvp_mclk_switch;
bool allow_sw_cursor_fallback;
@@ -1334,7 +1337,7 @@ struct dc_validation_set {
struct dc_stream_state *stream;
/**
- * @plane_state: Surface state
+ * @plane_states: Surface state
*/
struct dc_plane_state *plane_states[MAX_SURFACES];
@@ -1409,10 +1412,14 @@ struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+void dc_set_disable_128b_132b_stream_overhead(bool disable);
+
/* The function returns minimum bandwidth required to drive a given timing
* return - minimum required timing bandwidth in kbps.
*/
-uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing);
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding);
/* Link Interfaces */
/*
@@ -1514,6 +1521,7 @@ struct dc_link {
enum edp_revision edp_revision;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+ struct backlight_settings backlight_settings;
struct psr_settings psr_settings;
/* Drive settings read from integrated info table */
@@ -1849,6 +1857,14 @@ enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
*/
const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link);
+/* Get the highest encoding format that the link supports; highest meaning the
+ * encoding format which supports the maximum bandwidth.
+ *
+ * @link - a link with DP RX connection
+ * return - highest encoding format link supports.
+ */
+enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link);
+
/* Check if a RX (ex. DP sink, MST hub, passive or active dongle) is connected
* to a link with dp connector signal type.
* @link - a link with dp connector signal type
@@ -2230,6 +2246,11 @@ void dc_z10_save_init(struct dc *dc);
bool dc_is_dmub_outbox_supported(struct dc *dc);
bool dc_enable_dmub_notifications(struct dc *dc);
+bool dc_abm_save_restore(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct abm_save_restore *pData);
+
void dc_enable_dmub_outbox(struct dc *dc);
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index c753c6f30dd7..24433409d7de 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -381,6 +381,9 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
union dmub_rb_cmd cmd = { 0 };
+ if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
+ return;
+
memset(&cmd, 0, sizeof(cmd));
/* Prepare fw command */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 9491b76d61f5..fe3078b8789e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -73,6 +73,7 @@ bool dc_dsc_compute_bandwidth_range(
uint32_t max_bpp_x16,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range);
bool dc_dsc_compute_config(
@@ -81,6 +82,7 @@ bool dc_dsc_compute_config(
const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg);
uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 0ce7728a5a4b..14d7804b70b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -189,7 +189,6 @@ struct dc_panel_patch {
unsigned int disable_fams;
unsigned int skip_avmute;
unsigned int mst_start_top_delay;
- unsigned int delay_disable_aux_intercept_ms;
};
struct dc_edid_caps {
@@ -879,7 +878,7 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_overall_throughput_0_mps; /* In MPs */
uint32_t branch_overall_throughput_1_mps; /* In MPs */
uint32_t branch_max_line_width;
- bool is_dp;
+ bool is_dp; /* Decoded format */
};
struct dc_golden_table {
@@ -902,6 +901,14 @@ enum dc_gpu_mem_alloc_type {
DC_MEM_ALLOC_TYPE_AGP
};
+enum dc_link_encoding_format {
+ DC_LINK_ENCODING_UNSPECIFIED = 0,
+ DC_LINK_ENCODING_DP_8b_10b,
+ DC_LINK_ENCODING_DP_128b_132b,
+ DC_LINK_ENCODING_HDMI_TMDS,
+ DC_LINK_ENCODING_HDMI_FRL
+};
+
enum dc_psr_version {
DC_PSR_VERSION_1 = 0,
DC_PSR_VERSION_SU_1 = 1,
@@ -995,6 +1002,10 @@ struct link_mst_stream_allocation_table {
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
+struct backlight_settings {
+ uint32_t backlight_millinits;
+};
+
/* PSR feature flags */
struct psr_settings {
bool psr_feature_enabled; // PSR is supported by sink
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 63009db8b5a7..b87bfecb7755 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -76,9 +76,9 @@ static bool dce_dmcu_init(struct dmcu *dmcu)
}
static bool dce_dmcu_load_iram(struct dmcu *dmcu,
- unsigned int start_offset,
- const char *src,
- unsigned int bytes)
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
unsigned int count = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 6d1b01c267b7..4f552c3e7663 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -442,10 +442,9 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
return dce_i2c_hw;
}
-static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
- struct dce_i2c_hw *dce_i2c_hw,
- uint32_t timeout,
- enum i2c_channel_operation_result expected_result)
+static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(struct dce_i2c_hw *dce_i2c_hw,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result)
{
enum i2c_channel_operation_result result;
uint32_t i = 0;
@@ -509,11 +508,10 @@ static uint32_t get_transaction_timeout_hw(
return period_timeout * num_of_clock_stretches;
}
-static bool dce_i2c_hw_engine_submit_payload(
- struct dce_i2c_hw *dce_i2c_hw,
- struct i2c_payload *payload,
- bool middle_of_transaction,
- uint32_t speed)
+static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw,
+ struct i2c_payload *payload,
+ bool middle_of_transaction,
+ uint32_t speed)
{
struct i2c_request_transaction_data request;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index f1aeb6d1967c..e188447c8156 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -367,6 +367,7 @@ static bool dce_i2c_sw_engine_acquire_engine(
return true;
}
+
bool dce_i2c_engine_acquire_sw(
struct dce_i2c_sw *dce_i2c_sw,
struct ddc *ddc_handle)
@@ -392,12 +393,8 @@ bool dce_i2c_engine_acquire_sw(
return result;
}
-
-
-
-static void dce_i2c_sw_engine_submit_channel_request(
- struct dce_i2c_sw *engine,
- struct i2c_request_transaction_data *req)
+static void dce_i2c_sw_engine_submit_channel_request(struct dce_i2c_sw *engine,
+ struct i2c_request_transaction_data *req)
{
struct ddc *ddc = engine->ddc;
uint16_t clock_delay_div_4 = engine->clock_delay >> 2;
@@ -439,10 +436,9 @@ static void dce_i2c_sw_engine_submit_channel_request(
I2C_CHANNEL_OPERATION_FAILED;
}
-static bool dce_i2c_sw_engine_submit_payload(
- struct dce_i2c_sw *engine,
- struct i2c_payload *payload,
- bool middle_of_transaction)
+static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine,
+ struct i2c_payload *payload,
+ bool middle_of_transaction)
{
struct i2c_request_transaction_data request;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 2fb9572ce25d..d3e6544022b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -27,6 +27,7 @@
#include "dmub_abm_lcd.h"
#include "dc.h"
#include "core_types.h"
+#include "dmub_cmd.h"
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
@@ -118,6 +119,32 @@ static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int pane
return ret;
}
+/*****************************************************************************
+ * dmub_abm_save_restore_ex() - calls dmub_abm_save_restore to preserve DMUB's
+ * VariBright state. LCD only; OLED is TBD.
+ * @abm: used to get the dc context
+ * @panel_inst: panel instance index
+ * @pData: contains command to pause/un-pause abm and the abm parameters
+ *
+ ***************************************************************************/
+static bool dmub_abm_save_restore_ex(
+ struct abm *abm,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData)
+{
+ bool ret = false;
+ unsigned int feature_support;
+ struct dc_context *dc = abm->ctx;
+
+ feature_support = abm_feature_support(abm, panel_inst);
+
+ if (feature_support == ABM_LCD_SUPPORT)
+ ret = dmub_abm_save_restore(dc, panel_inst, pData);
+
+ return ret;
+}
+
static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
{
bool ret = false;
@@ -155,6 +182,7 @@ static const struct abm_funcs abm_funcs = {
.get_target_backlight = dmub_abm_get_target_backlight_ex,
.init_abm_config = dmub_abm_init_config_ex,
.set_abm_pause = dmub_abm_set_pause_ex,
+ .save_restore = dmub_abm_save_restore_ex,
.set_pipe_ex = dmub_abm_set_pipe_ex,
.set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 39da73eba86e..592a8f7a1c6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -208,6 +208,52 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un
return true;
}
+
+/*****************************************************************************
+ * dmub_abm_save_restore() - dmub interface for abm save + pause and
+ * restore + un-pause
+ * @dc: dc context
+ * @panel_inst: panel instance index
+ * @pData: contains command to pause/un-pause abm and exchange abm parameters
+ *
+ * On pause, the current abm data is read back and stored in pData; on
+ * un-pause, the abm data stored in pData is set/applied.
+ *
+ *****************************************************************************/
+bool dmub_abm_save_restore(
+ struct dc_context *dc,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData)
+{
+ union dmub_rb_cmd cmd;
+ uint8_t panel_mask = 0x01 << panel_inst;
+ unsigned int bytes = sizeof(struct abm_save_restore);
+
+ // TODO: Optimize by only reading back final 4 bytes
+ dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+	// Copy the save/restore structure into the scratch buffer (cw7)
+ memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)pData, bytes);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_save_restore.header.type = DMUB_CMD__ABM;
+ cmd.abm_save_restore.header.sub_type = DMUB_CMD__ABM_SAVE_RESTORE;
+
+ cmd.abm_save_restore.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.abm_save_restore.abm_init_config_data.bytes = bytes;
+ cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;
+
+ cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+	// Copy the data DMUB returned back into the local structure
+ memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
+
+ return true;
+}
+
bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
{
union dmub_rb_cmd cmd;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
index 00b4e268768e..853564d7f471 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
@@ -28,6 +28,8 @@
#include "abm.h"
+struct abm_save_restore;
+
void dmub_abm_init(struct abm *abm, uint32_t backlight);
bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask);
unsigned int dmub_abm_get_current_backlight(struct abm *abm);
@@ -38,6 +40,10 @@ void dmub_abm_init_config(struct abm *abm,
unsigned int inst);
bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst);
+bool dmub_abm_save_restore(
+ struct dc_context *dc,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData);
bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst);
bool dmub_abm_set_backlight_level(struct abm *abm,
unsigned int backlight_pwm_u16_16,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 6c9ca43d1040..20d4d08a6a2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1792,10 +1792,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
hws->funcs.edp_backlight_control(edp_link_with_sink, false);
}
/*resume from S3, no vbios posting, no need to power down again*/
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
power_down_all_hw_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
if (edp_link_with_sink && !keep_edp_vdd_on)
dc->hwss.edp_power_control(edp_link_with_sink, false);
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
}
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 3935fd455f0f..061221394ce0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -58,13 +58,13 @@
#include "dce/dce_i2c.h"
/* TODO remove this include */
-#include "dce80_resource.h"
-
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#endif
+#include "dce80/dce80_resource.h"
+
#ifndef mmDP_DPHY_INTERNAL_CTRL
#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index b33955928bd0..7e140c35a0ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -39,9 +39,6 @@
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
-#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3
-#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1
-#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10
#define REG(reg)\
dpp->tf_regs->reg
@@ -591,18 +588,6 @@ static void dpp1_dscl_set_manual_ratio_init(
static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
const struct rect *recout)
{
- int visual_confirm_on = 0;
- unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT;
-
- if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
- visual_confirm_on = 1;
-
- /* Check bounds to ensure the VC bar height was set to a sane value */
- if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) &&
- (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) {
- visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height;
- }
-
REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT in the active OTG area */
RECOUT_START_X, recout->x,
@@ -613,8 +598,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
/* Number of RECOUT horizontal pixels */
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
- RECOUT_HEIGHT, recout->height
- - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height));
+ RECOUT_HEIGHT, recout->height);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index a50309039d08..9834b75f1837 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3278,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect(
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
- if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+ if (pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index ee08b545aaea..377f1ba1a81b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1056,7 +1056,7 @@ void dcn10_link_encoder_disable_output(
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
- if (!dcn10_is_dig_enabled(enc)) {
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
		/*in DP_Alt_No_Connect case, we turn off the dig already,
		after executing the PHY w/a sequence, do not touch the PHY any more*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 4492bc2392b6..e32d3246e82a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1054,9 +1054,9 @@ void dcn20_blank_pixel_data(
enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
struct pipe_ctx *odm_pipe;
int odm_cnt = 1;
-
- int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
- int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ int odm_slice_width, last_odm_slice_width, offset = 0;
if (stream->link->test_pattern_enabled)
return;
@@ -1066,8 +1066,8 @@ void dcn20_blank_pixel_data(
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_cnt++;
-
- width = width / odm_cnt;
+ odm_slice_width = h_active / odm_cnt;
+ last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
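+	/* e.g. a hypothetical h_active of 1930 with odm_cnt = 3 gives slice
+	 * widths of 643, 643 and 644; the last slice absorbs the remainder
+	 */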
if (blank) {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
@@ -1080,29 +1080,32 @@ void dcn20_blank_pixel_data(
test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
}
- dc->hwss.set_disp_pattern_generator(dc,
- pipe_ctx,
- test_pattern,
- test_pattern_color_space,
- stream->timing.display_color_depth,
- &black_color,
- width,
- height,
- 0);
+ odm_pipe = pipe_ctx;
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ while (odm_pipe->next_odm_pipe) {
dc->hwss.set_disp_pattern_generator(dc,
- odm_pipe,
- dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
+				odm_pipe,
+ test_pattern,
test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
- width,
- height,
- 0);
+ odm_slice_width,
+ v_active,
+ offset);
+ offset += odm_slice_width;
+ odm_pipe = odm_pipe->next_odm_pipe;
}
+ dc->hwss.set_disp_pattern_generator(dc,
+ odm_pipe,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ &black_color,
+ last_odm_slice_width,
+ v_active,
+ offset);
+
if (!blank && dc->debug.enable_single_display_2to1_odm_policy) {
/* when exiting dynamic ODM need to reinit DPG state for unused pipes */
struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe;
@@ -2123,6 +2126,15 @@ void dcn20_optimize_bandwidth(
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ dc_dmub_srv_p_state_delegate(dc,
+ true, context);
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->clks.fw_based_mclk_switching = true;
+ } else {
+ dc->clk_mgr->clks.fw_based_mclk_switching = false;
+ }
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c
index 33fc9aa8621b..d07c04458d31 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c
@@ -43,7 +43,7 @@
#define DC_LOGGER \
dccg->ctx->logger
-void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
index e44a37491c1e..b7efa777ec73 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
@@ -32,6 +32,5 @@ struct dccg *dccg21_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask);
-void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
#endif /* __DCN21_DCCG_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index d693ea42d033..82dfcf773b1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -854,8 +854,8 @@ bool dcn21_fast_validate_bw(struct dc *dc,
/* We only support full screen mpo with ODM */
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index bf8864bc8a99..4cd4ae07d73d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -949,13 +949,36 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
}
void dcn30_prepare_bandwidth(struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context)
{
+ bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+ /* Any transition into an FPO config should disable MCLK switching first to avoid
+ * driver and FW P-State synchronization issues.
+ */
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+ dc->optimized_required = true;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ }
+
if (dc->clk_mgr->dc_mode_softmax_enabled)
if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
+ /*
+ * enabled -> enabled: do not disable
+ * enabled -> disabled: disable
+ * disabled -> enabled: don't care
+ * disabled -> disabled: don't care
+ */
+ if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+ dc_dmub_srv_p_state_delegate(dc, false, context);
+
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+		/* After disabling P-State, restore the original value to
+		 * ensure we get the correct P-State on the next optimize.
+		 */
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index dfb8f62765f2..5bf4d0aa6230 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -215,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc,
optc1->opp_count = 1;
}
-static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -293,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
}
-static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
index fb06dc9a4893..d3a056c12b0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
@@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
void optc3_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing);
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);
void optc3_tg_init(struct timing_generator *optc);
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
#endif /* __DC_OPTC_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index abe4c12a10b5..f5bfcd2a0dbc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1705,8 +1705,8 @@ noinline bool dcn30_internal_validate_bw(
/* We only support full screen mpo with ODM */
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 7aa628c21973..9002cb10a6ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -11,7 +11,8 @@
# Makefile for dcn30.
DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
- dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
+ dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \
+ dcn301_optc.o
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
new file mode 100644
index 000000000000..b3cfcb887905
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn301_optc.h"
+#include "dc.h"
+#include "dcn_calc_math.h"
+#include "dc_dmub_srv.h"
+
+#include "dml/dcn30/dcn30_fpu.h"
+#include "dc_trace.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+
+/**
+ * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
+ *
+ * @optc: timing_generator instance.
+ * @params: parameters used for Dynamic Refresh Rate.
+ */
+void optc301_set_drr(
+ struct timing_generator *optc,
+ const struct drr_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ if (params->vertical_total_mid != 0) {
+
+ REG_SET(OTG_V_TOTAL_MID, 0,
+ OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+ REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+ OTG_VTOTAL_MID_FRAME_NUM,
+ (uint8_t)params->vertical_total_mid_frame_num);
+
+ }
+
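+		/* the OTG registers are zero-based: e.g. hypothetical min/max
+		 * totals of 2250/2750 lines program OTG_V_TOTAL_MIN/MAX with
+		 * 2249/2749
+		 */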
+ optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
+
+ REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, 0);
+ // Setup manual flow control for EOF via TRIG_A
+ optc->funcs->setup_manual_trigger(optc);
+
+ } else {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ }
+}
+
+
+void optc301_setup_manual_trigger(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_8(OTG_TRIGA_CNTL, 0,
+ OTG_TRIGA_SOURCE_SELECT, 21,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
+ OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
+ OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
+ OTG_TRIGA_POLARITY_SELECT, 0,
+ OTG_TRIGA_FREQUENCY_SELECT, 0,
+ OTG_TRIGA_DELAY, 0,
+ OTG_TRIGA_CLEAR, 1);
+}
+
+static struct timing_generator_funcs dcn30_tg_funcs = {
+ .validate_timing = optc1_validate_timing,
+ .program_timing = optc1_program_timing,
+ .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+ .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+ .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc2_enable_crtc,
+ .disable_crtc = optc1_disable_crtc,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
+ .set_early_control = optc1_set_early_control,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank_color = optc3_program_blank_color,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .triplebuffer_lock = optc3_triplebuffer_lock,
+ .triplebuffer_unlock = optc2_triplebuffer_unlock,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .lock = optc3_lock,
+ .unlock = optc1_unlock,
+ .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+ .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc301_set_drr,
+ .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc3_set_vtotal_min_max,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .tg_init = optc3_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
+ .setup_global_swap_lock = NULL,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc2_configure_crc,
+ .set_dsc_config = optc3_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
+ .set_dwb_source = NULL,
+ .set_odm_bypass = optc3_set_odm_bypass,
+ .set_odm_combine = optc3_set_odm_combine,
+ .get_optc_source = optc2_get_optc_source,
+ .set_out_mux = optc3_set_out_mux,
+ .set_drr_trigger_window = optc3_set_drr_trigger_window,
+ .set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+ .set_gsl = optc2_set_gsl,
+ .set_gsl_source_select = optc2_set_gsl_source_select,
+ .set_vtg_params = optc1_set_vtg_params,
+ .program_manual_trigger = optc2_program_manual_trigger,
+ .setup_manual_trigger = optc301_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
+ .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
+};
+
+void dcn301_timing_generator_init(struct optc *optc1)
+{
+ optc1->base.funcs = &dcn30_tg_funcs;
+
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ optc1->min_h_sync_width = 4;
+ optc1->min_v_sync_width = 1;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
new file mode 100644
index 000000000000..b49585682a15
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPTC_DCN301_H__
+#define __DC_OPTC_DCN301_H__
+
+#include "dcn20/dcn20_optc.h"
+#include "dcn30/dcn30_optc.h"
+
+void dcn301_timing_generator_init(struct optc *optc1);
+void optc301_setup_manual_trigger(struct timing_generator *optc);
+void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params);
+
+#endif /* __DC_OPTC_DCN301_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 3485fbb1093e..f856a4773c27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -42,7 +42,7 @@
#include "dcn30/dcn30_hubp.h"
#include "irq/dcn30/irq_service_dcn30.h"
#include "dcn30/dcn30_dpp.h"
-#include "dcn30/dcn30_optc.h"
+#include "dcn301/dcn301_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
@@ -855,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create(
tgn10->tg_shift = &optc_shift;
tgn10->tg_mask = &optc_mask;
- dcn30_timing_generator_init(tgn10);
+ dcn301_timing_generator_init(tgn10);
return &tgn10->base;
}
@@ -1425,9 +1425,9 @@ static bool dcn301_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 45956ef6f3f9..131b8b82afc0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
index 65c1d754e2d6..8664f0c4c9b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk(
struct dcn_dccg *dccg_dcn,
enum phyd32clk_clock_source src)
{
- if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
if (src == PHYD32CLKC)
src = PHYD32CLKF;
if (src == PHYD32CLKD)
@@ -284,19 +285,11 @@ void dccg31_enable_symclk32_le(
/* select one of the PHYD32CLKs as the source for symclk32_le */
switch (hpo_le_inst) {
case 0:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE0_GATE_DISABLE, 1,
- SYMCLK32_ROOT_LE0_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE0_SRC_SEL, phyd32clk,
SYMCLK32_LE0_EN, 1);
break;
case 1:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE1_GATE_DISABLE, 1,
- SYMCLK32_ROOT_LE1_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE1_SRC_SEL, phyd32clk,
SYMCLK32_LE1_EN, 1);
@@ -319,19 +312,38 @@ void dccg31_disable_symclk32_le(
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE0_SRC_SEL, 0,
SYMCLK32_LE0_EN, 0);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE0_GATE_DISABLE, 0,
- SYMCLK32_ROOT_LE0_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE1_SRC_SEL, 0,
SYMCLK32_LE1_EN, 0);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE1_GATE_DISABLE, 0,
- SYMCLK32_ROOT_LE1_GATE_DISABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_set_symclk32_le_root_clock_gating(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ return;
+
+ switch (hpo_le_inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE0_GATE_DISABLE, enable ? 1 : 0,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 1 : 0);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE1_GATE_DISABLE, enable ? 1 : 0,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 1 : 0);
break;
default:
BREAK_TO_DEBUGGER();
@@ -660,10 +672,8 @@ void dccg31_init(struct dccg *dccg)
dccg31_disable_symclk32_se(dccg, 2);
dccg31_disable_symclk32_se(dccg, 3);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) {
- dccg31_disable_symclk32_le(dccg, 0);
- dccg31_disable_symclk32_le(dccg, 1);
- }
+ dccg31_set_symclk32_le_root_clock_gating(dccg, 0, false);
+ dccg31_set_symclk32_le_root_clock_gating(dccg, 1, false);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
dccg31_disable_dpstreamclk(dccg, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
index 0902ce5eb8a1..e3caaacf7493 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
@@ -179,6 +179,11 @@ void dccg31_disable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst);
+void dccg31_set_symclk32_le_root_clock_gating(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable);
+
void dccg31_set_physymclk(
struct dccg *dccg,
int phy_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index bd62502380d8..4596f3bac1b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -558,7 +558,7 @@ void dcn31_link_encoder_disable_output(
struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
struct dc_link *link;
- if (!dcn10_is_dig_enabled(enc))
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc))
return;
link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 0278bae50a9d..45143459eedd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -154,7 +154,7 @@ static void dcn31_hpo_dp_stream_enc_dp_blank(
VID_STREAM_STATUS, 0,
10, 5000);
- /* Disable SDP tranmission */
+ /* Disable SDP transmission */
REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
SDP_STREAM_ENABLE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index 0746ed31d1d1..ad3f019a784f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -362,6 +362,7 @@ static const struct dccg_funcs dccg314_funcs = {
.disable_symclk32_se = dccg31_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
.disable_symclk32_le = dccg31_disable_symclk32_le,
+ .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
.set_physymclk = dccg31_set_physymclk,
.set_dtbclk_dto = dccg314_set_dtbclk_dto,
.set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 6a9024aa3285..9b8e0f6f32b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -908,15 +908,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.root_clock_optimization = {
.bits = {
.dpp = true,
- .dsc = false,
- .hdmistream = false,
- .hdmichar = false,
- .dpstream = false,
- .symclk32_se = false,
- .symclk32_le = false,
- .symclk_fe = false,
- .physymclk = false,
- .dpiasymclk = false,
+ .dsc = true,
+ .hdmistream = true,
+ .hdmichar = true,
+ .dpstream = true,
+ .symclk32_se = true,
+ .symclk32_le = true,
+ .symclk_fe = true,
+ .physymclk = true,
+ .dpiasymclk = true,
}
},
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index df3a438abda8..2e3fa0fb8bd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -1659,7 +1659,7 @@ static int dcn315_populate_dml_pipes_from_context(
{
int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB;
bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index 11e28e056cf7..61ceff6bc0b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -49,7 +49,10 @@ static void dccg32_trigger_dio_fifo_resync(
uint32_t dispclk_rdivider_value = 0;
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
- REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
+
+ /* It is not valid for WDIVIDER to be set to 0 */
+ if (dispclk_rdivider_value != 0)
+ REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
static void dccg32_get_pixel_rate_div(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index d52d5feeb311..a87afb796f47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -47,11 +47,9 @@
#include "clk_mgr.h"
#include "dsc.h"
#include "dcn20/dcn20_optc.h"
-#include "dmub_subvp_state.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn32_resource.h"
#include "link.h"
-#include "dmub/inc/dmub_subvp_state.h"
#define DC_LOGGER_INIT(logger)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 1cc09799f92d..0d1f18f8348e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1892,7 +1892,7 @@ int dcn32_populate_dml_pipes_from_context(
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
bool subvp_in_use = false;
struct dc_crtc_timing *timing;
bool vsr_odm_support = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 5be242a1b82c..db9c55a09d9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -641,6 +641,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
uint8_t non_subvp_pipes = 0;
bool drr_pipe_found = false;
bool drr_psr_capable = false;
+ uint64_t refresh_rate = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -649,8 +650,14 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
continue;
if (pipe->plane_state && !pipe->top_pipe) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
subvp_count++;
+
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+ }
if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
non_subvp_pipes++;
drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
@@ -662,7 +669,8 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
}
}
- if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable)
+ if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
+ ((uint32_t)refresh_rate < 120))
result = true;
return result;
@@ -693,6 +701,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
bool drr_pipe_found = false;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
bool vblank_psr_capable = false;
+ uint64_t refresh_rate = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -701,8 +710,14 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
continue;
if (pipe->plane_state && !pipe->top_pipe) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
subvp_count++;
+
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+ }
if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
non_subvp_pipes++;
vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
@@ -715,7 +730,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
}
if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
+ ((uint32_t)refresh_rate < 120) &&
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
result = true;
return result;
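The refresh-rate expression introduced above is a ceiling division carried out in two div_u64() steps so that no 64-by-64-bit divide is needed. A standalone sketch of the same arithmetic (plain 64-bit division stands in for div_u64() here):

#include <stdint.h>

static uint32_t timing_refresh_hz(uint32_t pix_clk_100hz,
		uint32_t h_total, uint32_t v_total)
{
	/* pixel clock in Hz, biased up by one frame's pixels minus one
	 * so the two divides round the result up, as in the patch
	 */
	uint64_t rate = (uint64_t)pix_clk_100hz * 100 +
			(uint64_t)v_total * h_total - 1;

	rate /= v_total;
	rate /= h_total;
	return (uint32_t)rate;
}

/* Example: 1080p60 CEA timing, pix_clk_100hz = 1485000 (148.5 MHz),
 * h_total = 2200, v_total = 1125 -> 60 Hz, which passes the < 120 gate.
 */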
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index f294f2f8c75b..57cf0358cc43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -3194,7 +3194,7 @@ static void CalculateFlipSchedule(
unsigned int HostVMDynamicLevels;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
double HostVMInefficiencyFactor;
double VRatioClamped;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 43016c462251..eba51144fee7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -3505,7 +3505,7 @@ static void CalculateFlipSchedule(
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
double LineTime = v->HTotal[k] / v->PixelClock[k];
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index d9e049e7ff0a..07adb614366e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -31,6 +31,7 @@
#include "dml/dcn20/dcn20_fpu.h"
#include "dml/dcn31/dcn31_fpu.h"
#include "dml/display_mode_vba.h"
+#include "dml/dml_inline_defs.h"
struct _vcs_dpi_ip_params_st dcn3_14_ip = {
.VBlankNomDefaultUS = 668,
@@ -273,6 +274,25 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
+/*
+ * micro_sec_to_vert_lines() - convert a duration to a number of vertical lines for a given timing
+ *
+ * @param: num_us: duration in microseconds
+ * @return: number of vertical lines. If the duration does not map to a whole number
+ * of lines, it is rounded up to the next number of lines to guarantee num_us
+ */
+static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing)
+{
+ unsigned int num_lines = 0;
+ unsigned int lines_time_in_ns = 1000.0 *
+ (((float)timing->h_total * 1000.0) /
+ ((float)timing->pix_clk_100hz / 10.0));
+
+ num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0);
+
+ return num_lines;
+}
+
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
@@ -289,15 +309,22 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
+ unsigned int num_lines = 0;
if (!res_ctx->pipe_ctx[i].stream)
continue;
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
- pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+ num_lines = micro_sec_to_vert_lines(dcn3_14_ip.VBlankNomDefaultUS, timing);
+
+ if (pipe->stream->adjust.v_total_min != 0)
+ pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+ else
+ pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+
pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
- pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
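As a sanity check of micro_sec_to_vert_lines(): for a 1080p60 CEA timing (h_total = 2200, pix_clk_100hz = 1485000) one line takes 2200 / 148.5 MHz ≈ 14814.8 ns, so the default VBlankNomDefaultUS of 668 us rounds up to ceil(668000 / 14814.8) = 46 lines. vblank_nom is therefore clamped against a line count derived from the timing, rather than against the raw microsecond constant as before.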
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 9010c47476e9..32251af76935 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -3613,7 +3613,7 @@ static void CalculateFlipSchedule(
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
double LineTime = v->HTotal[k] / v->PixelClock[k];
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index a95034801712..0f882b879b0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1040,7 +1040,7 @@ static bool subvp_subvp_admissable(struct dc *dc,
uint32_t i;
uint8_t subvp_count = 0;
uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0;
- uint32_t refresh_rate = 0;
+ uint64_t refresh_rate = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1050,19 +1050,21 @@ static bool subvp_subvp_admissable(struct dc *dc,
if (pipe->plane_state && !pipe->top_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
- / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
- if (refresh_rate < min_refresh)
- min_refresh = refresh_rate;
- if (refresh_rate > max_refresh)
- max_refresh = refresh_rate;
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+
+ if ((uint32_t)refresh_rate < min_refresh)
+ min_refresh = (uint32_t)refresh_rate;
+ if ((uint32_t)refresh_rate > max_refresh)
+ max_refresh = (uint32_t)refresh_rate;
subvp_count++;
}
}
if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) ||
- (min_refresh >= 120 && max_refresh >= 120)))
+ (min_refresh >= 120 && max_refresh <= 165)))
result = true;
return result;
@@ -1715,8 +1717,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& !dc->config.enable_windowed_mpo_odm
&& pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index a50e7f4dce42..ecea008f19d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -3459,6 +3459,7 @@ bool dml32_CalculatePrefetchSchedule(
double TimeForFetchingMetaPTE = 0;
double TimeForFetchingRowInVBlank = 0;
double LinesToRequestPrefetchPixelData = 0;
+ double LinesForPrefetchBandwidth = 0;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
double Tvm_trips;
@@ -3888,11 +3889,15 @@ bool dml32_CalculatePrefetchSchedule(
TimeForFetchingMetaPTE = Tvm_oto;
TimeForFetchingRowInVBlank = Tr0_oto;
*PrefetchBandwidth = prefetch_bw_oto;
+ /* Clamp to oto for bandwidth calculation */
+ LinesForPrefetchBandwidth = dst_y_prefetch_oto;
} else {
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
TimeForFetchingMetaPTE = Tvm_equ;
TimeForFetchingRowInVBlank = Tr0_equ;
*PrefetchBandwidth = prefetch_bw_equ;
+ /* Clamp to equ for bandwidth calculation */
+ LinesForPrefetchBandwidth = dst_y_prefetch_equ;
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
@@ -3900,7 +3905,7 @@ bool dml32_CalculatePrefetchSchedule(
*DestinationLinesToRequestRowInVBlank =
dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
- LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+ LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth -
*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#ifdef __DML_VBA_DEBUG__
@@ -4124,7 +4129,7 @@ void dml32_CalculateFlipSchedule(
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
if (GPUVMEnable == true && HostVMEnable == true)
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 58dd62cce4bb..3966845c7694 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -40,6 +40,8 @@ static bool dsc_policy_enable_dsc_when_not_needed;
static bool dsc_policy_disable_dsc_stream_overhead;
+static bool disable_128b_132b_stream_overhead;
+
#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
@@ -47,8 +49,44 @@ static bool dsc_policy_disable_dsc_stream_overhead;
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
+/* Need to account for padding due to pixel-to-symbol packing
+ * for uncompressed 128b/132b streams.
+ */
+static uint32_t apply_128b_132b_stream_overhead(
+ const struct dc_crtc_timing *timing, const uint32_t kbps)
+{
+ uint32_t total_kbps = kbps;
+
+ if (disable_128b_132b_stream_overhead)
+ return kbps;
+
+ if (!timing->flags.DSC) {
+ struct fixed31_32 bpp;
+ struct fixed31_32 overhead_factor;
+
+ bpp = dc_fixpt_from_int(kbps);
+ bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10);
+
+ /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size)
+ * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive
+ */
+ overhead_factor = dc_fixpt_from_int(timing->h_addressable);
+ overhead_factor = dc_fixpt_mul(overhead_factor, bpp);
+ overhead_factor = dc_fixpt_div_int(overhead_factor, 128);
+ overhead_factor = dc_fixpt_div(
+ dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)),
+ overhead_factor);
+
+ total_kbps = dc_fixpt_ceil(
+ dc_fixpt_mul_int(overhead_factor, total_kbps));
+ }
+
+ return total_kbps;
+}
+
uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
+ const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding)
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
@@ -96,6 +134,9 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
kbps = kbps * 2 / 3;
}
+ if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
+ kbps = apply_128b_132b_stream_overhead(timing, kbps);
+
return kbps;
}
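A worked example of the overhead factor (not from the patch): an uncompressed 1366-wide 6 bpc RGB stream carries bpp = 18, so Symbols_per_HActive = 1366 * 18 / 128 ≈ 192.09 and the factor is 193 / 192.09 ≈ 1.0047, i.e. roughly 0.5% extra bandwidth to cover the padded final symbol of each line. A floating-point sketch of the same computation (the driver uses dc_fixpt fixed-point math instead):

#include <math.h>
#include <stdint.h>

static uint32_t overhead_kbps(uint32_t kbps, uint32_t pix_clk_100hz,
		uint32_t h_addressable)
{
	double bpp = (double)kbps / (pix_clk_100hz / 10.0); /* kbps over kHz */
	double symbols = h_addressable * bpp / 128.0;       /* 4 lanes x 32-bit symbols */
	double factor = ceil(symbols) / symbols;

	return (uint32_t)ceil(kbps * factor);
}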
@@ -107,6 +148,7 @@ static bool decide_dsc_bandwidth_range(
const uint32_t num_slices_h,
const struct dsc_enc_caps *dsc_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range);
static uint32_t compute_bpp_x16_from_target_bandwidth(
@@ -133,6 +175,7 @@ static bool setup_dsc_config(
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -398,6 +441,7 @@ bool dc_dsc_compute_bandwidth_range(
uint32_t max_bpp_x16,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range)
{
bool is_dsc_possible = false;
@@ -417,11 +461,11 @@ bool dc_dsc_compute_bandwidth_range(
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- &options, &config);
+ &options, link_encoding, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
- config.num_slices_h, &dsc_common_caps, timing, range);
+ config.num_slices_h, &dsc_common_caps, timing, link_encoding, range);
return is_dsc_possible;
}
@@ -557,6 +601,7 @@ static bool decide_dsc_bandwidth_range(
const uint32_t num_slices_h,
const struct dsc_enc_caps *dsc_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range)
{
uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
@@ -586,7 +631,7 @@ static bool decide_dsc_bandwidth_range(
/* populate output structure */
if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) {
/* native stream bandwidth */
- range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+ range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing, link_encoding);
/* max dsc target bpp */
range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
@@ -612,6 +657,7 @@ static bool decide_dsc_target_bpp_x16(
const int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const int num_slices_h,
+ const enum dc_link_encoding_format link_encoding,
int *target_bpp_x16)
{
struct dc_dsc_bw_range range;
@@ -619,7 +665,7 @@ static bool decide_dsc_target_bpp_x16(
*target_bpp_x16 = 0;
if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
- num_slices_h, dsc_common_caps, timing, &range)) {
+ num_slices_h, dsc_common_caps, timing, link_encoding, &range)) {
if (target_bandwidth_kbps >= range.stream_kbps) {
if (policy->enable_dsc_when_not_needed)
/* enable max bpp even dsc is not needed */
@@ -796,6 +842,7 @@ static bool setup_dsc_config(
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -995,6 +1042,7 @@ static bool setup_dsc_config(
target_bandwidth_kbps,
timing,
num_slices_h,
+ link_encoding,
&target_bpp);
dsc_cfg->bits_per_pixel = target_bpp;
}
@@ -1023,6 +1071,7 @@ bool dc_dsc_compute_config(
const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg)
{
bool is_dsc_possible = false;
@@ -1032,7 +1081,7 @@ bool dc_dsc_compute_config(
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, options, dsc_cfg);
+ timing, options, link_encoding, dsc_cfg);
return is_dsc_possible;
}
@@ -1165,6 +1214,11 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable)
dsc_policy_disable_dsc_stream_overhead = disable;
}
+void dc_set_disable_128b_132b_stream_overhead(bool disable)
+{
+ disable_128b_132b_stream_overhead = disable;
+}
+
void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
{
options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index d2190a3320f6..33db15d69f23 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -27,6 +27,8 @@
#include "dm_services_types.h"
+struct abm_save_restore;
+
struct abm {
struct dc_context *ctx;
const struct abm_funcs *funcs;
@@ -55,6 +57,10 @@ struct abm_funcs {
unsigned int bytes,
unsigned int inst);
bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst);
+ bool (*save_restore)(
+ struct abm *abm,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData);
bool (*set_pipe_ex)(struct abm *abm,
unsigned int otg_inst,
unsigned int option,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index 7254182b7c72..af6b9509d09d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -172,8 +172,6 @@ struct aux_engine_funcs {
struct aux_engine *engine,
uint8_t *returned_bytes);
bool (*is_engine_available)(struct aux_engine *engine);
- enum i2caux_engine_type (*get_engine_type)(
- const struct aux_engine *engine);
bool (*acquire)(
struct aux_engine *engine,
struct ddc *ddc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 8dc804bbe98b..93592281de32 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -123,6 +123,11 @@ struct dccg_funcs {
struct dccg *dccg,
int hpo_le_inst);
+ void (*set_symclk32_le_root_clock_gating)(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable);
+
void (*set_physymclk)(
struct dccg *dccg,
int phy_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
index c923b2af8510..37bc98faa7a0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
@@ -38,10 +38,9 @@
#define DCN_BASE__INST0_SEG2 0x000034C0
-static enum dc_irq_source to_dal_irq_source_dcn314(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index db9f1baa27e5..bce0428ad612 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -428,15 +428,24 @@ static void set_crtc_test_pattern(struct dc_link *link,
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
- int width = pipe_ctx->stream->timing.h_addressable +
+ struct pipe_ctx *odm_pipe;
+ int odm_cnt = 1;
+ int h_active = pipe_ctx->stream->timing.h_addressable +
pipe_ctx->stream->timing.h_border_left +
pipe_ctx->stream->timing.h_border_right;
- int height = pipe_ctx->stream->timing.v_addressable +
+ int v_active = pipe_ctx->stream->timing.v_addressable +
pipe_ctx->stream->timing.v_border_bottom +
pipe_ctx->stream->timing.v_border_top;
+ int odm_slice_width, last_odm_slice_width, offset = 0;
memset(&params, 0, sizeof(params));
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_cnt++;
+
+ odm_slice_width = h_active / odm_cnt;
+ last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
+
switch (test_pattern) {
case DP_TEST_PATTERN_COLOR_SQUARES:
controller_test_pattern =
@@ -473,16 +482,13 @@ static void set_crtc_test_pattern(struct dc_link *link,
{
/* disable bit depth reduction */
pipe_ctx->stream->bit_depth_params = params;
- opp->funcs->opp_program_bit_depth_reduction(opp, &params);
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
- else if (link->dc->hwss.set_disp_pattern_generator) {
- struct pipe_ctx *odm_pipe;
+ } else if (link->dc->hwss.set_disp_pattern_generator) {
enum controller_dp_color_space controller_color_space;
- int opp_cnt = 1;
- int offset = 0;
- int dpg_width = width;
+ struct output_pixel_processor *odm_opp;
switch (test_pattern_color_space) {
case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -502,36 +508,33 @@ static void set_crtc_test_pattern(struct dc_link *link,
break;
}
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
- dpg_width = width / opp_cnt;
- offset = dpg_width;
-
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- pipe_ctx,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- dpg_width,
- height,
- 0);
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
+ odm_pipe = pipe_ctx;
+ while (odm_pipe->next_odm_pipe) {
+ odm_opp = odm_pipe->stream_res.opp;
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
link->dc->hwss.set_disp_pattern_generator(link->dc,
- odm_pipe,
+ pipe_ctx,
controller_test_pattern,
controller_color_space,
color_depth,
NULL,
- dpg_width,
- height,
+ odm_slice_width,
+ v_active,
offset);
- offset += offset;
+ offset += odm_slice_width;
+ odm_pipe = odm_pipe->next_odm_pipe;
}
+ odm_opp = odm_pipe->stream_res.opp;
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ odm_pipe,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ last_odm_slice_width,
+ v_active,
+ offset);
}
}
break;
@@ -540,23 +543,17 @@ static void set_crtc_test_pattern(struct dc_link *link,
/* restore bitdepth reduction */
resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
pipe_ctx->stream->bit_depth_params = params;
- opp->funcs->opp_program_bit_depth_reduction(opp, &params);
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- color_depth);
- else if (link->dc->hwss.set_disp_pattern_generator) {
- struct pipe_ctx *odm_pipe;
- int opp_cnt = 1;
- int dpg_width;
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
-
- dpg_width = width / opp_cnt;
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth);
+ } else if (link->dc->hwss.set_disp_pattern_generator) {
+ struct output_pixel_processor *odm_opp;
+ odm_pipe = pipe_ctx;
+ while (odm_pipe->next_odm_pipe) {
+ odm_opp = odm_pipe->stream_res.opp;
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
link->dc->hwss.set_disp_pattern_generator(link->dc,
odm_pipe,
@@ -564,19 +561,23 @@ static void set_crtc_test_pattern(struct dc_link *link,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- dpg_width,
- height,
- 0);
+ odm_slice_width,
+ v_active,
+ offset);
+ offset += odm_slice_width;
+ odm_pipe = odm_pipe->next_odm_pipe;
}
+ odm_opp = odm_pipe->stream_res.opp;
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
link->dc->hwss.set_disp_pattern_generator(link->dc,
- pipe_ctx,
+ odm_pipe,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- dpg_width,
- height,
- 0);
+ last_odm_slice_width,
+ v_active,
+ offset);
}
}
break;
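The rewritten traversal programs every ODM slice, including the first, at its own horizontal offset, and only the last slice absorbs the remainder: for h_active = 5120 and odm_cnt = 3, odm_slice_width = 1706, last_odm_slice_width = 1708, and the offsets are 0, 1706 and 3412. A condensed sketch of the loop shape (program_slice() is a hypothetical stand-in for the bit-depth programming plus set_disp_pattern_generator() call):

struct pipe_ctx *p = pipe_ctx;
int offset = 0;

while (p->next_odm_pipe) {
	program_slice(p, odm_slice_width, offset);
	offset += odm_slice_width;
	p = p->next_odm_pipe;
}
program_slice(p, last_odm_slice_width, offset); /* last slice takes the remainder */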
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
index 586fe25c1702..dc1cb5478e08 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
@@ -108,6 +108,11 @@ static void enable_hpo_dp_link_output(struct dc_link *link,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
+ if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
+ link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
+ link->dc->res_pool->dccg,
+ link_res->hpo_dp_link_enc->inst,
+ true);
link_res->hpo_dp_link_enc->funcs->enable_link_phy(
link_res->hpo_dp_link_enc,
link_settings,
@@ -122,6 +127,11 @@ static void disable_hpo_dp_link_output(struct dc_link *link,
link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
link_res->hpo_dp_link_enc->funcs->disable_link_phy(
link_res->hpo_dp_link_enc, signal);
+ if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
+ link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
+ link->dc->res_pool->dccg,
+ link_res->hpo_dp_link_enc->inst,
+ false);
}
static void set_hpo_dp_link_test_pattern(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 8041b8369e45..c9b6676eaf53 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -876,8 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
(link->dpcd_sink_ext_caps.bits.oled == 1)) {
dpcd_set_source_specific_data(link);
msleep(post_oui_delay);
- set_default_brightness_aux(link);
- //TODO: use cached
+ set_cached_brightness_aux(link);
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 1a7b93e41e35..7997936613fc 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -1079,8 +1079,14 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
{
uint64_t kbps;
+ enum dc_link_encoding_format link_encoding;
- kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ link_encoding = DC_LINK_ENCODING_DP_128b_132b;
+ else
+ link_encoding = DC_LINK_ENCODING_DP_8b_10b;
+
+ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing, link_encoding);
return get_pbn_from_bw_in_kbps(kbps);
}
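get_pbn_from_bw_in_kbps() (unchanged here) converts that bandwidth into DP MST Payload Bandwidth Number units, where one PBN corresponds to 54/64 MBps and a small peak-factor margin is folded in. A rough integer sketch under those assumptions (the exact constants live in the helper, so treat the 1006/1000 margin as illustrative):

static uint32_t approx_pbn(uint64_t kbps)
{
	/* kbps -> MBps is /8/1000; one PBN is 54/64 MBps; 1006/1000 margin */
	return (uint32_t)((kbps * 64 * 1006) / (54 * 8 * 1000 * 1000));
}

/* Example: 4K60 RGB 8 bpc is about 12798000 kbps -> about 1907 PBN. */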
@@ -1538,7 +1544,8 @@ struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
struct fixed31_32 timing_bw =
dc_fixpt_from_int(
- dc_bandwidth_in_kbps_from_timing(&stream->timing));
+ dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(link)));
struct fixed31_32 avg_time_slots_per_mtp =
dc_fixpt_div(timing_bw, timeslot_bw_effective);
@@ -1971,6 +1978,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
@@ -2010,6 +2018,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
display_color_depth = COLOR_DEPTH_888;
+ /* We need to enable the stream encoder for TMDS first so that the 1/4 TMDS
+ * character clock can be applied when the clock is beyond 340 MHz.
+ */
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ link_hwss->setup_stream_encoder(pipe_ctx);
+
dc->hwss.enable_tmds_link_output(
link,
&pipe_ctx->link_res,
@@ -2129,7 +2143,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
- set_default_brightness_aux(link); // TODO: use cached if known
+ set_cached_brightness_aux(link);
+
if (link->dpcd_sink_ext_caps.bits.oled == 1)
msleep(bl_oled_enable_delay);
edp_backlight_enable_aux(link, true);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index e8b2fc4002a5..b45fda96eaf6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -130,7 +130,8 @@ static bool dp_active_dongle_validate_timing(
/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
outputTiming.flags.DSC = 0;
#endif
- if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
+ if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) >
+ dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
return false;
} else { // DP to HDMI TMDS converter
if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
@@ -285,7 +286,7 @@ static bool dp_validate_mode_timing(
link_setting = &link->verified_link_cap;
*/
- req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+ req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link));
max_bw = dp_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
@@ -357,7 +358,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
for (uint8_t i = 0; i < num_streams; ++i) {
link[i] = stream[i].link;
- bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing);
+ bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+ dc_link_get_highest_encoding_format(link[i]));
}
ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index 0fa1228bc178..0f19c07011b5 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -427,7 +427,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc,
if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
!ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
- ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) {
+ ddc->ctx->dce_version == DCN_VERSION_3_1) {
/* Fixed VS workaround for AUX timeout */
const uint32_t fixed_vs_address = 0xF004F;
const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 3a5e80b57711..b38ac3ea06b0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -906,7 +906,7 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
struct dc_link_settings *link_setting)
{
struct dc_link *link = stream->link;
- uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link));
memset(link_setting, 0, sizeof(*link_setting));
@@ -939,7 +939,8 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
tmp_timing.flags.DSC = 0;
- orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+ orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing,
+ dc_link_get_highest_encoding_format(link));
edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw);
max_link_rate = tmp_link_setting.link_rate;
}
@@ -2165,7 +2166,9 @@ static bool dp_verify_link_cap(
link,
&irq_data))
(*fail_count)++;
-
+ } else if (status == LINK_TRAINING_LINK_LOSS) {
+ success = true;
+ (*fail_count)++;
} else {
(*fail_count)++;
}
@@ -2188,6 +2191,7 @@ bool dp_verify_link_cap_with_retries(
int i = 0;
bool success = false;
int fail_count = 0;
+ struct dc_link_settings last_verified_link_cap = fail_safe_link_settings;
dp_trace_detect_lt_init(link);
@@ -2204,10 +2208,14 @@ bool dp_verify_link_cap_with_retries(
if (!link_detect_connection_type(link, &type) || type == dc_connection_none) {
link->verified_link_cap = fail_safe_link_settings;
break;
- } else if (dp_verify_link_cap(link, known_limit_link_setting,
- &fail_count) && fail_count == 0) {
- success = true;
- break;
+ } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) {
+ last_verified_link_cap = link->verified_link_cap;
+ if (fail_count == 0) {
+ success = true;
+ break;
+ }
+ } else {
+ link->verified_link_cap = last_verified_link_cap;
}
fsleep(10 * 1000);
}
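The retry change above is easier to read flattened out: an attempt that verifies (even with some failures) records its result, and a failed attempt rolls verified_link_cap back to the last recorded value instead of leaving a regressed one behind. A condensed sketch (the connection check, limit and attempt count are abbreviated):

struct dc_link_settings last_good = fail_safe_link_settings;

for (i = 0; i < attempts; i++) {
	if (!connected) {
		link->verified_link_cap = fail_safe_link_settings;
		break;
	}
	if (dp_verify_link_cap(link, limit, &fail_count)) {
		last_good = link->verified_link_cap; /* remember verified result */
		if (fail_count == 0) {
			success = true;
			break;
		}
	} else {
		link->verified_link_cap = last_good; /* undo the failed attempt */
	}
	fsleep(10 * 1000);
}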
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index e011df4bdaf2..90339c2dfd84 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1699,13 +1699,20 @@ bool perform_link_training_with_retries(
} else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
uint32_t req_bw;
uint32_t link_bw;
+ enum dc_link_encoding_format link_encoding = DC_LINK_ENCODING_UNSPECIFIED;
decide_fallback_link_setting(link, &max_link_settings,
&cur_link_settings, status);
+
+ if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
+ link_encoding = DC_LINK_ENCODING_DP_8b_10b;
+ else if (link_dp_get_encoding_format(&cur_link_settings) == DP_128b_132b_ENCODING)
+ link_encoding = DC_LINK_ENCODING_DP_128b_132b;
+
/* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to
* minimum link bandwidth.
*/
- req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, link_encoding);
link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings);
is_link_bw_low = (req_bw > link_bw);
is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index 15faaf645b14..ca0543e62917 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -236,6 +236,11 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
uint32_t pre_disable_intercept_delay_ms = 0;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+ const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+ const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+ const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+ const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
uint32_t vendor_lttpr_write_address = 0xF004F;
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
@@ -244,10 +249,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
uint8_t toggle_rate;
uint8_t rate;
- if (link->local_sink)
- pre_disable_intercept_delay_ms =
- link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms;
-
/* Only 8b/10b is supported */
ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING);
@@ -260,10 +261,13 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
if (offset != 0xFF) {
vendor_lttpr_write_address +=
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ if (offset == 2) {
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
/* Certain display and cable configuration require extra delay */
- if (offset > 2)
- pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2;
+ } else if (offset > 2) {
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ }
}
/* Vendor specific: Reset lane settings */
@@ -339,6 +343,34 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
+ if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_4lane_1[0],
+ sizeof(vendor_lttpr_write_data_4lane_1));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_4lane_2[0],
+ sizeof(vendor_lttpr_write_data_4lane_2));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_4lane_3[0],
+ sizeof(vendor_lttpr_write_data_4lane_3));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_4lane_4[0],
+ sizeof(vendor_lttpr_write_data_4lane_4));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_4lane_5[0],
+ sizeof(vendor_lttpr_write_data_4lane_5));
+ }
+
/* 2. Perform link training */
/* Perform Clock Recovery Sequence */
@@ -596,9 +628,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E};
const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01};
const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68};
+ uint32_t pre_disable_intercept_delay_ms = 0;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- uint32_t pre_disable_intercept_delay_ms = 0;
+ const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+ const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+ const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+ const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+ const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
uint32_t vendor_lttpr_write_address = 0xF004F;
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
@@ -607,10 +644,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
uint8_t toggle_rate;
uint8_t rate;
- if (link->local_sink)
- pre_disable_intercept_delay_ms =
- link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms;
-
/* Only 8b/10b is supported */
ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING);
@@ -623,10 +656,13 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
if (offset != 0xFF) {
vendor_lttpr_write_address +=
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ if (offset == 2) {
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
/* Certain display and cable configuration require extra delay */
- if (offset > 2)
- pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2;
+ } else if (offset > 2) {
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ }
}
/* Vendor specific: Reset lane settings */
@@ -702,6 +738,34 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
+ if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_4lane_1[0],
+ sizeof(vendor_lttpr_write_data_4lane_1));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_4lane_2[0],
+ sizeof(vendor_lttpr_write_data_4lane_2));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_4lane_3[0],
+ sizeof(vendor_lttpr_write_data_4lane_3));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_4lane_4[0],
+ sizeof(vendor_lttpr_write_data_4lane_4));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_4lane_5[0],
+ sizeof(vendor_lttpr_write_data_4lane_5));
+ }
+
/* 2. Perform link training */
/* Perform Clock Recovery Sequence */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 2039a345f23a..8b360c09e0e8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -46,43 +46,42 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
{
union dpcd_edp_config edp_config_set;
bool panel_mode_edp = false;
+ enum dc_status result;
memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
- if (panel_mode != DP_PANEL_MODE_DEFAULT) {
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ case DP_PANEL_MODE_SPECIAL:
+ panel_mode_edp = true;
+ break;
- switch (panel_mode) {
- case DP_PANEL_MODE_EDP:
- case DP_PANEL_MODE_SPECIAL:
- panel_mode_edp = true;
- break;
+ default:
+ break;
+ }
- default:
- break;
- }
+ /* set eDP panel mode in receiver */
+ result = core_link_read_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
- /*set edp panel mode in receiver*/
- core_link_read_dpcd(
+ if (result == DC_OK &&
+ edp_config_set.bits.PANEL_MODE_EDP
+ != panel_mode_edp) {
+
+ edp_config_set.bits.PANEL_MODE_EDP =
+ panel_mode_edp;
+ result = core_link_write_dpcd(
link,
DP_EDP_CONFIGURATION_SET,
&edp_config_set.raw,
sizeof(edp_config_set.raw));
- if (edp_config_set.bits.PANEL_MODE_EDP
- != panel_mode_edp) {
- enum dc_status result;
-
- edp_config_set.bits.PANEL_MODE_EDP =
- panel_mode_edp;
- result = core_link_write_dpcd(
- link,
- DP_EDP_CONFIGURATION_SET,
- &edp_config_set.raw,
- sizeof(edp_config_set.raw));
-
- ASSERT(result == DC_OK);
- }
+ ASSERT(result == DC_OK);
}
+
link->panel_mode = panel_mode;
DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
"eDP panel mode enabled: %d \n",
@@ -164,6 +163,7 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+ link->backlight_settings.backlight_millinits = backlight_millinits;
if (!link->dpcd_caps.panel_luminance_control) {
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
@@ -251,10 +251,20 @@ static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millin
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
- if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
- (uint8_t *) backlight_millinits,
- sizeof(uint32_t)))
- return false;
+ if (!link->dpcd_caps.panel_luminance_control) {
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)backlight_millinits,
+ sizeof(uint32_t)))
+ return false;
+ } else {
+ /* Zero all 4 bytes as a precaution, since target_luminance_value is only 3 bytes */
+ memset(backlight_millinits, 0, sizeof(uint32_t));
+
+ if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+ (uint8_t *)backlight_millinits,
+ sizeof(struct target_luminance_value)))
+ return false;
+ }
return true;
}
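The memset above matters because the destination is a uint32_t while the luminance read covers only sizeof(struct target_luminance_value) = 3 bytes: the DPCD read fills the three low bytes, so the fourth must already be zero. A standalone host-side illustration (little-endian assumed; not kernel code):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint32_t millinits = 0xDEADBEEF;       /* stale value before the read */
		uint8_t lum[3] = { 0x10, 0x27, 0x00 }; /* 10000 millinits, little endian */

		memset(&millinits, 0, sizeof(millinits)); /* clear the byte the read skips */
		memcpy(&millinits, lum, sizeof(lum));     /* stands in for the 3-byte DPCD read */
		printf("0x%08x -> %u\n", (unsigned)millinits, (unsigned)millinits);
		/* prints: 0x00002710 -> 10000 */
		return 0;
	}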
@@ -276,6 +286,15 @@ bool set_default_brightness_aux(struct dc_link *link)
return false;
}
+bool set_cached_brightness_aux(struct dc_link *link)
+{
+ if (link->backlight_settings.backlight_millinits)
+ return edp_set_backlight_level_nits(link, true,
+ link->backlight_settings.backlight_millinits, 0);
+ else
+ return set_default_brightness_aux(link);
+}
+
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing)
{
@@ -309,7 +329,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link,
core_link_read_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, sizeof(lane_count_set));
- req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
+ req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link));
if (!crtc_timing->flags.DSC)
edp_decide_link_settings(link, &link_setting, req_bw);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 28f552080558..fa89bdb3a336 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -30,6 +30,7 @@
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
+bool set_cached_brightness_aux(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
int edp_get_backlight_level(const struct dc_link *link);
bool edp_get_backlight_level_nits(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 4585e0419da6..2d995c87fbb9 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -378,6 +378,7 @@ struct dmub_srv_hw_funcs {
union dmub_fw_boot_status (*get_fw_status)(struct dmub_srv *dmub);
+ union dmub_fw_boot_options (*get_fw_boot_option)(struct dmub_srv *dmub);
void (*set_gpint)(struct dmub_srv *dmub,
union dmub_gpint_data_register reg);
@@ -778,9 +779,15 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb);
enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
union dmub_fw_boot_status *status);
+enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
+ union dmub_fw_boot_options *option);
+
enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
+enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
+ bool skip);
+
bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry);
bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index af1f50742371..adde1d84d773 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -170,6 +170,95 @@ extern "C" {
#endif
#pragma pack(push, 1)
+#define ABM_NUM_OF_ACE_SEGMENTS 5
+
+union abm_flags {
+ struct {
+ /**
+ * @abm_enabled: Indicates if ABM is enabled.
+ */
+ unsigned int abm_enabled : 1;
+
+ /**
+ * @disable_abm_requested: Indicates if driver has requested ABM to be disabled.
+ */
+ unsigned int disable_abm_requested : 1;
+
+ /**
+ * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled
+ * immediately.
+ */
+ unsigned int disable_abm_immediately : 1;
+
+ /**
+ * @disable_abm_immediate_keep_gain: Indicates if driver has requested ABM
+ * to be disabled immediately and keep gain.
+ */
+ unsigned int disable_abm_immediate_keep_gain : 1;
+
+ /**
+ * @fractional_pwm: Indicates if fractional duty cycle for backlight PWM is enabled.
+ */
+ unsigned int fractional_pwm : 1;
+
+ /**
+ * @abm_gradual_bl_change: Indicates if algorithm has completed gradual adjustment
+ * of user backlight level.
+ */
+ unsigned int abm_gradual_bl_change : 1;
+ } bitfields;
+
+ unsigned int u32All;
+};
+
+struct abm_save_restore {
+ /**
+ * @flags: Misc. ABM flags.
+ */
+ union abm_flags flags;
+
+ /**
+ * @pause: true: pause ABM and get state
+ * false: unpause ABM after setting state
+ */
+ uint32_t pause;
+
+ /**
+ * @next_ace_slope: Next ACE slopes to be programmed in HW (u3.13)
+ */
+ uint32_t next_ace_slope[ABM_NUM_OF_ACE_SEGMENTS];
+
+ /**
+ * @next_ace_thresh: Next ACE thresholds to be programmed in HW (u10.6)
+ */
+ uint32_t next_ace_thresh[ABM_NUM_OF_ACE_SEGMENTS];
+
+ /**
+ * @next_ace_offset: Next ACE offsets to be programmed in HW (u10.6)
+ */
+ uint32_t next_ace_offset[ABM_NUM_OF_ACE_SEGMENTS];
+
+ /**
+ * @knee_threshold: Current x-position of ACE knee (u0.16).
+ */
+ uint32_t knee_threshold;
+ /**
+ * @current_gain: Current backlight reduction (u16.16).
+ */
+ uint32_t current_gain;
+ /**
+ * @curr_bl_level: Current actual backlight level converging to target backlight level.
+ */
+ uint16_t curr_bl_level;
+
+ /**
+ * @curr_user_bl_level: Current nominal backlight level converging to level requested by user.
+ */
+ uint16_t curr_user_bl_level;
+};
+
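The uM.N annotations in the comments above are unsigned fixed-point formats with N fractional bits, so a raw register value decodes as raw / 2^N. A standalone sketch of the decode (illustrative host-side math; the firmware itself works in integers):

	#include <stdint.h>

	/* Decode an unsigned uM.N fixed-point value: raw / 2^frac_bits. */
	static double fixed_to_double(uint32_t raw, unsigned int frac_bits)
	{
		return (double)raw / (double)(1u << frac_bits);
	}

	/* Examples matching the field comments:
	 *   current_gain   (u16.16): raw 49152 -> 49152 / 65536 = 0.75
	 *   knee_threshold (u0.16):  raw 32768 -> 32768 / 65536 = 0.5
	 *   next_ace_slope (u3.13):  raw  8192 ->  8192 /  8192 = 1.0
	 */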
/**
* union dmub_addr - DMUB physical/virtual 64-bit address.
*/
@@ -2672,6 +2761,12 @@ enum dmub_cmd_abm_type {
* unregister vertical interrupt after steady state is reached
*/
DMUB_CMD__ABM_PAUSE = 6,
+
+ /**
+ * Save and restore ABM state: on save, the current ABM parameters are
+ * read back; on restore, ABM state is updated with the passed-in data.
+ */
+ DMUB_CMD__ABM_SAVE_RESTORE = 7,
};
/**
@@ -3056,6 +3151,7 @@ struct dmub_cmd_abm_pause_data {
uint8_t pad[1];
};
+
/**
* Definition of a DMUB_CMD__ABM_PAUSE command.
*/
@@ -3072,6 +3168,36 @@ struct dmub_rb_cmd_abm_pause {
};
/**
+ * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
+ */
+struct dmub_rb_cmd_abm_save_restore {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * OTG hw instance
+ */
+ uint8_t otg_inst;
+
+ /**
+ * Whether to freeze (pause) ABM while its state is saved or restored
+ */
+ uint8_t freeze;
+
+ /**
+ * Debug flag; doubles as explicit padding to a 4 byte boundary.
+ */
+ uint8_t debug;
+
+ /**
+ * Data passed between driver and FW, using the same layout as a DMUB_CMD__ABM_INIT_CONFIG command.
+ */
+ struct dmub_cmd_abm_init_config_data abm_init_config_data;
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command.
*/
struct dmub_cmd_query_feature_caps_data {
@@ -3509,6 +3635,11 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_pause abm_pause;
/**
+ * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
+ */
+ struct dmub_rb_cmd_abm_save_restore abm_save_restore;
+
+ /**
* Definition of a DMUB_CMD__DP_AUX_ACCESS command.
*/
struct dmub_rb_cmd_dp_aux_access dp_aux_access;
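A caller submits the new command by filling the matching union member. The sketch below mirrors how the other ABM commands are built; the actual call site is not part of this patch, and otg_inst is assumed in scope:

	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abm_save_restore.header.type = DMUB_CMD__ABM;
	cmd.abm_save_restore.header.sub_type = DMUB_CMD__ABM_SAVE_RESTORE;
	cmd.abm_save_restore.header.payload_bytes =
		sizeof(struct dmub_rb_cmd_abm_save_restore);
	cmd.abm_save_restore.otg_inst = otg_inst; /* target OTG instance */
	cmd.abm_save_restore.freeze = 1;          /* pause ABM and capture state */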
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h
deleted file mode 100644
index 21b02bad696f..000000000000
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef DMUB_SUBVP_STATE_H
-#define DMUB_SUBVP_STATE_H
-
-#include "dmub_cmd.h"
-
-#define DMUB_SUBVP_INST0 0
-#define DMUB_SUBVP_INST1 1
-#define SUBVP_MAX_WATERMARK 0xFFFF
-
-struct dmub_subvp_hubp_state {
- uint32_t CURSOR0_0_CURSOR_POSITION;
- uint32_t CURSOR0_0_CURSOR_HOT_SPOT;
- uint32_t CURSOR0_0_CURSOR_DST_OFFSET;
- uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
- uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
- uint32_t CURSOR0_0_CURSOR_SIZE;
- uint32_t CURSOR0_0_CURSOR_CONTROL;
- uint32_t HUBPREQ0_CURSOR_SETTINGS;
- uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH;
- uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
-};
-
-enum subvp_error_code {
- DMUB_SUBVP_INVALID_STATE,
- DMUB_SUBVP_INVALID_TRANSITION,
-};
-
-enum subvp_state {
- DMUB_SUBVP_DISABLED,
- DMUB_SUBVP_IDLE,
- DMUB_SUBVP_TRY_ACQUIRE_LOCKS,
- DMUB_SUBVP_WAIT_FOR_LOCKS,
- DMUB_SUBVP_PRECONFIGURE,
- DMUB_SUBVP_PREPARE,
- DMUB_SUBVP_ENABLE,
- DMUB_SUBVP_SWITCHING,
- DMUB_SUBVP_END,
- DMUB_SUBVP_RESTORE,
-};
-
-/* Defines information for SUBVP to handle vertical interrupts. */
-struct dmub_subvp_vertical_interrupt_event {
- /**
- * @inst: Hardware instance of vertical interrupt.
- */
- uint8_t otg_inst;
-
- /**
- * @pad: Align structure to 4 byte boundary.
- */
- uint8_t pad[3];
-
- enum subvp_state curr_state;
-};
-
-struct dmub_subvp_vertical_interrupt_state {
- /**
- * @events: Event list.
- */
- struct dmub_subvp_vertical_interrupt_event events[DMUB_MAX_STREAMS];
-};
-
-struct dmub_subvp_vline_interrupt_event {
-
- uint8_t hubp_inst;
- uint8_t pad[3];
-};
-
-struct dmub_subvp_vline_interrupt_state {
- struct dmub_subvp_vline_interrupt_event events[DMUB_MAX_PLANES];
-};
-
-struct dmub_subvp_interrupt_ctx {
- struct dmub_subvp_vertical_interrupt_state vertical_int;
- struct dmub_subvp_vline_interrupt_state vline_int;
-};
-
-struct dmub_subvp_pipe_state {
- uint32_t pix_clk_100hz;
- uint16_t main_vblank_start;
- uint16_t main_vblank_end;
- uint16_t mall_region_lines;
- uint16_t prefetch_lines;
- uint16_t prefetch_to_mall_start_lines;
- uint16_t processing_delay_lines;
- uint8_t main_pipe_index;
- uint8_t phantom_pipe_index;
- uint16_t htotal; // htotal for main / phantom pipe
- uint16_t vtotal;
- uint16_t optc_underflow_count;
- uint16_t hubp_underflow_count;
- uint8_t pad[2];
-};
-
-/**
- * struct dmub_subvp_vblank_drr_info - Store DRR state when handling
- * SubVP + VBLANK with DRR multi-display case.
- *
- * The info stored in this struct is only valid if drr_in_use = 1.
- */
-struct dmub_subvp_vblank_drr_info {
- uint8_t drr_in_use;
- uint8_t drr_window_size_ms; // DRR window size -- indicates largest VMIN/VMAX adjustment per frame
- uint16_t min_vtotal_supported; // Min VTOTAL that supports switching in VBLANK
- uint16_t max_vtotal_supported; // Max VTOTAL that can still support SubVP static scheduling requirements
- uint16_t prev_vmin; // Store VMIN value before MCLK switch (used to restore after MCLK end)
- uint16_t prev_vmax; // Store VMAX value before MCLK switch (used to restore after MCLK end)
- uint8_t use_ramping; // Use ramping or not
- uint8_t pad[1];
-};
-
-struct dmub_subvp_vblank_pipe_info {
- uint32_t pix_clk_100hz;
- uint16_t vblank_start;
- uint16_t vblank_end;
- uint16_t vstartup_start;
- uint16_t vtotal;
- uint16_t htotal;
- uint8_t pipe_index;
- uint8_t pad[1];
- struct dmub_subvp_vblank_drr_info drr_info; // DRR considered as part of SubVP + VBLANK case
-};
-
-enum subvp_switch_type {
- DMUB_SUBVP_ONLY, // Used for SubVP only, and SubVP + VACTIVE
- DMUB_SUBVP_AND_SUBVP, // 2 SubVP displays
- DMUB_SUBVP_AND_VBLANK,
- DMUB_SUBVP_AND_FPO,
-};
-
-/* SubVP state. */
-struct dmub_subvp_state {
- struct dmub_subvp_pipe_state pipe_state[DMUB_MAX_SUBVP_STREAMS];
- struct dmub_subvp_interrupt_ctx int_ctx;
- struct dmub_subvp_vblank_pipe_info vblank_info;
- enum subvp_state state; // current state
- enum subvp_switch_type switch_type; // enum take up 4 bytes (?)
- uint8_t mclk_pending;
- uint8_t num_subvp_streams;
- uint8_t vertical_int_margin_us;
- uint8_t pstate_allow_width_us;
- uint32_t subvp_mclk_switch_count;
- uint32_t subvp_wait_lock_count;
- uint32_t driver_wait_lock_count;
- uint32_t subvp_vblank_frame_count;
- uint16_t watermark_a_cache;
- uint8_t pad[2];
-};
-
-#endif /* _DMUB_SUBVP_STATE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 5e952541e72d..094e9f864557 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -352,6 +352,14 @@ union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub)
return status;
}
+union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub)
+{
+ union dmub_fw_boot_options option;
+
+ option.all = REG_READ(DMCUB_SCRATCH14);
+ return option;
+}
+
void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
union dmub_fw_boot_options boot_options = {0};
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index 89c5a948b67d..4d520a893c7b 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -239,6 +239,8 @@ void dmub_dcn31_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip)
union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub);
+union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub);
+
void dmub_dcn31_setup_outbox0(struct dmub_srv *dmub,
const struct dmub_region *outbox0);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index bdaf43892f47..93624ffe4eb8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -255,6 +255,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_gpint_response = dmub_dcn31_get_gpint_response;
funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout;
funcs->get_fw_status = dmub_dcn31_get_fw_boot_status;
+ funcs->get_fw_boot_option = dmub_dcn31_get_fw_boot_option;
funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options;
funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence;
//outbox0 call stacks
@@ -639,11 +640,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
if (dmub->hw_funcs.enable_dmub_boot_options)
dmub->hw_funcs.enable_dmub_boot_options(dmub, params);
- if (dmub->hw_funcs.skip_dmub_panel_power_sequence)
+ if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual)
dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub,
params->skip_panel_power_sequence);
- if (dmub->hw_funcs.reset_release)
+ if (dmub->hw_funcs.reset_release && !dmub->is_virtual)
dmub->hw_funcs.reset_release(dmub);
dmub->hw_init = true;
@@ -846,6 +847,32 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
+enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
+ union dmub_fw_boot_options *option)
+{
+ option->all = 0;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_fw_boot_option)
+ *option = dmub->hw_funcs.get_fw_boot_option(dmub);
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
+ bool skip)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual)
+ dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, skip);
+
+ return DMUB_STATUS_OK;
+}
+
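Both new entry points guard on sw_init and tolerate a missing hook, so callers only need to check the return status. A hedged usage sketch (everything beyond the two new functions is an assumption):

	enum dmub_status st;
	union dmub_fw_boot_options opts;

	st = dmub_srv_get_fw_boot_option(dmub, &opts);
	if (st == DMUB_STATUS_OK) {
		/* opts.all mirrors DMCUB_SCRATCH14 on DCN3.1; decode as needed */
	}

	/* Ask firmware to skip panel power sequencing; a no-op on virtual DMUB */
	st = dmub_srv_set_skip_panel_power_sequence(dmub, true);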
enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd)
{
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index cd870af5fd25..1b8ab20f1715 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -53,7 +53,7 @@ enum {
BITS_PER_DP_BYTE = 10,
DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */
DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when FEC is enabled */
- DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */
+ DATA_EFFICIENCY_128b_132b_x10000 = 9641, /* 96.71% data efficiency x 99.7% downspread factor */
};
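The new constant is just the product of the two factors named in the comment, scaled by 10000 and truncated:

	0.9671 * 0.997  = 0.9641987   -> 9641  (new: 99.7% downspread)
	0.9671 * 0.9975 = 0.96468225  -> 9646  (old: 99.75% downspread)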
enum lttpr_mode {