Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
151 files changed, 3046 insertions, 1932 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 2c645dffec18..f2b1720a6a66 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -396,6 +396,7 @@ static enum bp_result transmitter_control_v1_7( process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate; process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask; } + dig_v1_7.skip_phy_ssc_reduction = link->wa_flags.skip_phy_ssc_reduction; } // Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index d9955c5d2e5e..60021671b386 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -112,7 +112,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21) ############################################################################### # DCN30 ############################################################################### -CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o +CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o dcn30m_clk_mgr.o dcn30m_clk_mgr_smu_msg.o AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30)) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 4c3e58c730b1..33b9d36619ff 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -67,7 +67,7 @@ int clk_mgr_helper_get_active_display_cnt( if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) continue; - if (!stream->dpms_off || (stream_status && stream_status->plane_count)) + if (!stream->dpms_off || dc->is_switch_in_progress_dest || (stream_status && stream_status->plane_count)) display_count++; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h index fa09c594fd36..06da34676965 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h @@ -56,6 +56,7 @@ #define DALSMC_MSG_SetDisplayRefreshFromMall 0xF #define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10 #define DALSMC_MSG_BacoAudioD3PME 0x11 -#define DALSMC_Message_Count 0x12 +#define DALSMC_MSG_SmartAccess 0x12 +#define DALSMC_Message_Count 0x13 #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 8083a553c60e..ef77fcd164ed 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -30,6 +30,7 @@ #include "dce100/dce_clk_mgr.h" #include "dcn30/dcn30_clk_mgr.h" #include "dml/dcn30/dcn30_fpu.h" +#include "dcn30/dcn30m_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" @@ -498,7 +499,8 @@ static struct clk_mgr_funcs dcn3_funcs = { .are_clock_states_equal = dcn3_are_clock_states_equal, .enable_pme_wa = dcn3_enable_pme_wa, .notify_link_rate_change = dcn30_notify_link_rate_change, - .is_smu_present = dcn3_is_smu_present + .is_smu_present = dcn3_is_smu_present, + .set_smartmux_switch = dcn30m_set_smartmux_switch }; static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr) diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c new file mode 100644 index 000000000000..8e8a11c7437e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "clk_mgr_internal.h" +#include "dcn30/dcn30m_clk_mgr.h" +#include "dcn30m_clk_mgr_smu_msg.h" + + +uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return dcn30m_smu_set_smart_mux_switch(clk_mgr, pins_to_set); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h new file mode 100644 index 000000000000..757985b2eadc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN30M_CLK_MGR_H__ +#define __DCN30M_CLK_MGR_H__ + +uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set); + +#endif //__DCN30M_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c new file mode 100644 index 000000000000..0dd0583ff21e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c @@ -0,0 +1,118 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dcn30m_clk_mgr_smu_msg.h" + +#include "clk_mgr_internal.h" +#include "reg_helper.h" +#include "dm_helpers.h" + +#include "dalsmc.h" + +#define mmDAL_MSG_REG 0x1628A +#define mmDAL_ARG_REG 0x16273 +#define mmDAL_RESP_REG 0x16274 + +#define REG(reg_name) \ + mm ## reg_name + +#include "logger_types.h" +#undef DC_LOGGER +#define DC_LOGGER \ + CTX->logger +#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); } + + +/* + * Function to be used instead of REG_WAIT macro because the wait ends when + * the register is NOT EQUAL to zero, and because the translation in msg_if.h + * won't work with REG_WAIT. + */ +static uint32_t dcn30m_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, + unsigned int delay_us, unsigned int max_retries) +{ + uint32_t reg = 0; + + do { + reg = REG_READ(DAL_RESP_REG); + if (reg) + break; + + if (delay_us >= 1000) + msleep(delay_us/1000); + else if (delay_us > 0) + udelay(delay_us); + } while (max_retries--); + + /* handle DALSMC_Result_CmdRejectedBusy? */ + + /* Log? 
*/ + + return reg; +} + +static bool dcn30m_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + uint32_t msg_id, uint32_t param_in, uint32_t *param_out) +{ + uint32_t result; + /* Wait for response register to be ready */ + dcn30m_smu_wait_for_response(clk_mgr, 10, 200000); + + /* Clear response register */ + REG_WRITE(DAL_RESP_REG, 0); + + /* Set the parameter register for the SMU message */ + REG_WRITE(DAL_ARG_REG, param_in); + + /* Trigger the message transaction by writing the message ID */ + REG_WRITE(DAL_MSG_REG, msg_id); + + result = dcn30m_smu_wait_for_response(clk_mgr, 10, 200000); + + if (IS_SMU_TIMEOUT(result)) + dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000); + + /* Wait for response */ + if (result == DALSMC_Result_OK) { + if (param_out) + *param_out = REG_READ(DAL_ARG_REG); + + return true; + } + + return false; +} + +uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set) +{ + uint32_t response = 0; + + smu_print("SMU Set SmartMux Switch: switch_dgpu = %d\n", pins_to_set); + + dcn30m_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SmartAccess, pins_to_set, &response); + + return response; +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h new file mode 100644 index 000000000000..8a59a473fc5e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h @@ -0,0 +1,34 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ +#define DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ + +#include "core_types.h" + +struct clk_mgr_internal; + +uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set); +#endif /* DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c index 4b17d2fcd565..b59703467128 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -22,8 +22,6 @@ #include "dcn/dcn_4_1_0_offset.h" #include "dcn/dcn_4_1_0_sh_mask.h" -#include "dml/dcn401/dcn401_fpu.h" - #define DCN_BASE__INST0_SEG1 0x000000C0 #define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37 @@ -183,43 +181,36 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr) { - /* legacy */ - DC_FP_START(); - dcn401_build_wm_range_table_fpu(clk_mgr); - DC_FP_END(); - - if (clk_mgr->ctx->dc->debug.using_dml21) { - /* For min clocks use as reported by PM FW and report those as min */ - uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; - uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - - /* Set A - Normal - default values */ - clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set B - Unused on dcn4 */ - clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false; - - /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */ - /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ - if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF; - } else { - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false; - } - - /* Set 1B - Unused on dcn4 */ - clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false; + /* For min clocks use as reported by PM FW and report those as min */ + uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; + uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; + + /* Set A - Normal - default values */ + clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; + 
clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; + + /* Set B - Unused on dcn4 */ + clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false; + + /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */ + /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ + if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF; + } else { + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false; } + + /* Set 1B - Unused on dcn4 */ + clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false; } void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) @@ -320,6 +311,25 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) dcn401_build_wm_range_table(clk_mgr_base); } +bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return clk_mgr->smu_present && clk_mgr->dpm_present && + ((clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz)); +} + static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) { @@ -1490,6 +1500,35 @@ static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base) return 0; } +unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + unsigned int num_clk_levels; + + switch (clk_type) { + case CLK_TYPE_DISPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dispclk; + case CLK_TYPE_DPPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK) ? 
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dppclk; + case CLK_TYPE_DSCCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 : + clk_mgr->base.boot_snapshot.dispclk / 3; + default: + break; + } + + return 0; +} + static struct clk_mgr_funcs dcn401_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz, @@ -1505,6 +1544,8 @@ static struct clk_mgr_funcs dcn401_funcs = { .get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist, .get_hard_min_memclk = dcn401_get_hard_min_memclk, .get_hard_min_fclk = dcn401_get_hard_min_fclk, + .is_dc_mode_present = dcn401_is_dc_mode_present, + .get_max_clock_khz = dcn401_get_max_clock_khz, }; struct clk_mgr_internal *dcn401_clk_mgr_construct( diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h index 6c9ae5ca2c7e..97a1ce1e8a9e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h @@ -105,10 +105,13 @@ struct dcn401_clk_mgr { }; void dcn401_init_clocks(struct clk_mgr *clk_mgr_base); +bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base); struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx, struct dccg *dccg); void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr); +unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type); + #endif /* __DCN401_CLK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index b34b5b52236d..c31f7f8e409f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -976,6 +976,8 @@ static bool dc_construct_ctx(struct dc *dc, if (!dc_ctx) return false; + dc_stream_init_rmcm_3dlut(dc); + dc_ctx->cgs_device = init_params->cgs_device; dc_ctx->driver_context = init_params->driver; dc_ctx->dc = dc; @@ -2381,7 +2383,7 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params context->power_source = params->power_source; - res = dc_validate_with_context(dc, set, params->stream_count, context, false); + res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING); /* * Only update link encoder to stream assignment after bandwidth validation passed. 
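The recurring change in dc.c here (and in dc_resource.c further below) replaces the boolean fast_validate argument with enum dc_validate_mode, so call sites name their intent: DC_VALIDATE_MODE_ONLY returns after determining whether the new state can be supported (the old fast_validate == true path, which stopped before populating programming info), while DC_VALIDATE_MODE_AND_PROGRAMMING validates and fills in the programming info needed to commit the state. A minimal before/after sketch of a caller, using only the validate_bandwidth signature visible in these hunks; the surrounding control flow is illustrative, not part of the patch:

	/* Before: intent hidden behind a bool (true == support check only). */
	if (dc->res_pool->funcs->validate_bandwidth(dc, context, true) != DC_OK)
		return false;

	/* After: the validation mode is spelled out at the call site. */
	if (dc->res_pool->funcs->validate_bandwidth(dc, context,
			DC_VALIDATE_MODE_ONLY) != DC_OK)
		return false;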
@@ -3304,7 +3306,8 @@ static void copy_stream_update_to_stream(struct dc *dc, if (dsc_validate_context) { stream->timing.dsc_cfg = *update->dsc_config; stream->timing.flags.DSC = enable_dsc; - if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, + DC_VALIDATE_MODE_ONLY) != DC_OK) { stream->timing.dsc_cfg = old_dsc_cfg; stream->timing.flags.DSC = old_dsc_enabled; update->dsc_config = NULL; @@ -3526,7 +3529,7 @@ static bool update_planes_and_stream_state(struct dc *dc, } if (update_type == UPDATE_TYPE_FULL) { - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; } @@ -4632,7 +4635,8 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); /* commit minimal state */ - if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, + DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) { /* prevent underflow and corruption when reconfiguring pipes */ force_vsync_flip_in_minimal_transition_context(minimal_transition_context); } else { @@ -5155,7 +5159,7 @@ static bool update_planes_and_stream_v1(struct dc *dc, copy_stream_update_to_stream(dc, context, stream, stream_update); if (update_type >= UPDATE_TYPE_FULL) { - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) { DC_ERROR("Mode validation failed for stream update!\n"); dc_state_release(context); return false; @@ -5439,8 +5443,7 @@ bool dc_update_planes_and_stream(struct dc *dc, else ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); - - if (ret) + if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2) clear_update_flags(srf_updates, surface_count, stream); return ret; @@ -5471,7 +5474,7 @@ void dc_commit_updates_for_stream(struct dc *dc, ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, stream_update, state); - if (ret) + if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2) clear_update_flags(srf_updates, surface_count, stream); } @@ -5544,6 +5547,15 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); } break; + case DC_ACPI_CM_POWER_STATE_D3: + if (dc->caps.ips_support) + dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); + + if (dc->caps.ips_v2_support) { + if (dc->clk_mgr->funcs->set_low_power_state) + dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); + } + break; default: ASSERT(dc->current_state->stream_count == 0); dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); @@ -6341,13 +6353,14 @@ void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn); } -/* - ***************************************************************************** +/** * dc_get_power_profile_for_dc_state() - extracts power profile from dc state * * Called when DM wants to make power policy decisions based on dc_state * - ***************************************************************************** + * @context: Pointer to the 
dc_state from which the power profile is extracted. + * + * Return: The power profile structure containing the power level information. */ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context) { @@ -6363,13 +6376,14 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state return profile; } -/* - ********************************************************************************** +/** * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state * - * Called when DM wants to log detile buffer size from dc_state + * This function is called to log the detile buffer size from the dc_state. * - ********************************************************************************** + * @context: a pointer to the dc_state from which the detile buffer size is extracted. + * + * Return: the size of the detile buffer, or 0 if not available. */ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) { @@ -6380,26 +6394,27 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) else return 0; } + /** - *********************************************************************************************** * dc_get_host_router_index: Get index of host router from a dpia link * * This function return a host router index of the target link. If the target link is dpia link. * - * @param [in] link: target link - * @param [out] host_router_index: host router index of the target link + * @link: Pointer to the target link (input) + * @host_router_index: Pointer to store the host router index of the target link (output). * - * @return: true if the host router index is found and valid. + * Return: true if the host router index is found and valid. * - *********************************************************************************************** */ bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index) { - struct dc *dc = link->ctx->dc; + struct dc *dc; - if (link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) return false; + dc = link->ctx->dc; + if (link->link_index < dc->lowest_dpia_link_index) return false; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 7551d0a3fe82..bbce751b485f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -268,6 +268,8 @@ char *dc_status_to_str(enum dc_status status) return "Insufficient DP link bandwidth"; case DC_FAIL_HW_CURSOR_SUPPORT: return "HW Cursor not supported"; + case DC_FAIL_DP_TUNNEL_BW_VALIDATE: + return "Fail DP Tunnel BW validation"; case DC_ERROR_UNEXPECTED: return "Unexpected error"; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 7014b8d000bb..ec4e80e5b6eb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -427,6 +427,32 @@ void get_hdr_visual_confirm_color( } } +/* Visual Confirm color definition for Smart Mux */ +void get_smartmux_visual_confirm_color( + struct dc *dc, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; + + const struct tg_color sm_ver_colors[5] = { + {0, 0, 0}, /* SMUX_MUXCONTROL_UNSUPPORTED - Black */ + {0, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_v10 - Green */ + {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, 
/* SMUX_MUXCONTROL_v15 - Cyan */ + {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_MDM - Yellow */ + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_vUNKNOWN - Magenta*/ + }; + + if (dc->caps.is_apu) { + /* APU driving the eDP */ + *color = sm_ver_colors[dc->config.smart_mux_version]; + } else { + /* dGPU driving the eDP - red */ + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + } +} + /* Visual Confirm color definition for VABC */ void get_vabc_visual_confirm_color( struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index 71e15da4bb69..130455f2802a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -515,7 +515,8 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) link->dc->link_srv->enable_hpd_filter(link, enable); } -bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count) +enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx) { - return dc->link_srv->validate_dpia_bandwidth(streams, count); + return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx); } + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 3da25bd8b578..4d6181e7c612 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -3940,7 +3940,9 @@ enum dc_status resource_map_pool_resources( /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && - stream->audio_info.mode_count && stream->audio_info.flags.all) { + stream->audio_info.mode_count && + (stream->audio_info.flags.all || + (stream->sink && stream->sink->edid_caps.panel_patch.skip_audio_sab_check))) { pipe_ctx->stream_res.audio = find_first_free_audio( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version); @@ -4053,7 +4055,7 @@ static bool add_all_planes_for_stream( * @set: An array of dc_validation_set with all the current streams reference * @set_count: Total of streams * @context: New context - * @fast_validate: Enable or disable fast validation + * @validate_mode: identify the validation mode * * This function updates the potential new stream in the context object. It * creates multiple lists for the add, remove, and unchanged streams. 
In @@ -4068,7 +4070,7 @@ enum dc_status dc_validate_with_context(struct dc *dc, const struct dc_validation_set set[], int set_count, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 }; struct dc_stream_state *del_streams[MAX_PIPES] = { 0 }; @@ -4242,7 +4244,7 @@ enum dc_status dc_validate_with_context(struct dc *dc, dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false); } - res = dc_validate_global_state(dc, context, fast_validate); + res = dc_validate_global_state(dc, context, validate_mode); /* calculate pixel rate divider after deciding pxiel clock & odm combine */ if ((dc->hwss.calculate_pix_rate_divider) && (res == DC_OK)) { @@ -4299,7 +4301,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) * * @dc: dc struct for this driver * @new_ctx: state to be validated - * @fast_validate: set to true if only yes/no to support matters + * @validate_mode: identify the validation mode * * Checks hardware resource availability and bandwidth requirement. * @@ -4309,7 +4311,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx, - bool fast_validate) + enum dc_validate_mode validate_mode) { enum dc_status result = DC_ERROR_UNEXPECTED; int i, j; @@ -4368,7 +4370,7 @@ enum dc_status dc_validate_global_state( result = resource_build_scaling_params_for_context(dc, new_ctx); if (result == DC_OK) - result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate); + result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, validate_mode); return result; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 4db7383720fd..883054bb18e7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -194,11 +194,6 @@ static void init_state(struct dc *dc, struct dc_state *state) struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params) { struct dc_state *state; -#ifdef CONFIG_DRM_AMD_DC_FP - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); -#endif state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL); @@ -211,14 +206,12 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p #ifdef CONFIG_DRM_AMD_DC_FP if (dc->debug.using_dml2) { - dml2_opt->use_clock_dc_limits = false; - if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) { + if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) { dc_state_release(state); return NULL; } - dml2_opt->use_clock_dc_limits = true; - if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) { + if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) { dc_state_release(state); return NULL; } @@ -434,6 +427,8 @@ enum dc_status dc_state_remove_stream( return DC_ERROR_UNEXPECTED; } + dc_stream_release_3dlut_for_stream(dc, stream); + dc_stream_release(state->streams[i]); state->stream_count--; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index b883fb24fa12..4d6bc9fd4faa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -857,6 +857,73 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) } /* +* 
dc_stream_get_3dlut() +* Requirements: +* 1. Is stream already owns an RMCM instance, return it. +* 2. If it doesn't and we don't need to allocate, return NULL. +* 3. If there's a free RMCM instance, assign to stream and return it. +* 4. If no free RMCM instances, return NULL. +*/ + +struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream, + bool allocate_one) +{ + unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts; + + // see if one is allocated for this stream + for (int i = 0; i < num_rmcm; i++) { + if (dc->res_pool->rmcm_3dlut[i].isInUse && + dc->res_pool->rmcm_3dlut[i].stream == stream) + return &dc->res_pool->rmcm_3dlut[i]; + } + + //case: not found one, and dont need to allocate + if (!allocate_one) + return NULL; + + //see if there is an unused 3dlut, allocate + for (int i = 0; i < num_rmcm; i++) { + if (!dc->res_pool->rmcm_3dlut[i].isInUse) { + dc->res_pool->rmcm_3dlut[i].isInUse = true; + dc->res_pool->rmcm_3dlut[i].stream = stream; + return &dc->res_pool->rmcm_3dlut[i]; + } + } + + //dont have a 3dlut + return NULL; +} + + +void dc_stream_release_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream) +{ + struct dc_rmcm_3dlut *rmcm_3dlut = + dc_stream_get_3dlut_for_stream(dc, stream, false); + + if (rmcm_3dlut) { + rmcm_3dlut->isInUse = false; + rmcm_3dlut->stream = NULL; + rmcm_3dlut->protection_bits = 0; + } +} + + +void dc_stream_init_rmcm_3dlut(struct dc *dc) +{ + unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts; + + for (int i = 0; i < num_rmcm; i++) { + dc->res_pool->rmcm_3dlut[i].isInUse = false; + dc->res_pool->rmcm_3dlut[i].stream = NULL; + dc->res_pool->rmcm_3dlut[i].protection_bits = 0; + } +} + +/* * Finds the greatest index in refresh_rate_hz that contains a value <= refresh */ static int dc_stream_get_nearest_smallest_index(struct dc_stream_state *stream, int refresh) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index f41073c0147e..59c07756130d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -46,6 +46,8 @@ #include "dmub/inc/dmub_cmd.h" +#include "sspl/dc_spl_types.h" + struct abm_save_restore; /* forward declaration */ @@ -53,7 +55,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.334" +#define DC_VER "3.2.340" /** * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC @@ -66,8 +68,11 @@ struct dmub_notification; #define MAX_STREAMS 6 #define MIN_VIEWPORT_SIZE 12 #define MAX_NUM_EDP 2 +#define MAX_SUPPORTED_FORMATS 7 + #define MAX_HOST_ROUTERS_NUM 3 -#define MAX_DPIA_PER_HOST_ROUTER 2 +#define MAX_DPIA_PER_HOST_ROUTER 3 +#define MAX_DPIA_NUM (MAX_HOST_ROUTERS_NUM * MAX_DPIA_PER_HOST_ROUTER) /* Display Core Interfaces */ struct dc_versions { @@ -193,6 +198,34 @@ struct dpp_color_caps { struct rom_curve_caps ogam_rom_caps; }; +/* Below structure is to describe the HW support for mem layout, extend support + range to match what OS could handle in the roadmap */ +struct lut3d_caps { + uint32_t dma_3d_lut : 1; /*< DMA mode support for 3D LUT */ + struct { + uint32_t swizzle_3d_rgb : 1; + uint32_t swizzle_3d_bgr : 1; + uint32_t linear_1d : 1; + } mem_layout_support; + struct { + uint32_t unorm_12msb : 1; + uint32_t unorm_12lsb : 1; + uint32_t float_fp1_5_10 : 1; + } mem_format_support; + struct { + uint32_t order_rgba : 1; + uint32_t order_bgra : 1; + } mem_pixel_order_support; + /*< 
size options are 9, 17, 33, 45, 65 */ + struct { + uint32_t dim_9 : 1; /* 3D LUT support for 9x9x9 */ + uint32_t dim_17 : 1; /* 3D LUT support for 17x17x17 */ + uint32_t dim_33 : 1; /* 3D LUT support for 33x33x33 */ + uint32_t dim_45 : 1; /* 3D LUT support for 45x45x45 */ + uint32_t dim_65 : 1; /* 3D LUT support for 65x65x65 */ + } lut_dim_caps; +}; + /** * struct mpc_color_caps - color pipeline capabilities for multiple pipe and * plane combined blocks @@ -204,14 +237,21 @@ struct dpp_color_caps { * @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single * instance * @ogam_rom_caps: pre-definied curve caps for regamma 1D LUT + * @mcm_3d_lut_caps: HW support cap for MCM LUT memory + * @rmcm_3d_lut_caps: HW support cap for RMCM LUT memory + * @preblend: whether color manager supports preblend with MPC */ struct mpc_color_caps { uint16_t gamut_remap : 1; uint16_t ogam_ram : 1; uint16_t ocsc : 1; uint16_t num_3dluts : 3; + uint16_t num_rmcm_3dluts : 3; uint16_t shared_3d_lut:1; struct rom_curve_caps ogam_rom_caps; + struct lut3d_caps mcm_3d_lut_caps; + struct lut3d_caps rmcm_3d_lut_caps; + bool preblend; }; /** @@ -271,6 +311,7 @@ struct dc_caps { bool dmcub_support; bool zstate_support; bool ips_support; + bool ips_v2_support; uint32_t num_of_internal_disp; enum dp_protocol_version max_dp_protocol_version; unsigned int mall_size_per_mem_channel; @@ -308,6 +349,8 @@ struct dc_caps { struct dc_scl_caps scl_caps; uint8_t num_of_host_routers; uint8_t num_of_dpias_per_host_router; + /* limit of the ODM only, could be limited by other factors (like pipe count)*/ + uint8_t max_odm_combine_factor; }; struct dc_bug_wa { @@ -462,6 +505,7 @@ struct dc_config { bool use_spl; bool prefer_easf; bool use_pipe_ctx_sync_logic; + int smart_mux_version; bool ignore_dpref_ss; bool enable_mipi_converter_optimization; bool use_default_clock_table; @@ -472,6 +516,7 @@ struct dc_config { bool EnableMinDispClkODM; bool enable_auto_dpm_test_logs; unsigned int disable_ips; + unsigned int disable_ips_rcg; unsigned int disable_ips_in_vpb; bool disable_ips_in_dpms_off; bool usb4_bw_alloc_support; @@ -484,6 +529,8 @@ struct dc_config { bool set_pipe_unlock_order; bool enable_dpia_pre_training; bool unify_link_enc_assignment; + struct spl_sharpness_range dcn_sharpness_range; + struct spl_sharpness_range dcn_override_sharpness_range; }; enum visual_confirm { @@ -495,6 +542,7 @@ enum visual_confirm { VISUAL_CONFIRM_SWAPCHAIN = 6, VISUAL_CONFIRM_FAMS = 7, VISUAL_CONFIRM_SWIZZLE = 9, + VISUAL_CONFIRM_SMARTMUX_DGPU = 10, VISUAL_CONFIRM_REPLAY = 12, VISUAL_CONFIRM_SUBVP = 14, VISUAL_CONFIRM_MCLK_SWITCH = 16, @@ -773,6 +821,7 @@ enum pg_hw_resources { PG_DCHVM, PG_DWB, PG_HPO, + PG_DCOH, PG_HW_RESOURCES_NUM_ELEMENT }; @@ -789,10 +838,8 @@ union dpia_debug_options { uint32_t disable_mst_dsc_work_around:1; /* bit 3 */ uint32_t enable_force_tbt3_work_around:1; /* bit 4 */ uint32_t disable_usb4_pm_support:1; /* bit 5 */ - uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */ - uint32_t enable_dpia_pre_training:1; /* bit 7 */ - uint32_t unify_link_enc_assignment:1; /* bit 8 */ - uint32_t reserved:24; + uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */ + uint32_t reserved:25; } bits; uint32_t raw; }; @@ -918,6 +965,9 @@ struct dc_debug_options { bool disable_dsc_power_gate; bool disable_optc_power_gate; bool disable_hpo_power_gate; + bool disable_io_clk_power_gate; + bool disable_mem_power_gate; + bool disable_dio_power_gate; int dsc_min_slice_height_override; int dsc_bpp_increment_div; bool 
disable_pplib_wm_range; @@ -1154,7 +1204,7 @@ struct dc_init_data { uint32_t *dcn_reg_offsets; uint32_t *nbio_reg_offsets; uint32_t *clk_reg_offsets; - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; }; struct dc_callback_init { @@ -1255,6 +1305,12 @@ union dc_3dlut_state { }; +struct dc_rmcm_3dlut { + bool isInUse; + const struct dc_stream_state *stream; + uint8_t protection_bits; +}; + struct dc_3dlut { struct kref refcount; struct tetrahedral_params lut_3d; @@ -1392,6 +1448,8 @@ struct dc_plane_state { int sharpness_level; enum linear_light_scaling linear_light_scaling; unsigned int sdr_white_level_nits; + struct spl_sharpness_range sharpness_range; + enum sharpness_range_source sharpness_source; }; struct dc_plane_info { @@ -1573,6 +1631,7 @@ struct dc_scratch_space { bool blank_stream_on_ocs_change; bool read_dpcd204h_on_irq_hpd; bool force_dp_ffe_preset; + bool skip_phy_ssc_reduction; } wa_flags; union dc_dp_ffe_preset forced_dp_ffe_preset; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -1582,6 +1641,8 @@ struct dc_scratch_space { struct gpio *hpd_gpio; enum dc_link_fec_state fec_state; + bool is_dds; + bool is_display_mux_present; bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly struct dc_panel_config panel_config; @@ -1636,6 +1697,10 @@ struct dc { /* Require to maintain clocks and bandwidth for UEFI enabled HW */ + /* For eDP to know the switching state of SmartMux */ + bool is_switch_in_progress_orig; + bool is_switch_in_progress_dest; + /* FBC compressor */ struct compressor *fbc_compressor; @@ -1666,7 +1731,7 @@ struct dc { } scratch; struct dml2_configuration_options dml2_options; - struct dml2_configuration_options dml2_tmp; + struct dml2_configuration_options dml2_dc_power_options; enum dc_acpi_cm_power_state power_state; }; @@ -1771,19 +1836,15 @@ enum dc_status dc_validate_with_context(struct dc *dc, const struct dc_validation_set set[], int set_count, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dc_set_generic_gpio_for_stereo(bool enable, struct gpio_service *gpio_service); -/* - * fast_validate: we return after determining if we can support the new state, - * but before we populate the programming info - */ enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dc_acquire_release_mpc_3dlut( struct dc *dc, bool acquire, @@ -2379,17 +2440,12 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link( struct dc_link *link, int peak_bw); /* - * Validate the BW of all the valid DPIA links to make sure it doesn't exceed - * available BW for each host router - * - * @dc: pointer to dc struct - * @stream: pointer to all possible streams - * @count: number of valid DPIA streams + * Calculates the DP tunneling bandwidth required for the stream timing + * and aggregates the stream bandwidth for the respective DP tunneling link * - * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE + * return: dc_status */ -bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, - const unsigned int count); +enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx); /* Sink Interfaces - A sink corresponds to a display output device */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 
afbcf866520e..f5ef1a07078e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1269,12 +1269,16 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; new_signals.bits.allow_z10 = 1; + // New in IPSv2.0 + new_signals.bits.allow_ips1z8 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { new_signals.bits.allow_ips1 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { + // IPSv1.0 only new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { + // IPSv1.0 only new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; @@ -1286,6 +1290,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; new_signals.bits.allow_z10 = 1; + // New in IPSv2.0 + new_signals.bits.allow_ips1z8 = 1; } else { /* RCG only */ new_signals.bits.allow_pg = 0; @@ -1293,8 +1299,28 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips2 = 0; new_signals.bits.allow_z10 = 0; } + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) { + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + new_signals.bits.allow_z10 = 1; + } + // Setting RCG allow bits (IPSv2.0) + if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) { + new_signals.bits.allow_ips0_rcg = 1; + new_signals.bits.allow_ips1_rcg = 1; + } else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) { + new_signals.bits.allow_ips1_rcg = 1; + } else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) { + new_signals.bits.allow_ips0_rcg = 1; + } + // IPS dynamic allow bits (IPSv2 change, vpb use case) + if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) { + new_signals.bits.allow_dynamic_ips1 = 1; + } else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) { + new_signals.bits.allow_dynamic_ips1 = 1; + new_signals.bits.allow_dynamic_ips1_z8 = 1; } - ips_driver->signals = new_signals; dc_dmub_srv->driver_signals = ips_driver->signals; } @@ -1318,7 +1344,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv; - uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0; + uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0; if (dc->debug.dmcub_emulation) return; @@ -1338,31 +1364,34 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) rcg_exit_count = ips_fw->rcg_exit_count; ips1_exit_count = ips_fw->ips1_exit_count; ips2_exit_count = ips_fw->ips2_exit_count; + ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count; ips_driver->signals.all = 0; dc_dmub_srv->driver_signals = ips_driver->signals; DC_LOG_IPS( - "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)", + "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)", __func__, ips_driver->signals.bits.allow_ips1, ips_driver->signals.bits.allow_ips2, ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit, ips_fw->rcg_entry_count, ips_fw->ips1_entry_count, - ips_fw->ips2_entry_count); + 
ips_fw->ips2_entry_count, + ips_fw->ips1_z8ret_entry_count); /* Note: register access has technically not resumed for DCN here, but we * need to be message PMFW through our standard register interface. */ dc_dmub_srv->needs_idle_wake = false; - if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && + if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && (!dc->debug.optimize_ips_handshake || - ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { + ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) { DC_LOG_IPS( - "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)", + "wait IPS2 eval (ips1_commit=%u ips2_commit=%u )", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1422,28 +1451,31 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) dc_dmub_srv_notify_idle(dc, false); if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) { DC_LOG_IPS( - "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)", + "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)", ips_fw->signals.bits.ips1_commit, - ips_fw->signals.bits.ips2_commit); + ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit); while (ips_fw->signals.bits.ips1_commit) udelay(1); DC_LOG_IPS( - "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)", + "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)", ips_fw->signals.bits.ips1_commit, - ips_fw->signals.bits.ips2_commit); + ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit); } } if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); - DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)", + DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)", __func__, rcg_exit_count, ips1_exit_count, - ips2_exit_count); + ips2_exit_count, + ips1z8_exit_count); } void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state) @@ -1656,7 +1688,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com return result; } -void dc_dmub_srv_fams2_update_config(struct dc *dc, +static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable) { @@ -1722,6 +1754,63 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc, dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT); } +static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc, + struct dc_state *context, + bool enable) +{ + struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr; + union dmub_rb_cmd cmd; + uint32_t i; + + memset(config, 0, sizeof(*config)); + memset(&cmd, 0, sizeof(cmd)); + + cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; + cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG; + + cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr; + cmd.ib_fams2_config.ib_data.size = sizeof(*config); + + if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) { + /* copy static feature configuration overrides */ + config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery; + config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip; + config->global.features.bits.enable_debug = 
dc->debug.fams2_config.bits.enable_debug; + + /* send global configuration parameters */ + memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config, + sizeof(struct dmub_cmd_fams2_global_config)); + + /* construct per-stream configs */ + for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) { + /* copy stream static base state */ + memcpy(&config->stream_v1[i].base, + &context->bw_ctx.bw.dcn.fams2_stream_base_params[i], + sizeof(config->stream_v1[i].base)); + + /* copy stream static sub-state */ + memcpy(&config->stream_v1[i].sub_state, + &context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i], + sizeof(config->stream_v1[i].sub_state)); + } + } + + config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2; + config->global.features.bits.enable = enable; + + dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +void dc_dmub_srv_fams2_update_config(struct dc *dc, + struct dc_state *context, + bool enable) +{ + if (dc->debug.fams_version.major == 2) + dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable); + if (dc->debug.fams_version.major == 3) + dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable); +} + void dc_dmub_srv_fams2_drr_update(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, @@ -1847,83 +1936,267 @@ void dc_dmub_srv_fams2_passthrough_flip( } } -bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement) + +bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement) { - bool result; + union dmub_rb_cmd cmd; - if (!dc_dmub_srv || !dc_dmub_srv->dmub) + memset(&cmd, 0, sizeof(cmd)); + + cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS; + cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL; + cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data); + + // only panel_inst=0 is supported at the moment + cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst; + cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement; + + if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return false; + + return true; +} + +bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info, + enum ips_residency_mode ips_mode) +{ + union dmub_rb_cmd cmd; + uint32_t bytes = sizeof(struct dmub_ips_residency_info); + + dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb); + memset(&cmd, 0, sizeof(cmd)); + + cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS; + cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO; + cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data); + + cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.ips_query_residency_info.info_data.size = bytes; + cmd.ips_query_residency_info.info_data.panel_inst = panel_inst; + cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode; + + if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) || + cmd.ips_query_residency_info.header.ret_status == 0) return false; - result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY, - start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT); + // copy the result to the output since ret_status != 0 means the command returned data + 
memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes); + + return true; +} + +bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr; + lsdma_data->u.init_data.ring_size = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Init failed in DMUB"); return result; } -void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output) +bool dmub_lsdma_send_linear_copy_packet( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t count) { - uint32_t i; - enum dmub_gpint_command command_code; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; - if (!dc_dmub_srv || !dc_dmub_srv->dmub) - return; + memset(&cmd, 0, sizeof(cmd)); - switch (output->ips_mode) { - case DMUB_IPS_MODE_IPS1_MAX: - command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS2: - command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS1_RCG: - command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS1_ONO2_ON: - command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER; - break; - default: - command_code = DMUB_GPINT__INVALID_COMMAND; - break; - } + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.linear_copy_data.count = count - 1; // LSDMA controller expects bytes to copy -1 + lsdma_data->u.linear_copy_data.src_lo = src_addr & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Linear Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_tiled_to_tiled_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + struct lsdma_send_tiled_to_tiled_copy_command_params params) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.tiled_copy_data.src_addr_lo = params.src_addr & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.src_addr_hi = (params.src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.dst_addr_lo = params.dst_addr & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.dst_addr_hi = (params.dst_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.src_x = params.src_x; + 
lsdma_data->u.tiled_copy_data.src_y = params.src_y; + lsdma_data->u.tiled_copy_data.dst_x = params.dst_x; + lsdma_data->u.tiled_copy_data.dst_y = params.dst_y; + lsdma_data->u.tiled_copy_data.src_width = params.src_width - 1; // LSDMA controller expects width -1 + lsdma_data->u.tiled_copy_data.dst_width = params.dst_width - 1; // LSDMA controller expects width -1 + lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode; + lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode; + lsdma_data->u.tiled_copy_data.src_element_size = params.element_size; + lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size; + lsdma_data->u.tiled_copy_data.rect_x = params.rect_x; + lsdma_data->u.tiled_copy_data.rect_y = params.rect_y; + lsdma_data->u.tiled_copy_data.dcc = params.dcc; + lsdma_data->u.tiled_copy_data.tmz = params.tmz; + lsdma_data->u.tiled_copy_data.read_compress = params.read_compress; + lsdma_data->u.tiled_copy_data.write_compress = params.write_compress; + lsdma_data->u.tiled_copy_data.src_height = params.src_height - 1; // LSDMA controller expects height -1 + lsdma_data->u.tiled_copy_data.dst_height = params.dst_height - 1; // LSDMA controller expects height -1 + lsdma_data->u.tiled_copy_data.data_format = params.data_format; + lsdma_data->u.tiled_copy_data.max_com = params.max_com; + lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_pio_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t overlap_disable) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.pio_copy_data.packet.fields.byte_count = byte_count; + lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable; + lsdma_data->u.pio_copy_data.src_lo = src_addr & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA PIO Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_pio_constfill_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t data) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1; + lsdma_data->u.pio_constfill_data.packet.fields.byte_count = byte_count; + lsdma_data->u.pio_constfill_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.pio_constfill_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.pio_constfill_data.data = data; + + 
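+	/* As with the other LSDMA helpers here, submission is fire-and-forget (wait_type is DM_DMUB_WAIT_TYPE_NO_WAIT), so a false return only indicates the command could not be handed off to DMUB. */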
result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA PIO Constfill failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); - if (command_code == DMUB_GPINT__INVALID_COMMAND) + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.reg_write_data.reg_addr = reg_addr; + lsdma_data->u.reg_write_data.reg_data = reg_data; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Poll Reg failed in DMUB"); + + return result; +} + +void dc_dmub_srv_release_hw(const struct dc *dc) +{ + struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; + union dmub_rb_cmd cmd = {0}; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) return; - for (i = 0; i < GPINT_RETRY_NUM; i++) { - // false could mean GPINT timeout, in which case we should retry - if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT, - (uint16_t)(output->ips_mode), &output->residency_percent, - DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - break; - udelay(100); - } + memset(&cmd, 0, sizeof(cmd)); + cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; + cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW; + cmd.idle_opt_notify_idle.header.payload_bytes = + sizeof(cmd.idle_opt_notify_idle) - + sizeof(cmd.idle_opt_notify_idle.header); - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER, - (uint16_t)(output->ips_mode), - &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->entry_counter = 0; - - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO, - (uint16_t)(output->ips_mode), - &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_active_time_us[0] = 0; - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI, - (uint16_t)(output->ips_mode), - &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_active_time_us[1] = 0; - - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO, - (uint16_t)(output->ips_mode), - &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_inactive_time_us[0] = 0; - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI, - (uint16_t)(output->ips_mode), - &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_inactive_time_us[1] = 0; - - // NUM_IPS_HISTOGRAM_BUCKETS = 16 - for (i = 0; i < 16; i++) - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i], - DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->histogram[i] = 0; + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index ada5c2fb2db3..8ea320f21269 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -210,6 +210,60 @@ void 
dc_dmub_srv_fams2_passthrough_flip( struct dc_surface_update *srf_updates, int surface_count); +bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv); +bool dmub_lsdma_send_linear_copy_packet( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t count); +bool dmub_lsdma_send_pio_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t overlap_disable); +bool dmub_lsdma_send_pio_constfill_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t data); + +struct lsdma_send_tiled_to_tiled_copy_command_params { + uint64_t src_addr; + uint64_t dst_addr; + + uint32_t src_x : 16; + uint32_t src_y : 16; + + uint32_t dst_x : 16; + uint32_t dst_y : 16; + + uint32_t src_width : 16; + uint32_t dst_width : 16; + + uint32_t rect_x : 16; + uint32_t rect_y : 16; + + uint32_t src_height : 16; + uint32_t dst_height : 16; + + uint32_t data_format : 6; + uint32_t swizzle_mode : 5; + uint32_t element_size : 3; + uint32_t dcc : 1; + uint32_t tmz : 1; + uint32_t read_compress : 2; + uint32_t write_compress : 2; + uint32_t max_com : 2; + uint32_t max_uncom : 1; + uint32_t padding : 9; +}; + +bool dmub_lsdma_send_tiled_to_tiled_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + struct lsdma_send_tiled_to_tiled_copy_command_params params); +bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data); + /** * struct ips_residency_info - struct containing info from dmub_ips_residency_stats * @@ -223,7 +277,7 @@ void dc_dmub_srv_fams2_passthrough_flip( * @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c */ struct ips_residency_info { - enum dmub_ips_mode ips_mode; + enum ips_residency_mode ips_mode; unsigned int residency_percent; unsigned int entry_counter; unsigned int total_active_time_us[2]; @@ -231,21 +285,16 @@ struct ips_residency_info { unsigned int histogram[16]; }; -/** - * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status - * - * @dc_dmub_srv: The DC DMUB service pointer - * @start_measurement: Describes whether to start or stop measurement - * - * Return: true if GPINT was sent successfully, false otherwise - */ -bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement); +bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement); + +bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, + struct dmub_ips_residency_info *driver_info, + enum ips_residency_mode ips_mode); /** - * bool dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info + * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required. 
* - * @dc_dmub_srv: The DC DMUB service pointer - * @output: Output struct to copy the the residency info to + * @dc: pointer to DC object */ -void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output); +void dc_dmub_srv_release_hw(const struct dc *dc); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index d346f8ae1634..5ce1be362534 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -162,6 +162,11 @@ struct dc_link_settings { struct dc_tunnel_settings { bool should_enable_dp_tunneling; bool should_use_dp_bw_allocation; + uint8_t cm_id; + uint8_t group_id; + uint32_t bw_granularity; + uint32_t estimated_bw; + uint32_t allocated_bw; }; union dc_dp_ffe_preset { @@ -957,11 +962,21 @@ union usb4_driver_bw_cap { uint8_t raw; }; +/* DPCD[0xE0021] DP_IN_ADAPTER_TUNNEL_INFORMATION register. */ +union dpia_tunnel_info { + struct { + uint8_t group_id :3; + uint8_t rsvd :5; + } bits; + uint8_t raw; +}; + /* DP Tunneling over USB4 */ struct dpcd_usb4_dp_tunneling_info { union dp_tun_cap_support dp_tun_cap; union dpia_info dpia_info; union usb4_driver_bw_cap driver_bw_cap; + union dpia_tunnel_info dpia_tunnel_info; uint8_t usb4_driver_id; uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index c9f6c6275ca1..667852517246 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -68,7 +68,7 @@ enum dc_plane_addr_type { struct dc_plane_address { enum dc_plane_addr_type type; - bool tmz_surface; + uint8_t tmz_surface; union { struct{ PHYSICAL_ADDRESS_LOC addr; @@ -1104,7 +1104,8 @@ enum mpcc_gamut_remap_mode_select { enum mpcc_gamut_remap_id { MPCC_OGAM_GAMUT_REMAP, MPCC_MCM_FIRST_GAMUT_REMAP, - MPCC_MCM_SECOND_GAMUT_REMAP + MPCC_MCM_SECOND_GAMUT_REMAP, + MPCC_RMCM_GAMUT_REMAP, }; enum cursor_matrix_mode { diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index e3a8283b4098..7f57661433eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -156,15 +156,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->adaptive_sharpness.enable = true; spl_in->adaptive_sharpness.sharpness_level = 0; } else if (sharpness_setting == SHARPNESS_CUSTOM) { - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0; - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750; - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500; + /* SAT: read sharpness_range from dc_plane_state */ + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = plane_state->sharpness_range.sdr_rgb_min; + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = plane_state->sharpness_range.sdr_rgb_max; + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = plane_state->sharpness_range.sdr_rgb_mid; +
spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = plane_state->sharpness_range.sdr_yuv_min; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = plane_state->sharpness_range.sdr_yuv_max; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = plane_state->sharpness_range.sdr_yuv_mid; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = plane_state->sharpness_range.hdr_rgb_min; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = plane_state->sharpness_range.hdr_rgb_max; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = plane_state->sharpness_range.hdr_rgb_mid; if (force_sharpness_level > 0) { if (force_sharpness_level > 10) diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 341d2ffb64b1..5fc6fea211de 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -579,6 +579,17 @@ bool dc_stream_set_gamut_remap(struct dc *dc, bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream); +struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream, + bool allocate_one); + +void dc_stream_release_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream); + +void dc_stream_init_rmcm_3dlut(struct dc *dc); + struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream); void dc_dmub_update_dirty_rect(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index a4cd0eb39a3a..375ca2f13b7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -175,6 +175,7 @@ struct dc_panel_patch { unsigned int embedded_tiled_slave; unsigned int disable_fams; unsigned int skip_avmute; + unsigned int skip_audio_sab_check; unsigned int mst_start_top_delay; unsigned int remove_sink_ext_caps; unsigned int disable_colorimetry; @@ -263,6 +264,7 @@ enum dc_timing_source { TIMING_SOURCE_EDID_4BYTE, TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB, TIMING_SOURCE_EDID_CEA_RID, + TIMING_SOURCE_EDID_DISPLAYID_TYPE5, TIMING_SOURCE_VBIOS, TIMING_SOURCE_CV, TIMING_SOURCE_TV, @@ -1255,7 +1257,6 @@ enum dc_cm2_gpu_mem_layout { enum dc_cm2_gpu_mem_pixel_component_order { DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA, - DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA }; enum dc_cm2_gpu_mem_format { @@ -1277,7 +1278,6 @@ struct dc_cm2_gpu_mem_format_parameters { enum dc_cm2_gpu_mem_size { DC_CM2_GPU_MEM_SIZE_171717, - DC_CM2_GPU_MEM_SIZE_333333, DC_CM2_GPU_MEM_SIZE_TRANSFORMED, }; @@ -1315,6 +1315,7 @@ struct dc_cm2_func_luts { bool mpc_3dlut_enable; bool rmcm_3dlut_enable; bool mpc_mcm_post_blend; + uint8_t rmcm_tmz; } lut3d_data; const struct dc_transfer_func *lut1d_func; }; @@ -1372,4 +1373,19 @@ struct set_backlight_level_params { uint8_t aux_inst; }; +enum dc_validate_mode { + /* validate the mode and program HW */ + DC_VALIDATE_MODE_AND_PROGRAMMING = 0, + /* only validate the mode */ + DC_VALIDATE_MODE_ONLY = 1, + /* validate the mode and get the max state (voltage level) */ + DC_VALIDATE_MODE_AND_STATE_INDEX = 2, +}; + +struct dc_validation_dpia_set { + const struct dc_link *link; + const struct dc_tunnel_settings *tunnel_settings; + uint32_t required_bw; +}; + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index ffd172231fdf..668ee2d405fd 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -727,7 +727,7 @@ void dccg401_init(struct dccg *dccg) } } -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst) +void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h index 55e8718aad22..5947a35363aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h @@ -209,7 +209,7 @@ void dccg401_disable_symclk32_le( struct dccg *dccg, int hpo_le_inst); void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst); -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst); +void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h); void dccg401_set_ref_dscclk(struct dccg *dccg, uint32_t dsc_inst); void dccg401_set_src_sel( @@ -230,7 +230,6 @@ void dccg401_set_dp_dto( const struct dp_dto_params *params); void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst); void dccg401_set_dtbclk_p_src( struct dccg *dccg, enum streamclk_source src, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index d28826c3ae5f..4e06468a6284 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -292,9 +292,35 @@ static void set_speed( FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } +static bool acquire_engine(struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t arbitrate = 0; + + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + switch (arbitrate) { + case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW: + return true; + case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW: + return false; + case DC_I2C_STATUS__DC_I2C_STATUS_IDLE: + default: + break; + } + + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, true); + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) + return false; + + return true; +} + static bool setup_engine( struct dce_i2c_hw *dce_i2c_hw) { + // Deassert soft reset to unblock I2C engine registers + REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, false); + uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; uint32_t reset_length = 0; @@ -309,8 +335,8 @@ static bool setup_engine( REG_UPDATE_N(SETUP, 1, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1); - /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ - REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); + if (!acquire_engine(dce_i2c_hw)) + return false; /*set SW requested I2c speed to default, if API calls in it will be override later*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz); @@ -319,9 +345,8 @@ static bool setup_engine( i2c_setup_limit = dce_i2c_hw->setup_limit; /* Program pin select */ - REG_UPDATE_6(DC_I2C_CONTROL, + REG_UPDATE_5(DC_I2C_CONTROL, DC_I2C_GO, 0, - DC_I2C_SOFT_RESET, 0, DC_I2C_SEND_RESET, 0, DC_I2C_SW_STATUS_RESET, 1, DC_I2C_TRANSACTION_COUNT, 0, @@ -351,6 +376,26 @@ static bool setup_engine( return true; 
} +/** + * If we boot without an HDMI display, the I2C engine does not get initialized + * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after + * acquire, so that after setting SW_DONE_USING_I2C on release, the engine gets + * immediately reacquired by SW, preventing DMUB from using it. + */ +static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t arbitrate = 0; + + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) + return; + + // Still acquired after release, release again as a workaround + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true); + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + ASSERT(arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW); +} + static void release_engine( struct dce_i2c_hw *dce_i2c_hw) { @@ -378,9 +423,9 @@ static void release_engine( /*for HW HDCP Ri polling failure w/a test*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp); - /* Release I2C after reset, so HW or DMCU could use it */ - REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1, - DC_I2C_SW_USE_I2C_REG_REQ, 0); + // Release I2C engine so it can be used by HW or DMCU, automatically clears SW_USE_I2C + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true); + cntl_stuck_hw_workaround(dce_i2c_hw); if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) { if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index fcd3d86ad517..e7a318e26d38 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -4,6 +4,7 @@ #include "dc.h" #include "dc_dmub_srv.h" +#include "dc_dp_types.h" #include "dmub/dmub_srv.h" #include "core_types.h" #include "dmub_replay.h" @@ -43,21 +44,45 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s /* * Enable/Disable Replay. 
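 * When the link runs at UHBR10 or faster, the HPO stream/link encoder instances are re-resolved from the current pipe context on every enable, since those assignments are not static.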
*/ -static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst) +static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst, + struct dc_link *link) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; uint32_t retry_count; enum replay_state state = REPLAY_STATE_0; + struct pipe_ctx *pipe_ctx = NULL; + struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; + uint8_t i; memset(&cmd, 0, sizeof(cmd)); cmd.replay_enable.header.type = DMUB_CMD__REPLAY; cmd.replay_enable.data.panel_inst = panel_inst; cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE; - if (enable) + if (enable) { cmd.replay_enable.data.enable = REPLAY_ENABLE; - else + // hpo stream/link encoder assignments are not static, need to update every time we try to enable replay + if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) { + for (i = 0; i < MAX_PIPES; i++) { + if (res_ctx && + res_ctx->pipe_ctx[i].stream && + res_ctx->pipe_ctx[i].stream->link && + res_ctx->pipe_ctx[i].stream->link == link && + res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { + pipe_ctx = &res_ctx->pipe_ctx[i]; + //TODO: refactor for multi edp support + break; + } + } + + if (!pipe_ctx) + return; + + cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; + cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst; + } + } else cmd.replay_enable.data.enable = REPLAY_DISABLE; cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data); @@ -149,6 +174,17 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, copy_settings_data->digbe_inst = replay_context->digbe_inst; copy_settings_data->digfe_inst = replay_context->digfe_inst; + if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) { + if (pipe_ctx->stream_res.hpo_dp_stream_enc) + copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; + else + copy_settings_data->hpo_stream_enc_inst = 0; + if (pipe_ctx->link_res.hpo_dp_link_enc) + copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst; + else + copy_settings_data->hpo_link_enc_inst = 0; + } + if (pipe_ctx->plane_res.dpp) copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; else @@ -211,6 +247,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, pCmd->header.type = DMUB_CMD__REPLAY; pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL; pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); + pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst; pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF); pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h index e6346c0ffc0e..ccbe385e132c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -19,7 +19,7 @@ struct dmub_replay_funcs { void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state, uint8_t panel_inst); void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait, - uint8_t panel_inst); + uint8_t panel_inst, struct dc_link *link); bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link, struct replay_context *replay_context,
uint8_t panel_inst); void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt, diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index e1d500633dfa..b357683b4255 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -114,9 +114,6 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags) - ifdef CONFIG_DRM_AMD_DC_FP DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o DML += dcn10/dcn10_fpu.o @@ -137,7 +134,6 @@ DML += dcn303/dcn303_fpu.o DML += dcn314/dcn314_fpu.o DML += dcn35/dcn35_fpu.o DML += dcn351/dcn351_fpu.o -DML += dcn401/dcn401_fpu.o DML += dsc/rc_calc_fpu.o DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index f1235bf9a596..74962791302f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -748,7 +748,7 @@ static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic) bool dcn_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { /* * we want a breakdown of the various stages of validation, which the @@ -1119,7 +1119,7 @@ bool dcn_validate_bandwidth( BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (v->voltage_level != number_of_states_plus_one && !fast_validate) { + if (v->voltage_level != number_of_states_plus_one && validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) @@ -1286,7 +1286,7 @@ bool dcn_validate_bandwidth( } } else if (v->voltage_level == number_of_states_plus_one) { BW_VAL_TRACE_SKIP(fail); - } else if (fast_validate) { + } else if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index e9fea9c2162e..2a2eaf6adf26 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1315,7 +1315,7 @@ static void swizzle_to_dml_params( int dcn20_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i; bool synchronized_vblank = true; @@ -1733,7 +1733,7 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, int *out_pipe_cnt, int *pipe_split_from, int vlevel, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i, pipe_idx; @@ -1780,10 +1780,10 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); } 
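	/* report the (possibly just repopulated) pipe count back to the caller */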
*out_pipe_cnt = pipe_cnt; @@ -2027,7 +2027,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st } static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool out = false; @@ -2040,7 +2040,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co BW_VAL_TRACE_COUNT(); - out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); + out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode); if (pipe_cnt == 0) goto validate_out; @@ -2050,12 +2050,12 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } - dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); + dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); @@ -2077,7 +2077,7 @@ validate_out: } bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool voltage_supported = false; bool full_pstate_supported = false; @@ -2095,12 +2095,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, /*Unsafe due to current pipe merge and split logic*/ ASSERT(context != dc->current_state); - if (fast_validate) { - return dcn20_validate_bandwidth_internal(dc, context, true, pipes); - } + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + return dcn20_validate_bandwidth_internal(dc, context, validate_mode, pipes); // Best case, we support full UCLK switch latency - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes); + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes); full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || @@ -2113,7 +2112,7 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us; memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st)); - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes); + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes); dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) { @@ -2156,14 +2155,14 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v, int dcn21_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { uint32_t pipe_cnt; int i; dc_assert_fp_enabled(); - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0; i < pipe_cnt; i++) { @@ 
-2239,7 +2238,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, int *out_pipe_cnt, int *pipe_split_from, int vlevel_req, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i, pipe_idx; int vlevel, vlevel_max; @@ -2281,10 +2280,10 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); else pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); } *out_pipe_cnt = pipe_cnt; @@ -2319,7 +2318,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, } bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool out = false; @@ -2337,7 +2336,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, /*Unsafe due to current pipe merge and split logic*/ ASSERT(context != dc->current_state); - out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); + out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode); if (pipe_cnt == 0) goto validate_out; @@ -2347,12 +2346,12 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } - dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); + dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h index b6c34198ddc8..aed00039ca62 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h @@ -44,14 +44,14 @@ void dcn20_calculate_dlg_params(struct dc *dc, int dcn20_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, int *pipe_split_from, int vlevel, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks); void dcn20_update_bounding_box(struct dc *dc, @@ -62,7 +62,7 @@ void dcn20_update_bounding_box(struct dc *dc, void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb); bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes); + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes); void dcn20_fpu_set_wm_ranges(int i, struct pp_smu_wm_range_sets *ranges, struct _vcs_dpi_soc_bounding_box_st *loaded_bb); @@ -75,9 +75,9 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v, int dcn21_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, 
display_e2e_pipe_params_st *pipes, - bool fast_validate); -bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool - fast_validate, display_e2e_pipe_params_st *pipes); + enum dc_validate_mode validate_mode); +bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, enum + dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes); void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index 88789987bdbc..e5f5c0663750 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -339,7 +339,8 @@ void dcn30_fpu_calculate_wm_and_dlg( * newly found dummy_latency_index */ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true); + dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING, true); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported; @@ -630,7 +631,8 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, while (dummy_latency_index < max_latency_table_entries) { context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; - dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true); + dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING, true); if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank == dm_allow_self_refresh_and_mclk_switch) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index d2ae43a82ba5..dfcc5d50071e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -55,5 +55,5 @@ int dcn_get_approx_det_segs_required_for_pstate( int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif /* __DCN31_FPU_H__*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 5ed117e11aa2..df9d50b9b57c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -306,7 +306,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -316,7 +316,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c dc_assert_fp_enabled(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context,
pipes, validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h index d32c5bb99f4c..362ac79184ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h @@ -35,6 +35,6 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index b0fc1fd20208..6160952245b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -290,7 +290,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support; context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING); /* for subvp + DRR case, if subvp pipes are still present we support pstate */ if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported && @@ -1479,7 +1479,7 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP - * 2. Full update (i.e. !fast_validate) + * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING) * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) * 4. Display configuration passes validation * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) @@ -1517,7 +1517,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx); - *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); + *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, + DC_VALIDATE_MODE_AND_PROGRAMMING); // Populate dppclk to trigger a recalculate in dml_get_voltage_level // so the phantom pipe DLG params can be assigned correctly. 
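 // (the repopulated pipes[] above already includes the phantom pipes added by add_phantom_pipes())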
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0); @@ -1560,7 +1561,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, dc_state_remove_phantom_streams_and_planes(dc, context); dc_state_release_phantom_streams_and_planes(dc, context); vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported; - *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); + *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, + DC_VALIDATE_MODE_AND_PROGRAMMING); *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); /* This may adjust vlevel and maxMpcComb */ @@ -2138,7 +2140,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; bool repopulate_pipes = false; @@ -2162,7 +2164,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, for (i = 0; i < context->stream_count; i++) resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!pipe_cnt) { out = true; @@ -2172,13 +2174,13 @@ bool dcn32_internal_validate_bw(struct dc *dc, dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); - if (!fast_validate) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt, &repopulate_pipes)) goto validate_fail; } - if (fast_validate || + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || (dc->debug.dml_disallow_alternate_prefetch_modes && (vlevel == context->bw_ctx.dml.soc.num_states || vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { @@ -2195,7 +2197,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_none; - context->bw_ctx.dml.validate_max_state = fast_validate; + context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING); vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); context->bw_ctx.dml.validate_max_state = false; @@ -2247,7 +2249,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, int flag_vlevel = vlevel; int i; - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!dc->config.enable_windowed_mpo_odm) dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); @@ -2343,7 +2345,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, } context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; if (is_subvp_p_drr) { context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; @@ -2389,7 +2391,8 @@ void 
dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; } - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, + DC_VALIDATE_MODE_AND_PROGRAMMING); if (vlevel_temp < vlevel) { vlevel = vlevel_temp; maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; @@ -2410,7 +2413,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, stream_status->fpo_in_use = false; } context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING); } } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index 276e90e4e0ce..273d2bd79d85 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -49,7 +49,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 92f0a099d089..5d73efa2f0c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -437,7 +437,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -446,7 +446,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, const unsigned int max_allowed_vblank_nom = 1023; dcn31_populate_dml_pipes_from_context(dc, context, pipes, - fast_validate); + validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h index 067480fc3691..d121c5afce71 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h @@ -37,7 +37,7 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc, int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index 17d0b4923b0c..6f516af82956 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -470,7 +470,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, 
struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -479,7 +479,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, const unsigned int max_allowed_vblank_nom = 1023; dcn31_populate_dml_pipes_from_context(dc, context, pipes, - fast_validate); + validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h index f93efab9a668..f71d9d8d0759 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h @@ -12,7 +12,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc, int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c deleted file mode 100644 index 4fbecb5ff349..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c +++ /dev/null @@ -1,239 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. - -#include "dcn401_fpu.h" -#include "dcn401/dcn401_resource.h" -// We need this includes for WATERMARKS_* defines -#include "clk_mgr/dcn401/dcn401_smu14_driver_if.h" -#include "link.h" - -#define DC_LOGGER_INIT(logger) - -void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) -{ - /* defaults */ - double pstate_latency_us = clk_mgr->ctx->dc->dml.soc.dram_clock_change_latency_us; - double fclk_change_latency_us = clk_mgr->ctx->dc->dml.soc.fclk_change_latency_us; - double sr_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_exit_time_us; - double sr_enter_plus_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_enter_plus_exit_time_us; - /* For min clocks use as reported by PM FW and report those as min */ - uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; - uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - uint16_t setb_min_uclk_mhz = min_uclk_mhz; - uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz; - - dc_assert_fp_enabled(); - - /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */ - if (dcfclk_mhz_for_the_second_state) - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state; - else - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - - if (clk_mgr->bw_params->clk_table.entries[2].memclk_mhz) - setb_min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz; - - /* Set A - Normal - default values */ - clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = 
sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */ - clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */ - /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ - if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { - clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF; - clk_mgr->bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50; - clk_mgr->bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9; - clk_mgr->bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8; - clk_mgr->bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[3].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5; - } - /* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */ - /* For MALL DRAM clock change latency is N/A, for watermak calculations use lowest 
value dummy P state latency */ - clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF; -} - -/* - * dcn401_update_bw_bounding_box - * - * This would override some dcn4_01 ip_or_soc initial parameters hardcoded from - * spreadsheet with actual values as per dGPU SKU: - * - with passed few options from dc->config - * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might - * need to get it from PM FW) - * - with passed latency values (passed in ns units) in dc-> bb override for - * debugging purposes - * - with passed latencies from VBIOS (in 100_ns units) if available for - * certain dGPU SKU - * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU - * of the same ASIC) - * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM - * FW for different clocks (which might differ for certain dGPU SKU of the - * same ASIC) - */ -void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) -{ - dc_assert_fp_enabled(); - - /* Override from passed dc->bb_overrides if available*/ - if (dc->bb_overrides.sr_exit_time_ns) - dc->dml2_options.bbox_overrides.sr_exit_latency_us = - dc->bb_overrides.sr_exit_time_ns / 1000.0; - - if (dc->bb_overrides.sr_enter_plus_exit_time_ns) - dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - - if (dc->bb_overrides.urgent_latency_ns) - dc->dml2_options.bbox_overrides.urgent_latency_us = - dc->bb_overrides.urgent_latency_ns / 1000.0; - - if (dc->bb_overrides.dram_clock_change_latency_ns) - dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - - if (dc->bb_overrides.fclk_clock_change_latency_ns) - dc->dml2_options.bbox_overrides.fclk_change_latency_us = - dc->bb_overrides.fclk_clock_change_latency_ns / 1000; - - /* Override from VBIOS if VBIOS bb_info available */ - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; - - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dc->dml2_options.bbox_overrides.sr_exit_latency_us = - bb_info.dram_sr_exit_latency_100ns * 10; - } - } - - /* Override from 
VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) { - dc->dml2_options.bbox_overrides.dram_num_chan = - dc->ctx->dc_bios->vram_info.num_chans; - - } - - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dc->dml2_options.bbox_overrides.dram_chanel_width_bytes = - dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - - dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0; - dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; - dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0; - - if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) { - unsigned int i = 0; - - dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = - 
dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) { - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz; - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz; - } - } - } -} - diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h deleted file mode 100644 index 329f1788843c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. - -#ifndef __DCN401_FPU_H__ -#define __DCN401_FPU_H__ - -#include "clk_mgr.h" - -void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr); - -void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile index 157ecf008d6c..4c21ce42054c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile @@ -81,10 +81,11 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2)) AMD_DISPLAY_FILES += $(AMD_DAL_DML2) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) @@ -94,17 +95,16 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflag CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) - - CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := 
$(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) @@ -120,6 +120,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags) DML21 := src/dml2_top/dml2_top_interfaces.o DML21 += src/dml2_top/dml2_top_soc15.o DML21 += src/dml2_core/dml2_core_dcn4.o +DML21 += src/dml2_core/dml2_core_utils.o DML21 += src/dml2_core/dml2_core_factory.o DML21 += src/dml2_core/dml2_core_dcn4_calcs.o DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 7ae9c0ba0c9e..715f9019a33e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -10189,7 +10189,7 @@ dml_uint_t dml_mode_support_ex(struct dml_mode_support_ex_params_st *in_out_para result = mode_support_pwr_states(&in_out_params->out_lowest_state_idx, in_out_params->mode_lib, in_out_params->in_display_cfg, - 0, + in_out_params->in_start_state_idx, in_out_params->mode_lib->states.num_states - 1); if (result) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h index 0670e4dc4fd9..dbeb08466092 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h @@ -1917,6 +1917,7 @@ struct display_mode_lib_st { struct dml_mode_support_ex_params_st { struct display_mode_lib_st *mode_lib; const struct dml_display_cfg_st *in_display_cfg; + dml_uint_t in_start_state_idx; dml_uint_t out_lowest_state_idx; struct dml_mode_support_info_st *out_evaluation_info; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 2aa6d44bb359..a06217a9eef6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
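The dml_mode_support_ex() change above threads the new in_start_state_idx field through to mode_support_pwr_states(), so callers can raise the floor of the power-state search instead of always scanning from state 0. A minimal caller sketch (the start index value and the lowest_state variable are illustrative, not from this patch):

	struct dml_mode_support_ex_params_st params = {
		.mode_lib = &mode_lib,
		.in_display_cfg = &display_cfg,
		.in_start_state_idx = 2,	/* hypothetical floor: skip the two lowest states */
	};

	/* searches states [in_start_state_idx, num_states - 1]; 0 keeps the old behavior */
	if (dml_mode_support_ex(&params))
		lowest_state = params.out_lowest_state_idx;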
- #include "dml21_wrapper.h" #include "dml2_core_dcn4_calcs.h" #include "dml2_internal_shared_types.h" @@ -11,277 +10,263 @@ #include "dml21_translation_helper.h" #include "bounding_boxes/dcn4_soc_bb.h" -static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config, - const struct dc *in_dc) -{ - const struct dml2_soc_bb *soc_bb; - const struct dml2_soc_qos_parameters *qos_params; - - switch (in_dc->ctx->dce_version) { - case DCN_VERSION_4_01: - default: - if (config->bb_from_dmub) - soc_bb = config->bb_from_dmub; - else - soc_bb = &dml2_socbb_dcn401; - - qos_params = &dml_dcn4_variant_a_soc_qos_params; - } - - /* patch soc bb */ - memcpy(&dml_init->soc_bb, soc_bb, sizeof(struct dml2_soc_bb)); - - /* patch qos params */ - memcpy(&dml_init->soc_bb.qos_parameters, qos_params, sizeof(struct dml2_soc_qos_parameters)); -} - -static void dml21_external_socbb_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config) -{ - memcpy(&dml_init->soc_bb, &config->external_socbb_ip_params->soc_bb, sizeof(struct dml2_soc_bb)); -} - -static void dml21_external_ip_params(struct dml2_initialize_instance_in_out *dml_init, +static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options, + const struct dc *in_dc, const struct dml2_configuration_options *config) { - memcpy(&dml_init->ip_caps, &config->external_socbb_ip_params->ip_params, sizeof(struct dml2_ip_capabilities)); + bool disable_fams2 = !in_dc->debug.fams2_config.bits.enable; + + /* ODM options */ + pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm; + pmo_options->disable_dyn_odm_for_multi_stream = true; + pmo_options->disable_dyn_odm_for_stream_with_svp = true; + + pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1); + + /* NOTE: DRR and SubVP Require FAMS2 */ + pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) || + in_dc->debug.force_disable_subvp || + disable_fams2; + pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) || + disable_fams2; + pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) || + disable_fams2; + pmo_options->disable_fams2 = disable_fams2; + + pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE || + in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY; + pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE; } -static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_init, +/* + * Populate dml_init based on default static values in soc bb. The default + * values are for reference and support at least minimal operation of current + * SoC and DCN hardware. The values could be modified by subsequent override + * functions to reflect our true hardware capability.
+ */ +static void populate_default_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc) { - const struct dml2_ip_capabilities *ip_caps; - switch (in_dc->ctx->dce_version) { case DCN_VERSION_4_01: + dml_init->options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp; + dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config); + dml_init->soc_bb = dml2_socbb_dcn401; + dml_init->soc_bb.qos_parameters = dml_dcn4_variant_a_soc_qos_params; + dml_init->ip_caps = dml2_dcn401_max_ip_caps; + break; default: - ip_caps = &dml2_dcn401_max_ip_caps; + memset(dml_init, 0, sizeof(*dml_init)); + DC_ERR("unsupported dcn version for DML21!"); + return; } - - memcpy(&dml_init->ip_caps, ip_caps, sizeof(struct dml2_ip_capabilities)); } -void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, +static void override_dml_init_with_values_from_hardware_default(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc) { - if (config->use_native_soc_bb_construction) - dml21_init_socbb_params(dml_init, config, in_dc); - else - dml21_external_socbb_params(dml_init, config); + dml_init->soc_bb.dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; + dml_init->soc_bb.dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000; + dml_init->soc_bb.dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0; } -void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, +/* + * SMU stands for System Management Unit. It is a power management processor. + * It owns the initialization of dc's clock table and programming of clock values + * based on dc's requests. + * Our clock values in the base soc bb are dummy placeholders. The real clock values + * are retrieved from SMU firmware into dc's clock table at runtime. + * This function overrides our dummy placeholder values with the real values from + * dc's clock table.
+ */ +static void override_dml_init_with_values_from_smu( + struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc) { - if (config->use_native_soc_bb_construction) - dml21_init_ip_params(dml_init, config, in_dc); - else - dml21_external_ip_params(dml_init, config); -} - -void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config, const struct dc *in_dc) -{ int i; - const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params; const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table; - struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb; - struct dml2_soc_state_table *dml_clk_table = &dml_soc_bb->clk_table; - - /* override clocks if smu is present */ - if (in_dc->clk_mgr->funcs->is_smu_present && in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) { - /* dcfclk */ - if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) { - dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dcfclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz && - dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) { - dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000; - dml_clk_table->dcfclk.num_clk_values = i + 1; - } else { - dml_clk_table->dcfclk.clk_values_khz[i] = 0; - dml_clk_table->dcfclk.num_clk_values = i; - } + struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table; + + if (!in_dc->clk_mgr->funcs->is_smu_present || + !in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) + /* skip if smu is not present */ + return; + + /* dcfclk */ + if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) { + dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dcfclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz && + dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) { + dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000; + dml_clk_table->dcfclk.num_clk_values = i + 1; } else { - dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000; + dml_clk_table->dcfclk.clk_values_khz[i] = 0; + dml_clk_table->dcfclk.num_clk_values = i; } } else { - dml_clk_table->dcfclk.clk_values_khz[i] = 0; + dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000; } + } else { + dml_clk_table->dcfclk.clk_values_khz[i] = 0; } } + } - /* fclk */ - if (dc_clk_table->num_entries_per_clk.num_fclk_levels) { - dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->fclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz && - dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < 
dc_bw_params->dc_mode_limit.fclk_mhz) { - dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000; - dml_clk_table->fclk.num_clk_values = i + 1; - } else { - dml_clk_table->fclk.clk_values_khz[i] = 0; - dml_clk_table->fclk.num_clk_values = i; - } + /* fclk */ + if (dc_clk_table->num_entries_per_clk.num_fclk_levels) { + dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->fclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz && + dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) { + dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000; + dml_clk_table->fclk.num_clk_values = i + 1; } else { - dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000; + dml_clk_table->fclk.clk_values_khz[i] = 0; + dml_clk_table->fclk.num_clk_values = i; } } else { - dml_clk_table->fclk.clk_values_khz[i] = 0; + dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000; } + } else { + dml_clk_table->fclk.clk_values_khz[i] = 0; } } + } - /* uclk */ - if (dc_clk_table->num_entries_per_clk.num_memclk_levels) { - dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->uclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz && - dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) { - dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000; - dml_clk_table->uclk.num_clk_values = i + 1; - } else { - dml_clk_table->uclk.clk_values_khz[i] = 0; - dml_clk_table->uclk.num_clk_values = i; - } + /* uclk */ + if (dc_clk_table->num_entries_per_clk.num_memclk_levels) { + dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->uclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz && + dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) { + dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000; + dml_clk_table->uclk.num_clk_values = i + 1; } else { - dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000; + dml_clk_table->uclk.clk_values_khz[i] = 0; + dml_clk_table->uclk.num_clk_values = i; } } else { - dml_clk_table->uclk.clk_values_khz[i] = 0; + dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000; } + } else { + dml_clk_table->uclk.clk_values_khz[i] = 0; } } + } - /* dispclk */ - if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) { - dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dispclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz && - 
dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) { - dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000; - dml_clk_table->dispclk.num_clk_values = i + 1; - } else { - dml_clk_table->dispclk.clk_values_khz[i] = 0; - dml_clk_table->dispclk.num_clk_values = i; - } + /* dispclk */ + if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) { + dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dispclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz && + dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) { + dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000; + dml_clk_table->dispclk.num_clk_values = i + 1; } else { - dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000; + dml_clk_table->dispclk.clk_values_khz[i] = 0; + dml_clk_table->dispclk.num_clk_values = i; } } else { - dml_clk_table->dispclk.clk_values_khz[i] = 0; + dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000; } + } else { + dml_clk_table->dispclk.clk_values_khz[i] = 0; } } + } - /* dppclk */ - if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) { - dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dppclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz && - dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) { - dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000; - dml_clk_table->dppclk.num_clk_values = i + 1; - } else { - dml_clk_table->dppclk.clk_values_khz[i] = 0; - dml_clk_table->dppclk.num_clk_values = i; - } + /* dppclk */ + if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) { + dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dppclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz && + dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) { + dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000; + dml_clk_table->dppclk.num_clk_values = i + 1; } else { - dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000; + dml_clk_table->dppclk.clk_values_khz[i] = 0; + dml_clk_table->dppclk.num_clk_values = i; } } else { - dml_clk_table->dppclk.clk_values_khz[i] = 0; + dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000; } + } else { + dml_clk_table->dppclk.clk_values_khz[i] = 0; } } + } - /* dtbclk */ - if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) { - dml_clk_table->dtbclk.num_clk_values = 
dc_clk_table->num_entries_per_clk.num_dtbclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dtbclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz && - dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) { - dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000; - dml_clk_table->dtbclk.num_clk_values = i + 1; - } else { - dml_clk_table->dtbclk.clk_values_khz[i] = 0; - dml_clk_table->dtbclk.num_clk_values = i; - } + /* dtbclk */ + if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) { + dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dtbclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz && + dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) { + dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000; + dml_clk_table->dtbclk.num_clk_values = i + 1; } else { - dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000; + dml_clk_table->dtbclk.clk_values_khz[i] = 0; + dml_clk_table->dtbclk.num_clk_values = i; } } else { - dml_clk_table->dtbclk.clk_values_khz[i] = 0; + dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000; } + } else { + dml_clk_table->dtbclk.clk_values_khz[i] = 0; } } + } - /* socclk */ - if (dc_clk_table->num_entries_per_clk.num_socclk_levels) { - dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->socclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz && - dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) { - dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000; - dml_clk_table->socclk.num_clk_values = i + 1; - } else { - dml_clk_table->socclk.clk_values_khz[i] = 0; - dml_clk_table->socclk.num_clk_values = i; - } + /* socclk */ + if (dc_clk_table->num_entries_per_clk.num_socclk_levels) { + dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->socclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz && + dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) { + dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000; + dml_clk_table->socclk.num_clk_values = i + 1; } else { - dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000; + dml_clk_table->socclk.clk_values_khz[i] = 0; + dml_clk_table->socclk.num_clk_values = i; } } else { - dml_clk_table->socclk.clk_values_khz[i] = 0; + dml_clk_table->socclk.clk_values_khz[i] = 
dc_clk_table->entries[i].socclk_mhz * 1000; } + } else { + dml_clk_table->socclk.clk_values_khz[i] = 0; } } - - /* do not override phyclks for now */ - /* phyclk */ - // dml_clk_table->phyclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_mhz * 1000; - // } - - /* phyclk_d18 */ - // dml_clk_table->phyclk_d18.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d18_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk_d18.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d18_mhz * 1000; - // } - - /* phyclk_d32 */ - // dml_clk_table->phyclk_d32.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d32_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk_d32.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d32_mhz * 1000; - // } } +} - dml_soc_bb->dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; - dml_soc_bb->dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000; - dml_soc_bb->xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000; - dml_soc_bb->dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0; +static void override_dml_init_with_values_from_vbios( + struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params; + struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb; + struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table; - /* override bounding box paramters from VBIOS */ if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0) dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10; @@ -308,32 +293,120 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; } - /* override bounding box paramters from DC config */ - if (in_dc->bb_overrides.sr_exit_time_ns) { - dml_soc_bb->power_management_parameters.stutter_exit_latency_us = - in_dc->bb_overrides.sr_exit_time_ns / 1000.0; + dml_init->soc_bb.xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000; +} + + +static void override_dml_init_with_values_from_dmub(struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + /* + * TODO - There seem to be overlaps between the values overridden from + * dmub and vbios. Investigate and identify the values that DMUB needs + * to own.
+ */ +// const struct dmub_soc_bb_params *dmub_bb_params = +// (const struct dmub_soc_bb_params *)config->bb_from_dmub; + +// if (dmub_bb_params == NULL) +// return; + +// if (dmub_bb_params->dram_clk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us = +// (double) dmub_bb_params->dram_clk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->dram_clk_change_read_only_ns > 0) +// dml_init->soc_bb.power_management_parameters.dram_clk_change_read_only_us = +// (double) dmub_bb_params->dram_clk_change_read_only_ns / 1000.0; +// if (dmub_bb_params->dram_clk_change_write_only_ns > 0) +// dml_init->soc_bb.power_management_parameters.dram_clk_change_write_only_us = +// (double) dmub_bb_params->dram_clk_change_write_only_ns / 1000.0; +// if (dmub_bb_params->fclk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us = +// (double) dmub_bb_params->fclk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->g7_ppt_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.g7_ppt_blackout_us = +// (double) dmub_bb_params->g7_ppt_blackout_ns / 1000.0; +// if (dmub_bb_params->stutter_enter_plus_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us = +// (double) dmub_bb_params->stutter_enter_plus_exit_latency_ns / 1000.0; +// if (dmub_bb_params->stutter_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us = +// (double) dmub_bb_params->stutter_exit_latency_ns / 1000.0; +// if (dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.z8_stutter_enter_plus_exit_latency_us = +// (double) dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns / 1000.0; +// if (dmub_bb_params->z8_stutter_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.z8_stutter_exit_latency_us = +// (double) dmub_bb_params->z8_stutter_exit_latency_ns / 1000.0; +// if (dmub_bb_params->z8_min_idle_time_ns > 0) +// dml_init->soc_bb.power_management_parameters.z8_min_idle_time = +// (double) dmub_bb_params->z8_min_idle_time_ns / 1000.0; +// #ifndef TRIM_DML2_DCN6B_IP_SENSITIVE +// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.lpddr5_dram_clk_change_blackout_us = +// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->type_b_ppt_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.lpddr5_ppt_blackout_us = +// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0; +// #else +// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.type_b_dram_clk_change_blackout_us = +// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->type_b_ppt_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.type_b_ppt_blackout_us = +// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0; +// #endif +// if (dmub_bb_params->vmin_limit_dispclk_khz > 0) +// dml_init->soc_bb.vmin_limit.dispclk_khz = dmub_bb_params->vmin_limit_dispclk_khz; +// if (dmub_bb_params->vmin_limit_dcfclk_khz > 0) +// dml_init->soc_bb.vmin_limit.dcfclk_khz = dmub_bb_params->vmin_limit_dcfclk_khz; +// if (dmub_bb_params->g7_temperature_read_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.g7_temperature_read_blackout_us = +// (double) dmub_bb_params->g7_temperature_read_blackout_ns / 1000.0; +} 
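Each of the seven per-clock loops in override_dml_init_with_values_from_smu() above applies the same rule: copy the PM FW clock levels into the DML table in kHz and, when use_clock_dc_limits is set and a dc_mode_limit exists for that clock, clamp the first level above the limit to the limit itself and truncate the table there. The loops are unrolled per clock only because each level lives in a differently named field of dc_clk_table->entries[i]. A condensed sketch of the shared pattern, assuming a hypothetical clamp_clk_values() helper that is not part of this patch:

	static void clamp_clk_values(unsigned long *values_khz, unsigned int *num_values,
				     const unsigned int *entries_mhz, unsigned int num_levels,
				     unsigned int dc_mode_limit_mhz, bool use_clock_dc_limits)
	{
		int i;

		*num_values = num_levels;
		for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
			if (i >= (int)*num_values) {
				values_khz[i] = 0;	/* past the (possibly truncated) table */
			} else if (use_clock_dc_limits && dc_mode_limit_mhz &&
					entries_mhz[i] > dc_mode_limit_mhz) {
				if (i == 0 || entries_mhz[i - 1] < dc_mode_limit_mhz) {
					/* first level above the limit: clamp it and end the table here */
					values_khz[i] = dc_mode_limit_mhz * 1000;
					*num_values = i + 1;
				} else {
					/* a lower level already reached the limit: drop this one */
					values_khz[i] = 0;
					*num_values = i;
				}
			} else {
				values_khz[i] = entries_mhz[i] * 1000;	/* MHz from PM FW -> kHz for DML */
			}
		}
	}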
+ +static void override_dml_init_with_values_from_software_policy(struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + if (!config->use_native_soc_bb_construction) { + dml_init->soc_bb = config->external_socbb_ip_params->soc_bb; + dml_init->ip_caps = config->external_socbb_ip_params->ip_params; } - if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) { - dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us = + if (in_dc->bb_overrides.sr_exit_time_ns) + dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us = + in_dc->bb_overrides.sr_exit_time_ns / 1000.0; + + if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) + dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us = in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } - if (in_dc->bb_overrides.dram_clock_change_latency_ns) { - dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = + if (in_dc->bb_overrides.dram_clock_change_latency_ns) + dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us = in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } - if (in_dc->bb_overrides.fclk_clock_change_latency_ns) { - dml_soc_bb->power_management_parameters.fclk_change_blackout_us = + if (in_dc->bb_overrides.fclk_clock_change_latency_ns) + dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us = in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0; - } +} - //TODO - // if (in_dc->bb_overrides.dummy_clock_change_latency_ns) { - // dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = - // in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - // } +void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + populate_default_dml_init_params(dml_init, config, in_dc); + + override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc); + + override_dml_init_with_values_from_smu(dml_init, config, in_dc); + + override_dml_init_with_values_from_vbios(dml_init, config, in_dc); + + override_dml_init_with_values_from_dmub(dml_init, config, in_dc); + + override_dml_init_with_values_from_software_policy(dml_init, config, in_dc); } static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream) @@ -726,7 +799,6 @@ static void populate_dml21_surface_config_from_plane_state( switch (plane_state->tiling_info.gfxversion) { case DcGfxVersion7: case DcGfxVersion8: - // Placeholder for programming the array_mode break; case DcGfxVersion9: case DcGfxVersion10: @@ -889,10 +961,8 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm case DC_CM2_GPU_MEM_SIZE_171717: plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube; break; - case DC_CM2_GPU_MEM_SIZE_333333: - plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube; - break; case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: + default: //plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined break; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h index 73a013be1e48..9880d3e0398e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h @@ -17,9 +17,7 @@ struct dml2_context; struct 
dml2_configuration_options; struct dml2_initialize_instance_in_out; -void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); -void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); -void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); +void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx); void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context); void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c index 930e86cdb88a..ee721606b883 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c @@ -384,6 +384,7 @@ void dml21_build_fams2_programming(const struct dc *dc, /* reset fams2 data */ memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES); memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES); + memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2, 0, sizeof(union dmub_fams2_stream_static_sub_state_v2) * DML2_MAX_PLANES); memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config)); if (dml_ctx->v21.mode_programming.programming->fams2_required) { @@ -414,9 +415,16 @@ void dml21_build_fams2_programming(const struct dc *dc, memcpy(static_base_state, &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params, sizeof(union dmub_cmd_fams2_config)); - memcpy(static_sub_state, - &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params, - sizeof(union dmub_cmd_fams2_config)); + + if (dc->debug.fams_version.major == 3) { + memcpy(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[num_fams2_streams], + &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params_v2, + sizeof(union dmub_fams2_stream_static_sub_state_v2)); + } else { + memcpy(static_sub_state, + &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params, + sizeof(union dmub_cmd_fams2_config)); + } switch (dc->debug.fams_version.minor) { case 1: diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index 208d3651b6ba..03de3cf06ae5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -2,8 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
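In the dml21_utils.c hunk above, the static sub-state copy in dml21_build_fams2_programming() is now selected by FAMS interface version: major version 3 lands in the new fams2_stream_sub_params_v2 array (cleared alongside the other FAMS buffers), while earlier versions keep the original destination. The two source layouts alias one union inside dml2_per_stream_programming (added later in this patch), so only the destination and copy size differ. In outline, with prog standing in for the per-stream programming pointer used above:

	if (dc->debug.fams_version.major == 3)
		memcpy(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[num_fams2_streams],
		       &prog->fams2_sub_params_v2,
		       sizeof(union dmub_fams2_stream_static_sub_state_v2));
	else
		memcpy(static_sub_state,	/* points into fams2_stream_sub_params[] */
		       &prog->fams2_sub_params,
		       sizeof(union dmub_cmd_fams2_config));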
-#include <linux/vmalloc.h> - #include "dml2_internal_types.h" #include "dml_top.h" #include "dml2_core_dcn4_calcs.h" @@ -37,15 +35,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) return true; } -static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) +static void dml21_populate_configuration_options(const struct dc *in_dc, + struct dml2_context *dml_ctx, + const struct dml2_configuration_options *config) { - bool disable_fams2; - struct dml2_pmo_options *pmo_options = &dml_ctx->v21.dml_init.options.pmo_options; - - /* ODM options */ - pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm; - pmo_options->disable_dyn_odm_for_multi_stream = true; - pmo_options->disable_dyn_odm_for_stream_with_svp = true; + dml_ctx->config = *config; /* UCLK P-State options */ if (in_dc->debug.dml21_force_pstate_method) { @@ -55,52 +49,20 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex } else { dml_ctx->config.pmo.force_pstate_method_enable = false; } - - pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1); - - /* NOTE: DRR and SubVP Require FAMS2 */ - disable_fams2 = !in_dc->debug.fams2_config.bits.enable; - pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) || - in_dc->debug.force_disable_subvp || - disable_fams2; - pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) || - disable_fams2; - pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) || - disable_fams2; - pmo_options->disable_fams2 = disable_fams2; - - pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE || - in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY; - pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE; } -static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) +static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) { - switch (in_dc->ctx->dce_version) { - case DCN_VERSION_4_01: - (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp; - break; - default: - (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_invalid; - } - (*dml_ctx)->architecture = dml2_architecture_21; + dml_ctx->architecture = dml2_architecture_21; - /* Store configuration options */ - (*dml_ctx)->config = *config; + dml21_populate_configuration_options(in_dc, dml_ctx, config); DC_FP_START(); - /*Initialize SOCBB and DCNIP params */ - dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc); - dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc); - dml21_apply_soc_bb_overrides(&(*dml_ctx)->v21.dml_init, config, in_dc); - - /* apply debug overrides */ - dml21_apply_debug_options(in_dc, *dml_ctx, config); + dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, config, in_dc); - /*Initialize DML21 instance */ - dml2_initialize_instance(&(*dml_ctx)->v21.dml_init); + dml2_initialize_instance(&dml_ctx->v21.dml_init); DC_FP_END(); } @@ -111,7 +73,7 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s if (!dml21_allocate_memory(dml_ctx)) return false; - dml21_init(in_dc, dml_ctx, config); + dml21_init(in_dc, 
*dml_ctx, config); return true; } @@ -328,12 +290,13 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co return true; } -bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate) +bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, + enum dc_validate_mode validate_mode) { bool out = false; - /* Use dml_validate_only for fast_validate path */ - if (fast_validate) + /* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */ + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) out = dml21_check_mode_support(in_dc, context, dml_ctx); else out = dml21_mode_check_and_programming(in_dc, context, dml_ctx); @@ -496,7 +459,7 @@ bool dml21_create_copy(struct dml2_context **dst_dml_ctx, return true; } -void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) +void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) { dml21_init(in_dc, dml_ctx, config); } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h index 42e715024bc9..15f92029d2e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h @@ -14,6 +14,7 @@ struct dc; struct dc_state; struct dml2_configuration_options; struct dml2_context; +enum dc_validate_mode; /** * dml2_create - Creates dml21_context. @@ -33,22 +34,23 @@ void dml21_copy(struct dml2_context *dst_dml_ctx, struct dml2_context *src_dml_ctx); bool dml21_create_copy(struct dml2_context **dst_dml_ctx, struct dml2_context *src_dml_ctx); -void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config); +void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config); /** * dml21_validate - Determines if a display configuration is supported or not. * @in_dc: dc. * @context: dc_state to be validated. - * @fast_validate: Fast validate will not populate context.res_ctx. + * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX + * will not populate context.res_ctx. * * Based on fast_validate option internally would call: * - * -dml21_mode_check_and_programming - for non fast_validate option + * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option * Calculates if dc_state can be supported on the input display * configuration. If supported, generates the necessary HW * programming for the new dc_state. * - * -dml21_check_mode_support - for fast_validate option + * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX option * Calculates if dc_state can be supported for the input display * config. @@ -56,7 +58,8 @@ void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const s * separate dc_states for validation. * Return: True if mode is supported, false otherwise. 
*/ -bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate); +bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, + enum dc_validate_mode validate_mode); /* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */ void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h index c047d56527c4..a64ec4dcf11a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h @@ -43,5 +43,4 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o */ bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h index 84c90050668c..b05030926ce8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h @@ -46,6 +46,7 @@ struct dml2_display_dlg_regs { uint32_t dst_y_delta_drq_limit; uint32_t refcyc_per_vm_dmdata; uint32_t dmdata_dl_delta; + uint32_t dst_y_svp_drq_limit; // MRQ uint32_t refcyc_per_meta_chunk_vblank_l; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h index 255f05de362c..e8dc6471c0be 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h @@ -222,6 +222,7 @@ struct dml2_composition_cfg { struct { bool enabled; + bool upsp_enabled; struct { double h_ratio; double v_ratio; @@ -426,6 +427,7 @@ struct dml2_stream_parameters { struct dml2_display_cfg { bool gpuvm_enable; + bool ffbm_enable; bool hostvm_enable; // Allocate DET proportionally between streams based on pixel rate diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h index 5f0bc42d1d2f..8c9f414aa6bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h @@ -93,12 +93,15 @@ struct dml2_soc_power_management_parameters { double dram_clk_change_write_only_us; double fclk_change_blackout_us; double g7_ppt_blackout_us; + double g7_temperature_read_blackout_us; double stutter_enter_plus_exit_latency_us; double stutter_exit_latency_us; double z8_stutter_enter_plus_exit_latency_us; double z8_stutter_exit_latency_us; double z8_min_idle_time; double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE]; + double type_b_dram_clk_change_blackout_us; + double type_b_ppt_blackout_us; }; struct dml2_clk_table { @@ -130,6 +133,7 @@ struct dml2_soc_state_table { struct dml2_soc_vmin_clock_limits { unsigned long dispclk_khz; + unsigned long dcfclk_khz; }; struct dml2_soc_bb { @@ -138,6 +142,7 @@ struct dml2_soc_bb { struct dml2_soc_power_management_parameters power_management_parameters; struct dml2_soc_vmin_clock_limits vmin_limit; + double lower_bound_bandwidth_dchub; unsigned int dprefclk_mhz; unsigned int 
xtalclk_mhz; unsigned int pcie_refclk_mhz; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h index 0dbf886d8926..98c0234e2f47 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h @@ -53,7 +53,9 @@ enum dml2_output_type_and_rate__rate { dml2_output_rate_hdmi_rate_6x4 = 9, dml2_output_rate_hdmi_rate_8x4 = 10, dml2_output_rate_hdmi_rate_10x4 = 11, - dml2_output_rate_hdmi_rate_12x4 = 12 + dml2_output_rate_hdmi_rate_12x4 = 12, + dml2_output_rate_hdmi_rate_16x4 = 13, + dml2_output_rate_hdmi_rate_20x4 = 14 }; struct dml2_pmo_options { @@ -279,7 +281,10 @@ struct dml2_per_stream_programming { } phantom_stream; union dmub_cmd_fams2_config fams2_base_params; - union dmub_cmd_fams2_config fams2_sub_params; + union { + union dmub_cmd_fams2_config fams2_sub_params; + union dmub_fams2_stream_static_sub_state_v2 fams2_sub_params_v2; + }; }; //----------------- @@ -674,9 +679,14 @@ struct dml2_display_cfg_programming { // unlimited # of mcache struct dml2_mcache_surface_allocation non_optimized_mcache_allocation[DML2_MAX_PLANES]; + bool failed_prefetch; + bool failed_uclk_pstate; bool failed_mcache_validation; bool failed_dpmm; bool failed_mode_programming; + bool failed_mode_programming_dcfclk; + bool failed_mode_programming_prefetch; + bool failed_mode_programming_flip; bool failed_map_watermarks; } informative; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index 5b62cd19d979..b9cff2198511 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -4861,7 +4861,7 @@ static double get_urgent_bandwidth_required( double ReadBandwidthChroma[], double PrefetchBandwidthLuma[], double PrefetchBandwidthChroma[], - double PrefetchBandwidthOto[], + double PrefetchBandwidthMax[], double excess_vactive_fill_bw_l[], double excess_vactive_fill_bw_c[], double cursor_bw[], @@ -4925,9 +4925,9 @@ static double get_urgent_bandwidth_required( l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k]; l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur; l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; - l->flip_and_prefetch_bw_oto = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthOto[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; + l->flip_and_prefetch_bw_max = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthMax[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k]; - surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_oto); + 
surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_max); /* export peak required bandwidth for the surface */ surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]); @@ -5125,7 +5125,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->Tsw_est3 = 0.0; s->cursor_prefetch_bytes = 0; *p->prefetch_cursor_bw = 0; - *p->RequiredPrefetchBWOTO = 0.0; + *p->RequiredPrefetchBWMax = 0.0; dcc_mrq_enable = (p->dcc_enable && p->mrq_present); @@ -5356,7 +5356,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch * mp will fail if ms decides to use equ schedule and mp decides to use oto schedule * and the required bandwidth increases when going from ms to mp */ - *p->RequiredPrefetchBWOTO = s->prefetch_bw_oto; + *p->RequiredPrefetchBWMax = s->prefetch_bw_oto; #ifdef __DML_VBA_DEBUG__ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l); @@ -5718,8 +5718,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->TimeForFetchingVM = s->Tvm_equ; s->TimeForFetchingRowInVBlank = s->Tr0_equ; - *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; - *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; + *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; + *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; + + /* equ bw should be propagated so a ceiling of the equ bw is accounted for prior to mode programming. + * Overall bandwidth may be lower when going from mode support to mode programming but final pixel data + * bandwidth may end up higher than what was calculated in mode support. 
+ */ + *p->RequiredPrefetchBWMax = math_max2(s->prefetch_bw_equ, *p->RequiredPrefetchBWMax); #ifdef __DML_VBA_DEBUG__ DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__); @@ -6115,7 +6121,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, l->zero_array, //PrefetchBandwidthLuma, l->zero_array, //PrefetchBandwidthChroma, - l->zero_array, //PrefetchBWOTO + l->zero_array, //PrefetchBWMax l->zero_array, l->zero_array, l->zero_array, @@ -6152,7 +6158,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, l->zero_array, //PrefetchBandwidthLuma, l->zero_array, //PrefetchBandwidthChroma, - l->zero_array, //PrefetchBWOTO + l->zero_array, //PrefetchBWMax p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6189,7 +6195,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6226,7 +6232,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatch where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6263,7 +6269,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -7490,7 +7496,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c - CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k]; + CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &mode_lib->ms.RequiredPrefetchBWMax[k]; CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k]; CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k]; CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k]; @@ -7635,7 +7641,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c; calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma; calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax; 
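The RequiredPrefetchBWOTO -> RequiredPrefetchBWMax rename above is behavioral, not cosmetic: the value handed from mode support (ms) to mode programming (mp) is now the ceiling over both prefetch schedules rather than the oto bandwidth alone. A minimal sketch of the invariant, using illustrative locals rather than the driver's exact names:

	/* Sketch only: ms records the largest prefetch bandwidth it evaluated,
	 * so mp is never asked to satisfy more than what was validated. */
	double required_prefetch_bw_max = prefetch_bw_oto;	/* oto schedule is always computed */
	if (used_equ_schedule)					/* equ schedule selected during ms */
		required_prefetch_bw_max = math_max2(prefetch_bw_equ, required_prefetch_bw_max);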
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l; calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c; calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw; @@ -7802,7 +7808,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c; calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma; calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax; calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l; calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c; calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw; @@ -7908,6 +7914,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter } + static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params) { struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib; @@ -11256,7 +11263,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]; - CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &s->dummy_single_array[0][k]; + CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &s->dummy_single_array[0][k]; CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]; CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k]; CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k]; @@ -11399,7 +11406,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c; calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma; calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0]; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0]; calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l; calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c; calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw; @@ -11539,7 +11546,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_peak_bandwidth_params->meta_row_bw = mode_lib->mp.meta_row_bw; calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->mp.prefetch_cursor_bw; calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->mp.prefetch_vmrow_bw; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0]; 
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0]; calculate_peak_bandwidth_params->flip_bw = mode_lib->mp.final_flip_bw; calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->mp.UrgentBurstFactorLuma; calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->mp.UrgentBurstFactorChroma; @@ -11883,7 +11890,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex } //Maximum Bandwidth Used - s->TotalWRBandwidth = 0; + mode_lib->mp.TotalWRBandwidth = 0; for (k = 0; k < display_cfg->num_streams; ++k) { s->WRBandwidth = 0; if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) { @@ -11892,7 +11899,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex (display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height / ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000)) * (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0); - s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth; + mode_lib->mp.TotalWRBandwidth = mode_lib->mp.TotalWRBandwidth + s->WRBandwidth; } } @@ -13062,6 +13069,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4; else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4) out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4; + else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_16x4) + out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_16x4; + else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_20x4) + out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_20x4; out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k]; out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k]; @@ -13246,7 +13257,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k]; out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k]; - out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0; + out->informative.misc.WritebackRequiredBandwidth = mode_lib->mp.TotalWRBandwidth / 1000.0; out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k]; out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k]; out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k]; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h index bdee6ad7bc59..28687565ac22 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h @@ -102,6 +102,7 @@ struct dml2_core_internal_DmlPipe { double DCFClkDeepSleep; 
unsigned int DPPPerSurface; bool ScalerEnabled; + bool UPSPEnabled; enum dml2_rotation_angle RotationAngle; bool mirrored; unsigned int ViewportHeight; @@ -186,7 +187,9 @@ enum dml2_core_internal_output_type_rate { dml2_core_internal_output_rate_hdmi_rate_6x4 = 9, dml2_core_internal_output_rate_hdmi_rate_8x4 = 10, dml2_core_internal_output_rate_hdmi_rate_10x4 = 11, - dml2_core_internal_output_rate_hdmi_rate_12x4 = 12 + dml2_core_internal_output_rate_hdmi_rate_12x4 = 12, + dml2_core_internal_output_rate_hdmi_rate_16x4 = 13, + dml2_core_internal_output_rate_hdmi_rate_20x4 = 14 }; struct dml2_core_internal_watermarks { @@ -260,12 +263,14 @@ struct dml2_core_internal_mode_support_info { bool AvgBandwidthSupport; bool UrgVactiveBandwidthSupport; bool EnoughUrgentLatencyHidingSupport; + bool PrefetchScheduleSupported; bool PrefetchSupported; bool PrefetchBandwidthSupported; bool DynamicMetadataSupported; bool VRatioInPrefetchSupported; bool DISPCLK_DPPCLK_Support; bool TotalAvailablePipesSupport; + bool ODMSupport; bool ModeSupport; bool ViewportSizeSupport; @@ -314,9 +319,7 @@ struct dml2_core_internal_mode_support_info { double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; // same as urg_bandwidth, except not scaled by urg burst factor double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; - bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; - double max_urgent_latency_us; double max_non_urgent_latency_us; double avg_non_urgent_latency_us; @@ -329,6 +332,8 @@ struct dml2_core_internal_mode_support_info { bool temp_read_or_ppt_support; struct dml2_core_internal_watermarks watermarks; + bool dcfclk_support; + bool qos_bandwidth_support; }; struct dml2_core_internal_mode_support { @@ -350,9 +355,11 @@ struct dml2_core_internal_mode_support { double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting double GlobalDPPCLK; /// <brief the Max DPPCLK freq out of all pipes + double GlobalDTBCLK; /// <brief the Max DTBCLK freq out of all pipes double uclk_freq_mhz; double dram_bw_mbps; double max_dram_bw_mbps; + double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps double MaxFabricClock; /// <brief Basically just the clock freq at the min (or given) state double MaxDCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting @@ -394,9 +401,13 @@ struct dml2_core_internal_mode_support { double TWait[DML2_MAX_PLANES]; bool UnboundedRequestEnabled; + unsigned int compbuf_reserved_space_64b; + bool hw_debug5; unsigned int CompressedBufferSizeInkByte; double VRatioPreY[DML2_MAX_PLANES]; double VRatioPreC[DML2_MAX_PLANES]; + unsigned int req_per_swath_ub_l[DML2_MAX_PLANES]; + unsigned int req_per_swath_ub_c[DML2_MAX_PLANES]; unsigned int swath_width_luma_ub[DML2_MAX_PLANES]; unsigned int swath_width_chroma_ub[DML2_MAX_PLANES]; unsigned int RequiredSlots[DML2_MAX_PLANES]; @@ -417,8 +428,8 @@ struct dml2_core_internal_mode_support { double dst_y_prefetch[DML2_MAX_PLANES]; double LinesForVM[DML2_MAX_PLANES]; double LinesForDPTERow[DML2_MAX_PLANES]; - double SwathWidthYSingleDPP[DML2_MAX_PLANES]; - double SwathWidthCSingleDPP[DML2_MAX_PLANES]; + unsigned int SwathWidthYSingleDPP[DML2_MAX_PLANES]; + unsigned int SwathWidthCSingleDPP[DML2_MAX_PLANES]; 
unsigned int BytePerPixelY[DML2_MAX_PLANES]; unsigned int BytePerPixelC[DML2_MAX_PLANES]; double BytePerPixelInDETY[DML2_MAX_PLANES]; @@ -469,13 +480,58 @@ struct dml2_core_internal_mode_support { double mall_prefetch_sdp_overhead_factor[DML2_MAX_PLANES]; // overhead to the imall or phantom pipe double mall_prefetch_dram_overhead_factor[DML2_MAX_PLANES]; + bool is_using_mall_for_ss[DML2_MAX_PLANES]; + unsigned int meta_row_width_chroma[DML2_MAX_PLANES]; + unsigned int PixelPTEReqHeightC[DML2_MAX_PLANES]; + bool PTE_BUFFER_MODE[DML2_MAX_PLANES]; + unsigned int meta_req_height_chroma[DML2_MAX_PLANES]; + unsigned int meta_pte_bytes_per_frame_ub_c[DML2_MAX_PLANES]; + unsigned int dpde0_bytes_per_frame_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_width_luma_ub[DML2_MAX_PLANES]; + unsigned int meta_req_width[DML2_MAX_PLANES]; + unsigned int meta_row_width[DML2_MAX_PLANES]; + unsigned int PixelPTEReqWidthY[DML2_MAX_PLANES]; + unsigned int dpte_row_height_linear[DML2_MAX_PLANES]; + unsigned int PTERequestSizeY[DML2_MAX_PLANES]; + unsigned int dpte_row_width_chroma_ub[DML2_MAX_PLANES]; + unsigned int PixelPTEReqWidthC[DML2_MAX_PLANES]; + unsigned int meta_pte_bytes_per_frame_ub_l[DML2_MAX_PLANES]; + unsigned int dpte_row_height_linear_chroma[DML2_MAX_PLANES]; + unsigned int PTERequestSizeC[DML2_MAX_PLANES]; + unsigned int meta_req_height[DML2_MAX_PLANES]; + unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES]; + unsigned int meta_req_width_chroma[DML2_MAX_PLANES]; + unsigned int PixelPTEReqHeightY[DML2_MAX_PLANES]; + unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES]; + unsigned int vm_group_bytes[DML2_MAX_PLANES]; + unsigned int VReadyOffsetPix[DML2_MAX_PLANES]; + unsigned int VUpdateOffsetPix[DML2_MAX_PLANES]; + unsigned int VUpdateWidthPix[DML2_MAX_PLANES]; + double TSetup[DML2_MAX_PLANES]; + double Tdmdl_vm_raw[DML2_MAX_PLANES]; + double Tdmdl_raw[DML2_MAX_PLANES]; + unsigned int VStartupMin[DML2_MAX_PLANES]; /// <brief Minimum vstartup to meet the prefetch schedule (i.e. the prefetch solution can be found at this vstartup time); not the actual global sync vstartup pos. 
+ double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES]; + double MaxActiveFCLKChangeLatencySupported; + // Backend bool RequiresDSC[DML2_MAX_PLANES]; bool RequiresFEC[DML2_MAX_PLANES]; double OutputBpp[DML2_MAX_PLANES]; + double DesiredOutputBpp[DML2_MAX_PLANES]; + double PixelClockBackEnd[DML2_MAX_PLANES]; unsigned int DSCDelay[DML2_MAX_PLANES]; enum dml2_core_internal_output_type OutputType[DML2_MAX_PLANES]; enum dml2_core_internal_output_type_rate OutputRate[DML2_MAX_PLANES]; + bool TotalAvailablePipesSupportNoDSC; + bool TotalAvailablePipesSupportDSC; + unsigned int NumberOfDPPNoDSC; + unsigned int NumberOfDPPDSC; + enum dml2_odm_mode ODMModeNoDSC; + enum dml2_odm_mode ODMModeDSC; + double RequiredDISPCLKPerSurfaceNoDSC; + double RequiredDISPCLKPerSurfaceDSC; + unsigned int EstimatedNumberOfDSCSlices[DML2_MAX_PLANES]; // Bandwidth Related Info double BandwidthAvailableForImmediateFlip; @@ -484,8 +540,14 @@ struct dml2_core_internal_mode_support { double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK]; double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES]; double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES]; - /* oto bw should also be considered when calculating peak urgent bw to avoid situations oto/equ mismatches between ms and mp */ - double RequiredPrefetchBWOTO[DML2_MAX_PLANES]; + /* The max bandwidth calculated from the prefetch schedule should be considered in addition to the pixel data bw to avoid ms/mp mismatches. + * 1. oto bw should also be considered when calculating peak urgent bw to avoid oto/equ mismatches between ms and mp + * + * 2. equ bandwidth needs to be considered for calculating peak urgent bw when the equ schedule is used in mode support. + * Slight differences in variables may cause the pixel data bandwidth to be higher + * even though the overall equ prefetch bandwidth can be lower going from ms to mp + */ + double RequiredPrefetchBWMax[DML2_MAX_PLANES]; double cursor_bw[DML2_MAX_PLANES]; double prefetch_cursor_bw[DML2_MAX_PLANES]; double prefetch_vmrow_bw[DML2_MAX_PLANES]; @@ -538,7 +600,44 @@ struct dml2_core_internal_mode_support { bool mall_comb_mcache_c[DML2_MAX_PLANES]; bool lc_comb_mcache[DML2_MAX_PLANES]; + unsigned int vmpg_width_y[DML2_MAX_PLANES]; + unsigned int vmpg_height_y[DML2_MAX_PLANES]; + unsigned int vmpg_width_c[DML2_MAX_PLANES]; + unsigned int vmpg_height_c[DML2_MAX_PLANES]; + + unsigned int meta_row_height_luma[DML2_MAX_PLANES]; + unsigned int meta_row_height_chroma[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES]; + + unsigned int pstate_bytes_required_l[DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_c[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_line[DML2_MAX_PLANES]; + + unsigned int MaximumVStartup[DML2_MAX_PLANES]; + + double HostVMInefficiencyFactor; + double HostVMInefficiencyFactorPrefetch; + + unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES]; + double tdlut_opt_time[DML2_MAX_PLANES]; + double tdlut_drain_time[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES]; + + double Tvm_trips_flip[DML2_MAX_PLANES]; + double Tr0_trips_flip[DML2_MAX_PLANES]; + double Tvm_trips_flip_rounded[DML2_MAX_PLANES]; +
double Tr0_trips_flip_rounded[DML2_MAX_PLANES]; + unsigned int DSTYAfterScaler[DML2_MAX_PLANES]; + unsigned int DSTXAfterScaler[DML2_MAX_PLANES]; + + enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; }; /// @brief A mega structure that houses various info for model programming step. @@ -548,6 +647,7 @@ struct dml2_core_internal_mode_program { double FabricClock; /// <brief Basically just the clock freq at the min (or given) state //double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting double dram_bw_mbps; + double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps double uclk_freq_mhz; unsigned int NoOfDPP[DML2_MAX_PLANES]; enum dml2_odm_mode ODMMode[DML2_MAX_PLANES]; @@ -599,6 +699,8 @@ struct dml2_core_internal_mode_program { unsigned int MacroTileHeightC[DML2_MAX_PLANES]; unsigned int MacroTileWidthY[DML2_MAX_PLANES]; unsigned int MacroTileWidthC[DML2_MAX_PLANES]; + double MaximumSwathWidthLuma[DML2_MAX_PLANES]; + double MaximumSwathWidthChroma[DML2_MAX_PLANES]; bool surf_linear128_l[DML2_MAX_PLANES]; bool surf_linear128_c[DML2_MAX_PLANES]; @@ -631,6 +733,14 @@ struct dml2_core_internal_mode_program { double UrgentBurstFactorChroma[DML2_MAX_PLANES]; double UrgentBurstFactorChromaPre[DML2_MAX_PLANES]; + double MaximumSwathWidthInLineBufferLuma; + double MaximumSwathWidthInLineBufferChroma; + + unsigned int vmpg_width_y[DML2_MAX_PLANES]; + unsigned int vmpg_height_y[DML2_MAX_PLANES]; + unsigned int vmpg_width_c[DML2_MAX_PLANES]; + unsigned int vmpg_height_c[DML2_MAX_PLANES]; + double meta_row_bw[DML2_MAX_PLANES]; unsigned int meta_row_bytes[DML2_MAX_PLANES]; unsigned int meta_req_width[DML2_MAX_PLANES]; @@ -652,7 +762,9 @@ struct dml2_core_internal_mode_program { unsigned int PTERequestSizeC[DML2_MAX_PLANES]; double TWait[DML2_MAX_PLANES]; + double Tdmdl_vm_raw[DML2_MAX_PLANES]; double Tdmdl_vm[DML2_MAX_PLANES]; + double Tdmdl_raw[DML2_MAX_PLANES]; double Tdmdl[DML2_MAX_PLANES]; double TSetup[DML2_MAX_PLANES]; unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES]; @@ -684,6 +796,38 @@ struct dml2_core_internal_mode_program { double TCalc; unsigned int TotImmediateFlipBytes; + unsigned int MaxTotalDETInKByte; + unsigned int NomDETInKByte; + unsigned int MinCompressedBufferSizeInKByte; + double PixelClockBackEnd[DML2_MAX_PLANES]; + double OutputBpp[DML2_MAX_PLANES]; + bool dsc_enable[DML2_MAX_PLANES]; + unsigned int num_dsc_slices[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_line[DML2_MAX_PLANES]; + unsigned int MaxVStartupLines[DML2_MAX_PLANES]; /// <brief more like vblank for the plane's OTG + double HostVMInefficiencyFactor; + double HostVMInefficiencyFactorPrefetch; + unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES]; + double tdlut_opt_time[DML2_MAX_PLANES]; + double tdlut_drain_time[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES]; + double Tvm_trips_flip[DML2_MAX_PLANES]; + double Tr0_trips_flip[DML2_MAX_PLANES]; + double Tvm_trips_flip_rounded[DML2_MAX_PLANES]; + double Tr0_trips_flip_rounded[DML2_MAX_PLANES]; + bool 
immediate_flip_required; // any pipes need immediate flip + double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state + double TotalWRBandwidth; + double max_urgent_latency_us; + double df_response_time_us; + // ------------------- // Output // ------------------- @@ -694,9 +838,12 @@ struct dml2_core_internal_mode_program { // Support bool UrgVactiveBandwidthSupport; + bool PrefetchScheduleSupported; + bool UrgentBandwidthSupport; bool PrefetchModeSupported; // <brief Is the prefetch mode (bandwidth and latency) supported bool ImmediateFlipSupported; bool ImmediateFlipSupportedForPipe[DML2_MAX_PLANES]; + bool dcfclk_support; // Clock double Dcfclk; @@ -788,7 +935,7 @@ struct dml2_core_internal_mode_program { // RQ registers bool PTE_BUFFER_MODE[DML2_MAX_PLANES]; unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES]; - + double VActiveLatencyHidingUs[DML2_MAX_PLANES]; unsigned int SubViewportLinesNeededInMALL[DML2_MAX_PLANES]; bool is_using_mall_for_ss[DML2_MAX_PLANES]; @@ -1001,10 +1148,10 @@ struct dml2_core_calcs_mode_programming_locals { double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES]; double surface_dummy_bw0[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES]; - unsigned int dummy_integer_array[2][DML2_MAX_PLANES]; + unsigned int dummy_integer_array[4][DML2_MAX_PLANES]; enum dml2_output_encoder_class dummy_output_encoder_array[DML2_MAX_PLANES]; double dummy_single_array[2][DML2_MAX_PLANES]; - unsigned int dummy_long_array[4][DML2_MAX_PLANES]; + unsigned int dummy_long_array[8][DML2_MAX_PLANES]; bool dummy_boolean_array[2][DML2_MAX_PLANES]; bool dummy_boolean[2]; double dummy_single[2]; @@ -1028,7 +1175,6 @@ struct dml2_core_calcs_mode_programming_locals { double dlg_vblank_start; double LSetup; double blank_lines_remaining; - double TotalWRBandwidth; double WRBandwidth; struct dml2_core_internal_DmlPipe myPipe; double PixelClockBackEndFactor; @@ -1153,6 +1299,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params { unsigned int HostVMMinPageSize; unsigned int DCCMetaBufferSizeBytes; bool mrq_present; + enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; // Output bool *PTEBufferSizeNotExceeded; @@ -1389,7 +1536,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals { double vm_row_bw; double flip_and_active_bw; double flip_and_prefetch_bw; - double flip_and_prefetch_bw_oto; + double flip_and_prefetch_bw_max; double active_and_excess_bw; }; @@ -1418,6 +1565,7 @@ struct dml2_core_shared_CalculateFlipSchedule_locals { struct dml2_core_shared_rq_dlg_get_dlg_reg_locals { unsigned int plane_idx; + unsigned int stream_idx; enum dml2_source_format_class source_format; const struct dml2_timing_cfg *timing; bool dual_plane; @@ -1625,6 +1773,9 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params { double *BytePerPixDETC; unsigned int *DPPPerSurface; bool mrq_present; + unsigned int dummy[2][DML2_MAX_PLANES]; + unsigned int swath_width_luma_ub_single_dpp[DML2_MAX_PLANES]; + unsigned int swath_width_chroma_ub_single_dpp[DML2_MAX_PLANES]; // output unsigned int *req_per_swath_ub_l; @@ -1642,6 +1793,8 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params { unsigned int *DETBufferSizeC; unsigned int *full_swath_bytes_l; unsigned int *full_swath_bytes_c; + unsigned int *full_swath_bytes_single_dpp_l; + unsigned int *full_swath_bytes_single_dpp_c; bool 
*UnboundedRequestEnabled; unsigned int *compbuf_reserved_space_64b; unsigned int *CompressedBufferSizeInkByte; @@ -1801,7 +1954,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params { double *VRatioPrefetchC; double *RequiredPrefetchPixelDataBWLuma; double *RequiredPrefetchPixelDataBWChroma; - double *RequiredPrefetchBWOTO; + double *RequiredPrefetchBWMax; bool *NotEnoughTimeForDynamicMetadata; double *Tno_bw; double *Tno_bw_flip; @@ -2038,7 +2191,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params { double *surface_read_bandwidth_c; double *prefetch_bandwidth_l; double *prefetch_bandwidth_c; - double *prefetch_bandwidth_oto; + double *prefetch_bandwidth_max; double *excess_vactive_fill_bw_l; double *excess_vactive_fill_bw_c; double *cursor_bw; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c index 7a220c0141c2..5f301befed16 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c @@ -464,7 +464,7 @@ bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode) { - return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements); + return sw_mode == dml2_sw_linear; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c index f486b090bbfc..22969a533a7b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c @@ -389,9 +389,6 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo if (result) result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk); - if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk); - for (i = 0; i < DML2_MAX_DCN_PIPES; i++) { if (result) result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h index b226225103c3..611c80f4f1bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h @@ -10,15 +10,74 @@ #define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN #define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__) -/* ASSERT with message output */ -#define DML_ASSERT_MSG(condition, fmt, ...) \ - do { \ - if (!(condition)) { \ - DML_LOG_ERROR("DML ASSERT hit in %s line %d\n", __func__, __LINE__); \ - DML_LOG_ERROR(fmt, ## __VA_ARGS__); \ - DML_ASSERT(condition); \ - } \ - } while (0) +/* private helper macros */ +#define _BOOL_FORMAT(field) "%s", field ? 
"true" : "false" +#define _UINT_FORMAT(field) "%u", field +#define _INT_FORMAT(field) "%d", field +#define _DOUBLE_FORMAT(field) "%lf", field +#define _ELEMENT_FUNC "function" +#define _ELEMENT_COMP_IF "component_interface" +#define _ELEMENT_TOP_IF "top_interface" +#define _LOG_ENTRY(element) do { \ + DML_LOG_INTERNAL("<"element" name=\""); \ + DML_LOG_INTERNAL(__func__); \ + DML_LOG_INTERNAL("\">\n"); \ +} while (0) +#define _LOG_EXIT(element) DML_LOG_INTERNAL("</"element">\n") +#define _LOG_SCALAR(field, format) do { \ + DML_LOG_INTERNAL(#field" = "format(field)); \ + DML_LOG_INTERNAL("\n"); \ +} while (0) +#define _LOG_ARRAY(field, size, format) do { \ + DML_LOG_INTERNAL(#field " = ["); \ + for (int _i = 0; _i < (int) size; _i++) { \ + DML_LOG_INTERNAL(format(field[_i])); \ + if (_i + 1 == (int) size) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ +}} while (0) +#define _LOG_2D_ARRAY(field, size0, size1, format) do { \ + DML_LOG_INTERNAL(#field" = ["); \ + for (int _i = 0; _i < (int) size0; _i++) { \ + DML_LOG_INTERNAL("\n\t["); \ + for (int _j = 0; _j < (int) size1; _j++) { \ + DML_LOG_INTERNAL(format(field[_i][_j])); \ + if (_j + 1 == (int) size1) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_i + 1 == (int) size0) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ +} while (0) +#define _LOG_3D_ARRAY(field, size0, size1, size2, format) do { \ + DML_LOG_INTERNAL(#field" = ["); \ + for (int _i = 0; _i < (int) size0; _i++) { \ + DML_LOG_INTERNAL("\n\t["); \ + for (int _j = 0; _j < (int) size1; _j++) { \ + DML_LOG_INTERNAL("["); \ + for (int _k = 0; _k < (int) size2; _k++) { \ + DML_LOG_INTERNAL(format(field[_i][_j][_k])); \ + if (_k + 1 == (int) size2) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_j + 1 == (int) size1) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_i + 1 == (int) size0) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ +} while (0) /* fatal errors for unrecoverable DML states until a full reset */ #define DML_LOG_LEVEL_FATAL 0 @@ -28,7 +87,7 @@ #define DML_LOG_LEVEL_WARN 2 /* high level tracing of DML interfaces */ #define DML_LOG_LEVEL_INFO 3 -/* detailed tracing of DML internal components */ +/* tracing of DML internal executions */ #define DML_LOG_LEVEL_DEBUG 4 /* detailed tracing of DML calculation procedure */ #define DML_LOG_LEVEL_VERBOSE 5 @@ -37,30 +96,94 @@ #define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT #endif /* #ifndef DML_LOG_LEVEL */ +/* public macros for DML_LOG_LEVEL_FATAL and up */ #define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__) + +/* public macros for DML_LOG_LEVEL_ERROR and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR #define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__) +#define DML_ASSERT_MSG(condition, fmt, ...) \ + do { \ + if (!(condition)) { \ + DML_LOG_ERROR("ASSERT hit in %s line %d\n", __func__, __LINE__); \ + DML_LOG_ERROR(fmt, ## __VA_ARGS__); \ + DML_ASSERT(condition); \ + } \ + } while (0) #else #define DML_LOG_ERROR(fmt, ...) ((void)0) +#define DML_ASSERT_MSG(condition, fmt, ...) ((void)0) #endif + +/* public macros for DML_LOG_LEVEL_WARN and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN #define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__) #else #define DML_LOG_WARN(fmt, ...) 
((void)0) #endif + +/* public macros for DML_LOG_LEVEL_INFO and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO #define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__) +#define DML_LOG_TOP_IF_ENTER() _LOG_ENTRY(_ELEMENT_TOP_IF) +#define DML_LOG_TOP_IF_EXIT() _LOG_EXIT(_ELEMENT_TOP_IF) #else #define DML_LOG_INFO(fmt, ...) ((void)0) +#define DML_LOG_TOP_IF_ENTER() ((void)0) +#define DML_LOG_TOP_IF_EXIT() ((void)0) #endif + +/* public macros for DML_LOG_LEVEL_DEBUG and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG -#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL("[DML DEBUG] "fmt, ## __VA_ARGS__) +#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__) +#define DML_LOG_COMP_IF_ENTER() _LOG_ENTRY(_ELEMENT_COMP_IF) +#define DML_LOG_COMP_IF_EXIT() _LOG_EXIT(_ELEMENT_COMP_IF) +#define DML_LOG_FUNC_ENTER() _LOG_ENTRY(_ELEMENT_FUNC) +#define DML_LOG_FUNC_EXIT() _LOG_EXIT(_ELEMENT_FUNC) +#define DML_LOG_DEBUG_BOOL(field) _LOG_SCALAR(field, _BOOL_FORMAT) +#define DML_LOG_DEBUG_UINT(field) _LOG_SCALAR(field, _UINT_FORMAT) +#define DML_LOG_DEBUG_INT(field) _LOG_SCALAR(field, _INT_FORMAT) +#define DML_LOG_DEBUG_DOUBLE(field) _LOG_SCALAR(field, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) _LOG_ARRAY(field, size, _BOOL_FORMAT) +#define DML_LOG_DEBUG_ARRAY_UINT(field, size) _LOG_ARRAY(field, size, _UINT_FORMAT) +#define DML_LOG_DEBUG_ARRAY_INT(field, size) _LOG_ARRAY(field, size, _INT_FORMAT) +#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) _LOG_ARRAY(field, size, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _BOOL_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _UINT_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _INT_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _BOOL_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _UINT_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _INT_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _DOUBLE_FORMAT) #else #define DML_LOG_DEBUG(fmt, ...) 
((void)0) +#define DML_LOG_COMP_IF_ENTER() ((void)0) +#define DML_LOG_COMP_IF_EXIT() ((void)0) +#define DML_LOG_FUNC_ENTER() ((void)0) +#define DML_LOG_FUNC_EXIT() ((void)0) +#define DML_LOG_DEBUG_BOOL(field) ((void)0) +#define DML_LOG_DEBUG_UINT(field) ((void)0) +#define DML_LOG_DEBUG_INT(field) ((void)0) +#define DML_LOG_DEBUG_DOUBLE(field) ((void)0) +#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_UINT(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_INT(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) ((void)0) #endif + +/* public macros for DML_LOG_LEVEL_VERBOSE */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE -#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL("[DML VERBOSE] "fmt, ## __VA_ARGS__) +#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__) #else #define DML_LOG_VERBOSE(fmt, ...) ((void)0) -#endif +#endif /* #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE */ #endif /* __DML2_DEBUG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h index 00688b9f1df4..d52aa82283b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h @@ -202,6 +202,8 @@ struct dml2_core_mode_support_result { } active; unsigned int dispclk_khz; + unsigned int dpprefclk_khz; + unsigned int dtbrefclk_khz; unsigned int dcfclk_deepsleep_khz; unsigned int socclk_khz; @@ -446,13 +448,17 @@ struct dml2_core_internal_state_intermediates { }; struct dml2_core_mode_support_locals { - struct dml2_core_calcs_mode_support_ex mode_support_ex_params; + union { + struct dml2_core_calcs_mode_support_ex mode_support_ex_params; + }; struct dml2_display_cfg svp_expanded_display_cfg; struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params; }; struct dml2_core_mode_programming_locals { - struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params; + union { + struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params; + }; struct dml2_display_cfg svp_expanded_display_cfg; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 6b3b8803e0ae..a56e75cdf712 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -868,7 +868,7 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP - * 2. Full update (i.e. !fast_validate) + * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING) * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) * 4. Display configuration passes validation * 5. 
(Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 208630754c8a..3b866e876bf4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -1189,22 +1189,6 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2 return location; } -static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struct dc_state *state, struct dml_display_cfg_st *dml_dispcfg) -{ - int i; - - if (state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - ASSERT(state->stream_count == 1); - dml_dispcfg->timing.DRRDisplay[0] = true; - } else if (state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid) { - - for (i = 0; i < dml_dispcfg->num_timings; i++) { - if (dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i] == state->streams[state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index]->stream_id) - dml_dispcfg->timing.DRRDisplay[i] = true; - } - } -} - static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state) { unsigned int i; @@ -1437,9 +1421,6 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat } } } - - if (!dml2->config.use_native_pstate_optimization) - apply_legacy_svp_drr_settings(dml2, context, dml_dispcfg); } void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 525b7d04bf84..0318260370ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -24,8 +24,6 @@ * */ -#include <linux/vmalloc.h> - #include "display_mode_core.h" #include "dml2_internal_types.h" #include "dml2_utils.h" @@ -95,12 +93,17 @@ static void map_hw_resources(struct dml2_context *dml2, static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2, const struct dml_display_cfg_st *display_cfg, - struct dml_mode_support_info_st *evaluation_info) + struct dml_mode_support_info_st *evaluation_info, + enum dc_validate_mode validate_mode) { struct dml2_wrapper_scratch *s = &dml2->v20.scratch; s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx; s->mode_support_params.in_display_cfg = display_cfg; + if (validate_mode == DC_VALIDATE_MODE_ONLY) + s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1; + else + s->mode_support_params.in_start_state_idx = 0; s->mode_support_params.out_evaluation_info = evaluation_info; memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st)); @@ -112,10 +115,8 @@ static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2, static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p) { int unused_dpps = p->ip_params->max_num_dpp; - int i, j; - int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count; - int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index; - float frame_time_sec, max_frame_time_sec; + int i; + int odms_needed; int largest_blend_and_timing = 0; bool optimization_done = false; @@ -130,79 +131,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe if 
(p->new_display_config != p->cur_display_config) *p->new_display_config = *p->cur_display_config; - // Optimize P-State Support - if (dml2->config.use_native_pstate_optimization) { - if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) { - // Find a display with < 120Hz refresh rate with maximal refresh rate that's not already subvp - subvp_timing_to_add = -1; - subvp_surface_to_add = -1; - max_frame_time_sec = 0; - surface_count = 0; - for (i = 0; i < (int) p->cur_display_config->num_timings; i++) { - refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000, - (p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i])); - if (refresh_rate_hz < 120) { - // Check its upstream surfaces to see if this one could be converted to subvp. - dpps_needed = 0; - for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) { - if (p->cur_display_config->plane.BlendingAndTiming[j] == i && - p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) { - dpps_needed += p->cur_mode_support_info->DPPPerSurface[j]; - subvp_surface_to_add = j; - surface_count++; - } - } - - if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) { - frame_time_sec = (float)1 / refresh_rate_hz; - if (frame_time_sec > max_frame_time_sec) { - max_frame_time_sec = frame_time_sec; - subvp_timing_to_add = i; - } - } - } - } - if (subvp_timing_to_add >= 0) { - new_timing_index = p->new_display_config->num_timings++; - new_surface_index = p->new_display_config->num_surfaces++; - // Add a phantom pipe reflecting the main pipe's timing - dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add); - - pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us + - p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) * - (p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) / - (double)p->new_display_config->timing.HTotal[subvp_timing_to_add]); - - subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines; - - p->new_display_config->timing.VActive[new_timing_index] = subvp_height; - p->new_display_config->timing.VTotal[new_timing_index] = subvp_height + - p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add]; - - p->new_display_config->output.OutputDisabled[new_timing_index] = true; - - p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport; - - dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add); - dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add); - - p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height; - p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height; - p->new_display_config->plane.ViewportStationary[new_surface_index] = false; - - p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable; - p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe; - - p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0; - - p->new_policy->ImmediateFlipRequirement[new_surface_index] = 
dml_immediate_flip_not_required; - - p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index; - - optimization_done = true; - } - } - } // Optimize Clocks if (!optimization_done) { @@ -226,7 +154,8 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe return optimization_done; } -static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state) +static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state, + enum dc_validate_mode validate_mode) { struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch; struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch; @@ -268,7 +197,8 @@ static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *d dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us; } - dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info); + dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info, + validate_mode); if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) { map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info); @@ -333,7 +263,8 @@ static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const } static bool dml_mode_support_wrapper(struct dml2_context *dml2, - struct dc_state *display_state) + struct dc_state *display_state, + enum dc_validate_mode validate_mode) { struct dml2_wrapper_scratch *s = &dml2->v20.scratch; unsigned int result = 0, i; @@ -369,7 +300,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); if (result) result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info); @@ -390,7 +322,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, dml2->v20.dml_core_ctx.policy = s->new_policy; optimized_result = pack_and_call_dml_mode_support_ex(dml2, &s->new_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); if (optimized_result) optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info); @@ -409,7 +342,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, if (!optimized_result) { result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); } } @@ -419,118 +353,7 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, return result; } -static int find_drr_eligible_stream(struct dc_state *display_state) -{ - int i; - - for (i = 0; i < display_state->stream_count; i++) { - if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE - && display_state->streams[i]->ignore_msa_timing_param) { - // Use ignore_msa_timing_param flag to identify as DRR - return i; - } - } - - return -1; -} - -static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state) -{ - struct dml2_wrapper_scratch *s = &dml2->v20.scratch; - bool pstate_optimization_done = false; - bool pstate_optimization_success = false; - 
bool result = false; - int drr_display_index = 0, non_svp_streams = 0; - bool force_svp = dml2->config.svp_pstate.force_enable_subvp; - - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false; - - result = dml_mode_support_wrapper(dml2, display_state); - - if (!result) { - pstate_optimization_done = true; - } else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } - - if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) { - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true; - - result = dml_mode_support_wrapper(dml2, display_state); - } else { - non_svp_streams = display_state->stream_count; - - while (!pstate_optimization_done) { - result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true); - - // Always try adding SVP first - if (result) - result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info); - else - pstate_optimization_done = true; - - - if (result) { - result = dml_mode_support_wrapper(dml2, display_state); - } else { - pstate_optimization_done = true; - } - - if (result) { - non_svp_streams--; - - if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) { - if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } else { - pstate_optimization_success = false; - pstate_optimization_done = false; - } - } else { - drr_display_index = find_drr_eligible_stream(display_state); - - // If there is only 1 remaining non SubVP pipe that is DRR, check static - // schedulability for SubVP + DRR. 
- if (non_svp_streams == 1 && drr_display_index >= 0) { - if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) { - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index; - result = dml_mode_support_wrapper(dml2, display_state); - } - - if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } else { - pstate_optimization_success = false; - pstate_optimization_done = false; - } - } - - if (pstate_optimization_success) { - pstate_optimization_done = true; - } else { - pstate_optimization_done = false; - } - } - } - } - - if (!pstate_optimization_success) { - dml2_svp_remove_all_phantom_pipes(dml2, display_state); - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false; - result = dml_mode_support_wrapper(dml2, display_state); - } - - return result; -} - -static bool call_dml_mode_support_and_programming(struct dc_state *context) +static bool call_dml_mode_support_and_programming(struct dc_state *context, enum dc_validate_mode validate_mode) { unsigned int result = 0; unsigned int min_state = 0; @@ -544,16 +367,13 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context) struct dml2_wrapper_scratch *s = &dml2->v20.scratch; if (!context->streams[0]->sink->link->dc->caps.is_apu) { - min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context); + min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context, + validate_mode); ASSERT(min_state_for_g6_temp_read >= 0); } - if (!dml2->config.use_native_pstate_optimization) { - result = optimize_pstate_with_svp_and_drr(dml2, context); - } else { - result = dml_mode_support_wrapper(dml2, context); - } + result = dml_mode_support_wrapper(dml2, context, validate_mode); /* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on. * Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly. @@ -575,7 +395,8 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context) return result; } -static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context) +static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context, + enum dc_validate_mode validate_mode) { struct dml2_context *dml2 = context->bw_ctx.dml2; struct dml2_wrapper_scratch *s = &dml2->v20.scratch; @@ -611,7 +432,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4); - result = call_dml_mode_support_and_programming(context); + result = call_dml_mode_support_and_programming(context, validate_mode); /* Call map dc pipes to map the pipes based on the DML output. For correctly determining if recalculation * is required or not, the resource context needs to correctly reflect the number of active pipes. We would * only know the correct number of active pipes after dml2_map_dc_pipes is called.
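Each wrapper above now threads validate_mode down to pack_and_call_dml_mode_support_ex, where it picks the starting DPM state for the support scan (hunk shown earlier in this patch). Restated as a sketch, with the rationale as a comment; the rationale is an inference, not stated in the commit:

	/* DC_VALIDATE_MODE_ONLY only needs a supported/unsupported answer, so
	 * start at the highest state: if the mode fails there, it fails
	 * everywhere. Full programming still scans up from state 0. */
	if (validate_mode == DC_VALIDATE_MODE_ONLY)
		s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
	else
		s->mode_support_params.in_start_state_idx = 0;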
@@ -628,7 +449,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch); if (need_recalculation) { /* Engage the DML again if recalculation is required. */ - call_dml_mode_support_and_programming(context); + call_dml_mode_support_and_programming(context, validate_mode); if (!dml2->config.skip_hw_state_mapping) { dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state); } @@ -684,7 +505,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s return result; } -static bool dml2_validate_only(struct dc_state *context) +static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode validate_mode) { struct dml2_context *dml2; unsigned int result = 0; @@ -708,7 +529,8 @@ static bool dml2_validate_only(struct dc_state *context) result = pack_and_call_dml_mode_support_ex(dml2, &dml2->v20.scratch.cur_display_config, - &dml2->v20.scratch.mode_support_info); + &dml2->v20.scratch.mode_support_info, + validate_mode); if (result) result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info); @@ -723,7 +545,8 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d } } -bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate) +bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, + enum dc_validate_mode validate_mode) { bool out = false; @@ -733,17 +556,17 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2 /* DML2.1 validation path */ if (dml2->architecture == dml2_architecture_21) { - out = dml21_validate(in_dc, context, dml2, fast_validate); + out = dml21_validate(in_dc, context, dml2, validate_mode); return out; } DC_FP_START(); - /* Use dml_validate_only for fast_validate path */ - if (fast_validate) - out = dml2_validate_only(context); + /* Use dml_validate_only for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX paths */ + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + out = dml2_validate_only(context, validate_mode); else - out = dml2_validate_and_build_resource(in_dc, context); + out = dml2_validate_and_build_resource(in_dc, context, validate_mode); DC_FP_END(); @@ -757,8 +580,8 @@ static inline struct dml2_context *dml2_allocate_memory(void) static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { - if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) { - dml21_reinit(in_dc, dml2, config); + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) { + dml21_reinit(in_dc, *dml2, config); return; } @@ -803,9 +626,7 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) - && (in_dc->ctx->dce_version == DCN_VERSION_4_01 - )) + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) return dml21_create(in_dc, dml2, config); // Allocate Mode Lib Ctx @@ -874,8 +695,8 @@ void dml2_reinit(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { - if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) { - dml21_reinit(in_dc, dml2, config); + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) { + dml21_reinit(in_dc, *dml2, config); return; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 5100f269368e..c384e141cebc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -240,7 +240,7 @@ struct dml2_configuration_options { bool use_clock_dc_limits; bool gpuvm_enable; bool force_tdlut_enable; - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; }; /* @@ -272,7 +272,7 @@ void dml2_reinit(const struct dc *in_dc, * dml2_validate - Determines if a display configuration is supported or not. * @in_dc: dc. * @context: dc_state to be validated. - * @fast_validate: Fast validate will not populate context.res_ctx. + * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX will not populate context.res_ctx. * * DML1.0 compatible interface for validation. * @@ -295,7 +295,7 @@ void dml2_reinit(const struct dc *in_dc, bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, - bool fast_validate); + enum dc_validate_mode validate_mode); /* * dml2_extract_dram_and_fclk_change_support - Extracts the FCLK and UCLK change support info. 
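The dml2_wrapper hunks above migrate dml2_validate() and its callees from the old fast_validate boolean to the three-state enum dc_validate_mode. A minimal, hypothetical shim, not part of this patch and assuming only the three enumerator names visible in the hunks above, illustrates how the old boolean convention maps onto the new enum:

/* Hypothetical compatibility helper, for illustration only. */
static inline enum dc_validate_mode dc_validate_mode_from_fast(bool fast_validate)
{
	/* The old fast path never populated context.res_ctx; of the new modes,
	 * DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX keep that
	 * behavior, while DC_VALIDATE_MODE_AND_PROGRAMMING also builds the
	 * resource state via dml2_validate_and_build_resource().
	 */
	return fast_validate ? DC_VALIDATE_MODE_ONLY
			     : DC_VALIDATE_MODE_AND_PROGRAMMING;
}

Callers that previously passed fast_validate == false therefore take the full validate-and-program path, matching the dml2_validate() dispatch shown in the hunk above.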
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c index 97bf26fa3573..36187f890d5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c @@ -231,7 +231,7 @@ static struct dpp_funcs dcn401_dpp_funcs = { .dpp_program_regamma_pwl = NULL, .dpp_set_pre_degam = dpp3_set_pre_degam, .dpp_program_input_lut = NULL, - .dpp_full_bypass = dpp401_full_bypass, + .dpp_full_bypass = NULL, .dpp_setup = dpp401_dpp_setup, .dpp_program_degamma_pwl = NULL, .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h index ecaa976e1f52..5a6a861402b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h @@ -641,6 +641,7 @@ uint32_t ISHARP_DELTA_DATA; \ uint32_t ISHARP_DELTA_INDEX; \ uint32_t ISHARP_NLDELTA_SOFT_CLIP + struct dcn401_dpp_registers { DPP_REG_VARIABLE_LIST_DCN401; }; @@ -683,8 +684,6 @@ void dpp401_dscl_set_scaler_manual_scale( struct dpp *dpp_base, const struct scaler_data *scl_data); -void dpp401_full_bypass(struct dpp *dpp_base); - void dpp401_dpp_setup( struct dpp *dpp_base, enum surface_pixel_format format, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index 712aff7e17f7..7aab77b58869 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -88,30 +88,6 @@ enum dscl_mode_sel { DSCL_MODE_DSCL_BYPASS = 6 }; -void dpp401_full_bypass(struct dpp *dpp_base) -{ - struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); - - /* Input pixel format: ARGB8888 */ - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, 0x8); - - /* Zero expansion */ - REG_SET_3(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_CONTROL__ALPHA_EN, 0, - FORMAT_EXPANSION_MODE, 0); - - /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ - if (dpp->tf_mask->CM_BYPASS_EN) - REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); - else - REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); - - /* Setting degamma bypass for now */ - REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); -} - void dpp401_set_cursor_attributes( struct dpp *dpp_base, struct dc_cursor_attributes *cursor_attributes) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 11535922b5ff..a454d16e6586 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -30,6 +30,9 @@ #include "rc_calc.h" #include "fixed31_32.h" +#include "clk_mgr.h" +#include "resource.h" + #define DC_LOGGER \ dsc->ctx->logger @@ -149,6 +152,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing( } /* Forward Declarations */ +static unsigned int get_min_slice_count_for_odm( + const struct display_stream_compressor *dsc, + const struct dsc_enc_caps *dsc_enc_caps, + const struct dc_crtc_timing *timing); + static bool decide_dsc_bandwidth_range( const uint32_t min_bpp_x16, const uint32_t max_bpp_x16, @@ -183,6 +191,7 @@ static bool setup_dsc_config( const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, const enum dc_link_encoding_format link_encoding, + int min_slice_count, struct dc_dsc_config *dsc_cfg); static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int
*buff_block_size) @@ -442,7 +451,6 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, return true; } - /* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and * timing's pixel clock and uncompressed bandwidth. * If DSC is not possible, leave '*range' untouched. @@ -458,6 +466,7 @@ bool dc_dsc_compute_bandwidth_range( struct dc_dsc_bw_range *range) { bool is_dsc_possible = false; + unsigned int min_slice_count; struct dsc_enc_caps dsc_enc_caps; struct dsc_enc_caps dsc_common_caps; struct dc_dsc_config config = {0}; @@ -469,12 +478,14 @@ bool dc_dsc_compute_bandwidth_range( get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); + min_slice_count = get_min_slice_count_for_odm(dsc, &dsc_enc_caps, timing); + is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, timing->pixel_encoding, &dsc_common_caps); if (is_dsc_possible) is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, - &options, link_encoding, &config); + &options, link_encoding, min_slice_count, &config); if (is_dsc_possible) is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, @@ -525,20 +536,152 @@ void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc, DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp); } + +static void build_dsc_enc_combined_slice_caps( + const struct dsc_enc_caps *single_dsc_enc_caps, + struct dsc_enc_caps *dsc_enc_caps, + unsigned int max_odm_combine_factor) +{ + /* 1-16 slice configurations, single DSC */ + dsc_enc_caps->slice_caps.raw |= single_dsc_enc_caps->slice_caps.raw; + + /* 2x DSC's */ + if (max_odm_combine_factor >= 2) { + /* 1 + 1 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1; + + /* 2 + 2 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2; + + /* 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + + /* 8 + 8 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_8; + } + + /* 3x DSC's */ + if (max_odm_combine_factor >= 3) { + /* 4 + 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + } + + /* 4x DSC's */ + if (max_odm_combine_factor >= 4) { + /* 1 + 1 + 1 + 1 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1; + + /* 2 + 2 + 2 + 2 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2; + + /* 3 + 3 + 3 + 3 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_3; + + /* 4 + 4 + 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + } +} + +static void build_dsc_enc_caps( + const struct display_stream_compressor *dsc, + struct dsc_enc_caps *dsc_enc_caps) +{ + unsigned int max_dscclk_khz; + unsigned int num_dsc; + unsigned int max_odm_combine_factor; + struct dsc_enc_caps single_dsc_enc_caps; + + struct dc *dc; + + memset(&single_dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); + + if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps) + return; + + dc = dsc->ctx->dc; + + if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool) + return; + + /* get max DSCCLK from clk_mgr */ + max_dscclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr, CLK_TYPE_DSCCLK); + + 
dsc->funcs->dsc_get_single_enc_caps(&single_dsc_enc_caps, max_dscclk_khz); + + /* global capabilities */ + dsc_enc_caps->dsc_version = single_dsc_enc_caps.dsc_version; + dsc_enc_caps->lb_bit_depth = single_dsc_enc_caps.lb_bit_depth; + dsc_enc_caps->is_block_pred_supported = single_dsc_enc_caps.is_block_pred_supported; + dsc_enc_caps->max_slice_width = single_dsc_enc_caps.max_slice_width; + dsc_enc_caps->bpp_increment_div = single_dsc_enc_caps.bpp_increment_div; + dsc_enc_caps->color_formats.raw = single_dsc_enc_caps.color_formats.raw; + dsc_enc_caps->color_depth.raw = single_dsc_enc_caps.color_depth.raw; + + /* expand per DSC capabilities to global */ + max_odm_combine_factor = dc->caps.max_odm_combine_factor; + num_dsc = dc->res_pool->res_cap->num_dsc; + max_odm_combine_factor = min(max_odm_combine_factor, num_dsc); + dsc_enc_caps->max_total_throughput_mps = + single_dsc_enc_caps.max_total_throughput_mps * + max_odm_combine_factor; + + /* check slice counts possible with ODM combine */ + build_dsc_enc_combined_slice_caps(&single_dsc_enc_caps, dsc_enc_caps, max_odm_combine_factor); +} + +static inline uint32_t dsc_div_by_10_round_up(uint32_t value) +{ + return (value + 9) / 10; +} + +static unsigned int get_min_slice_count_for_odm( + const struct display_stream_compressor *dsc, + const struct dsc_enc_caps *dsc_enc_caps, + const struct dc_crtc_timing *timing) +{ + unsigned int max_dispclk_khz; + + /* get max pixel rate and combine caps */ + max_dispclk_khz = dsc_enc_caps->max_total_throughput_mps * 1000; + if (dsc && dsc->ctx->dc) { + if (dsc->ctx->dc->clk_mgr && + dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz) { + /* dispclk is available */ + max_dispclk_khz = dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz(dsc->ctx->dc->clk_mgr, CLK_TYPE_DISPCLK); + } + } + + /* consider minimum odm slices required due to + * 1) display pipe throughput (dispclk) + * 2) max image width per slice + */ + return dc_fixpt_ceil(dc_fixpt_max( + dc_fixpt_div_int(dc_fixpt_from_int(dsc_div_by_10_round_up(timing->pix_clk_100hz)), + max_dispclk_khz), // throughput + dc_fixpt_div_int(dc_fixpt_from_int(timing->h_addressable + timing->h_border_left + timing->h_border_right), + dsc_enc_caps->max_slice_width))); // slice width +} + static void get_dsc_enc_caps( const struct display_stream_compressor *dsc, struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) { - // This is a static HW query, so we can use any DSC - memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); - if (dsc) { + + if (!dsc) + return; + + /* check if reported cap is global or only for a single DCN DSC enc */ + if (dsc->funcs->dsc_get_enc_caps) { if (!dsc->ctx->dc->debug.disable_dsc) dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); - if (dsc->ctx->dc->debug.native422_support) - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; + } else { + build_dsc_enc_caps(dsc, dsc_enc_caps); } + + if (dsc->ctx->dc->debug.native422_support) + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; } /* Returns 'false' if no intersection was found for at least one capability.
@@ -621,11 +764,6 @@ static bool intersect_dsc_caps( return true; } -static inline uint32_t dsc_div_by_10_round_up(uint32_t value) -{ - return (value + 9) / 10; -} - static uint32_t compute_bpp_x16_from_target_bandwidth( const uint32_t bandwidth_in_kbps, const struct dc_crtc_timing *timing, @@ -910,11 +1048,11 @@ static bool setup_dsc_config( const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, const enum dc_link_encoding_format link_encoding, + int min_slices_h, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; int max_slices_h = 0; - int min_slices_h = 0; int num_slices_h = 0; int pic_width; int slice_width; @@ -1018,12 +1156,9 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - min_slices_h = pic_width / dsc_common_caps.max_slice_width; - if (pic_width % dsc_common_caps.max_slice_width) - min_slices_h++; - min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h); + /* increase minimum slice count to meet sink throughput limitations */ while (min_slices_h <= max_slices_h) { int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h; if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000) @@ -1032,14 +1167,12 @@ static bool setup_dsc_config( min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h); } - is_dsc_possible = (min_slices_h <= max_slices_h); - - if (pic_width % min_slices_h != 0) - min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first? - - if (min_slices_h == 0 && max_slices_h == 0) - is_dsc_possible = false; + /* increase minimum slice count to meet divisibility requirements */ + while (pic_width % min_slices_h != 0 && min_slices_h <= max_slices_h) { + min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h); + } + is_dsc_possible = (min_slices_h <= max_slices_h) && max_slices_h != 0; if (!is_dsc_possible) goto done; @@ -1162,12 +1295,19 @@ bool dc_dsc_compute_config( { bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; - + unsigned int min_slice_count; get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); + + min_slice_count = get_min_slice_count_for_odm(dsc, &dsc_enc_caps, timing); + is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, options, link_encoding, dsc_cfg); + timing, + options, + link_encoding, + min_slice_count, + dsc_cfg); return is_dsc_possible; } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c index 4222679fd4c9..7bd92ae8b13e 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c @@ -9,19 +9,14 @@ #include "dsc/dscc_types.h" #include "dsc/rc_calc.h" -#define MAX_THROUGHPUT_PER_DSC_100HZ 20000000 -#define MAX_DSC_UNIT_COMBINE 4 - static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals); /* Object I/F functions */ //static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); //static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps); -static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc); -static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); +static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); 
static const struct dsc_funcs dcn401_dsc_funcs = { - .dsc_get_enc_caps = dsc401_get_enc_caps, .dsc_read_state = dsc401_read_state, .dsc_validate_stream = dsc401_validate_stream, .dsc_set_config = dsc401_set_config, @@ -30,6 +25,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = { .dsc_disable = dsc401_disable, .dsc_disconnect = dsc401_disconnect, .dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear, + .dsc_get_single_enc_caps = dsc401_get_single_enc_caps, }; /* Macro definitions for REG_SET macros */ @@ -66,22 +62,14 @@ void dsc401_construct(struct dcn401_dsc *dsc, dsc->max_image_width = 5184; } -static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) +static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz) { - int min_dsc_unit_required = (pixel_clock_100Hz + MAX_THROUGHPUT_PER_DSC_100HZ - 1) / MAX_THROUGHPUT_PER_DSC_100HZ; - dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */ - /* 1 slice is only supported with 1 DSC unit */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = min_dsc_unit_required == 1 ? 1 : 0; - /* 2 slice is only supported with 1 or 2 DSC units */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = (min_dsc_unit_required == 1 || min_dsc_unit_required == 2) ? 1 : 0; - /* 3 slice is only supported with 1 DSC unit */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = min_dsc_unit_required == 1 ? 1 : 0; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1; dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1; dsc_enc_caps->lb_bit_depth = 13; dsc_enc_caps->is_block_pred_supported = true; @@ -95,7 +83,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1; dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1; dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1; - dsc_enc_caps->max_total_throughput_mps = MAX_THROUGHPUT_PER_DSC_100HZ * MAX_DSC_UNIT_COMBINE; + dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000; dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */ @@ -191,7 +179,7 @@ void dsc401_disable(struct display_stream_compressor *dsc) DSC_CLOCK_EN, 0); } -static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc) +void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc) { struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h index e3ca70058e64..7acd57eb4f42 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h @@ -341,5 +341,6 @@ void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_c void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe); void dsc401_disable(struct display_stream_compressor *dsc); void dsc401_disconnect(struct display_stream_compressor *dsc); +void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h index 1ebce5426a58..b0bd1f9425b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h @@ -108,6 +108,7 @@ struct dsc_funcs { void (*dsc_disable)(struct display_stream_compressor *dsc); void (*dsc_disconnect)(struct display_stream_compressor *dsc); void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc); + void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h index c7765e6f09e6..f8f991785d4f 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h @@ -666,10 +666,29 @@ struct dcn_mi_mask { DCN_HUBP_REG_FIELD_LIST(uint32_t); }; +struct dcn_fl_regs_st { + uint32_t lut_enable; + uint32_t lut_done; + uint32_t lut_addr_mode; + uint32_t lut_width; + uint32_t lut_tmz; + uint32_t lut_crossbar_sel_r; + uint32_t lut_crossbar_sel_g; + uint32_t lut_crossbar_sel_b; + uint32_t lut_addr_hi; + uint32_t lut_addr_lo; + uint32_t refcyc_3dlut_group; + uint32_t lut_fl_bias; + uint32_t lut_fl_scale; + uint32_t lut_fl_mode; + uint32_t lut_fl_format; +}; + struct dcn_hubp_state { struct _vcs_dpi_display_dlg_regs_st dlg_attr; struct _vcs_dpi_display_ttu_regs_st ttu_attr; struct _vcs_dpi_display_rq_regs_st rq_regs; + struct dcn_fl_regs_st fl_regs; uint32_t pixel_format; uint32_t inuse_addr_hi; uint32_t inuse_addr_lo; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c index baed31611477..705b98b1b6cc 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c @@ -86,11 +86,11 @@ void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width); } -void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled) +void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 
1 : 0); + REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits); } void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h index 6e1d4c90ddd4..608e6153fa68 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h @@ -333,7 +333,7 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r); -void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled); +void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits); void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 38e17b1796e1..4ea13d0bf815 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1186,8 +1186,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) if (dccg) { dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); - if (dccg && dccg->funcs->set_dtbclk_dto) - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + if (!(dc->ctx->dce_version >= DCN_VERSION_3_5)) { + if (dccg && dccg->funcs->set_dtbclk_dto) + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } } } else if (dccg && dccg->funcs->disable_symclk_se) { dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, @@ -1379,7 +1381,7 @@ static void populate_audio_dp_link_info( } } -static void build_audio_output( +void build_audio_output( struct dc_state *state, const struct pipe_ctx *pipe_ctx, struct audio_output *audio_output) @@ -1684,6 +1686,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( if (dc_is_dp_signal(pipe_ctx->stream->signal)) dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); + /* Temporary workaround to perform DSC programming ahead of stream enablement * for smartmux/SPRS + * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest) + || link->is_dds || link->skip_implict_edp_power_control)) && + (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal))) + dc->link_srv->set_dsc_enable(pipe_ctx, true); + } + if (!stream->dpms_off) dc->link_srv->set_dpms_on(context, pipe_ctx); @@ -1925,6 +1940,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) can_apply_edp_fast_boot = dc_validate_boot_timing(dc, edp_stream->sink, &edp_stream->timing); + + // For mux platforms, the default value is false. + // Disable fast boot during mux switching. + // The flag is cleared after switching is done.
+ if (dc->is_switch_in_progress_dest && edp_link->is_dds) + can_apply_edp_fast_boot = false; + edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; if (can_apply_edp_fast_boot) { DC_LOG_EVENT_LINK_TRAINING("eDP fast boot Enable\n"); @@ -1968,6 +1990,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) if (edp_with_sink_num) edp_link_with_sink = edp_links_with_sink[0]; + // During a mux switch, powering down the HW blocks and then enabling + // the link via a DPCD SET_POWER write causes a brief flash + keep_edp_vdd_on |= dc->is_switch_in_progress_dest; + if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) { if (edp_link_with_sink && !keep_edp_vdd_on) { /*turn off backlight before DP_blank and encoder powered down*/ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index 06789ac3a224..7cd8c1576988 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -110,5 +110,9 @@ void dce110_enable_dp_link_output( enum signal_type signal, enum clock_source_id clock_source, const struct dc_link_settings *link_settings); +void build_audio_output( + struct dc_state *state, + const struct pipe_ctx *pipe_ctx, + struct audio_output *audio_output); #endif /* __DC_HWSS_DCE110_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index f9ee55998b6b..39910f73ecd0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -327,6 +327,35 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) } } + DTN_INFO("\n=======HUBP FL======\n"); + DTN_INFO( + "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n"); + for (i = 0; i < pool->pipe_count; i++) { + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); + struct dcn_fl_regs_st *fl_regs = &s->fl_regs; + + if (!s->blank_en) { + DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x", + pool->hubps[i]->inst, + fl_regs->lut_enable, + fl_regs->lut_done, + fl_regs->lut_addr_mode, + fl_regs->lut_width, + fl_regs->lut_tmz, + fl_regs->lut_crossbar_sel_r, + fl_regs->lut_crossbar_sel_g, + fl_regs->lut_crossbar_sel_b, + fl_regs->lut_addr_hi, + fl_regs->lut_addr_lo, + fl_regs->refcyc_3dlut_group, + fl_regs->lut_fl_bias, + fl_regs->lut_fl_scale, + fl_regs->lut_fl_mode, + fl_regs->lut_fl_format); + DTN_INFO("\n"); + } + } + DTN_INFO("\n=========RQ========\n"); DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s" " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s" @@ -511,6 +540,36 @@ static void dcn10_log_color_state(struct dc *dc, dc->caps.color.mpc.num_3dluts, dc->caps.color.mpc.ogam_ram, dc->caps.color.mpc.ocsc); + DTN_INFO("===== MPC RMCM 3DLUT =====\n"); + DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n"); + for (i = 0; i < pool->mpcc_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n", + i, s.rmcm_regs.rmcm_3dlut_size, 
s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur, + s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask, + s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel, + s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done, + s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state, + s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode); + } + DTN_INFO("\n"); + DTN_INFO("===== MPC RMCM Shaper =====\n"); + DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n"); + for (i = 0; i < pool->mpcc_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n", + i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur, + s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b, + s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b, + s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state, + s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode); + } } void dcn10_log_hw_state(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index c277df12c817..3207addbd4eb 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -283,14 +283,13 @@ void dcn20_setup_gsl_group_as_lock( } /* at this point we want to program whether it's to enable or disable */ - if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { + if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) { pipe_ctx->stream_res.tg->funcs->set_gsl( pipe_ctx->stream_res.tg, &gsl); - - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( - pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); + if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( + pipe_ctx->stream_res.tg, group_idx, enable ? 
4 : 0); } else BREAK_TO_DEBUGGER(); } @@ -956,7 +955,7 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } - hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); + fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz)); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -1971,14 +1970,6 @@ static void dcn20_program_pipe( pipe_ctx->plane_state->update_flags.bits.hdr_mult)) hws->funcs.set_hdr_multiplier(pipe_ctx); - if (hws->funcs.populate_mcm_luts) { - if (pipe_ctx->plane_state) { - hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, - pipe_ctx->plane_state->lut_bank_a); - pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; - } - } - if (pipe_ctx->plane_state && (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change || @@ -2492,7 +2483,7 @@ bool dcn20_update_bandwidth( struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) return false; /* apply updated bandwidth parameters */ @@ -2816,6 +2807,8 @@ void dcn20_reset_back_end_for_pipe( { struct dc_link *link = pipe_ctx->stream->link; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + struct dccg *dccg = dc->res_pool->dccg; + struct dtbclk_dto_params dto_params = {0}; DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) { @@ -2876,6 +2869,13 @@ void dcn20_reset_back_end_for_pipe( &pipe_ctx->link_res, pipe_ctx->stream->signal); link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; } + if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dccg + && dc->ctx->dce_version >= DCN_VERSION_3_5) { + dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; + dto_params.timing = &pipe_ctx->stream->timing; + if (dccg && dccg->funcs->set_dtbclk_dto) + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } } /* diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 5ba3999991b0..8ba934b83957 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -562,6 +562,19 @@ static void dcn31_reset_back_end_for_pipe( else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); + /* Temporary workaround to perform DSC programming ahead of pipe reset + * for smartmux/SPRS + * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest) + || link->is_dds || link->skip_implict_edp_power_control)) && + (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal))) + dc->link_srv->set_dsc_enable(pipe_ctx, false); + } + /* free acquired resources */ if (pipe_ctx->stream_res.audio) { /*disable az_endpoint*/ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index a0b05b9ef660..416b1dca3dac 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ 
-1063,15 +1063,17 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; ASSERT(odm_dsc); + if (!odm_dsc) + continue; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index c4177a9a662f..cc9f40d97af2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -2,6 +2,8 @@ // // Copyright 2024 Advanced Micro Devices, Inc. + +#include "os_types.h" #include "dm_services.h" #include "basics/dc_common.h" #include "dm_helpers.h" @@ -49,7 +51,7 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name -static void dcn401_initialize_min_clocks(struct dc *dc) +void dcn401_initialize_min_clocks(struct dc *dc) { struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; @@ -143,13 +145,8 @@ void dcn401_init_hw(struct dc *dc) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); // mark dcmode limits present if any clock has distinct AC and DC values from SMU - dc->caps.dcmode_power_limits_present = - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz); + dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present && + dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr); } // Initialize the dccg @@ -396,249 +393,6 @@ static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ct } } -static void dcn401_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend) -{ - struct mpc *mpc = dc->res_pool->mpc; - int mpcc_id = pipe_ctx->plane_res.hubp->inst; - - if (!pipe_ctx->plane_state) - return; - - mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); - pipe_ctx->plane_state->mcm_location = (bPostBlend) ? 
- MPCC_MOVABLE_CM_LOCATION_AFTER : - MPCC_MOVABLE_CM_LOCATION_BEFORE; -} - -static void dc_get_lut_mode( - enum dc_cm2_gpu_mem_layout layout, - enum hubp_3dlut_fl_mode *mode, - enum hubp_3dlut_fl_addressing_mode *addr_mode) -{ - switch (layout) { - case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB: - *mode = hubp_3dlut_fl_mode_native_1; - *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; - break; - case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR: - *mode = hubp_3dlut_fl_mode_native_2; - *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; - break; - case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR: - *mode = hubp_3dlut_fl_mode_transform; - *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear; - break; - default: - *mode = hubp_3dlut_fl_mode_disable; - *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; - break; - } -} - -static void dc_get_lut_format( - enum dc_cm2_gpu_mem_format dc_format, - enum hubp_3dlut_fl_format *format) -{ - switch (dc_format) { - case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB: - *format = hubp_3dlut_fl_format_unorm_12msb_bitslice; - break; - case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB: - *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice; - break; - case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10: - *format = hubp_3dlut_fl_format_float_fp1_5_10; - break; - } -} - -static void dc_get_lut_xbar( - enum dc_cm2_gpu_mem_pixel_component_order order, - enum hubp_3dlut_fl_crossbar_bit_slice *cr_r, - enum hubp_3dlut_fl_crossbar_bit_slice *y_g, - enum hubp_3dlut_fl_crossbar_bit_slice *cb_b) -{ - switch (order) { - case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA: - *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47; - *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; - *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15; - break; - case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA: - *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15; - *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; - *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47; - break; - } -} - -static void dc_get_lut_width( - enum dc_cm2_gpu_mem_size size, - enum hubp_3dlut_fl_width *width) -{ - switch (size) { - case DC_CM2_GPU_MEM_SIZE_333333: - *width = hubp_3dlut_fl_width_33; - break; - case DC_CM2_GPU_MEM_SIZE_171717: - *width = hubp_3dlut_fl_width_17; - break; - case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: - *width = hubp_3dlut_fl_width_transformed; - break; - } -} -static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc) -{ - if (mpc->funcs->rmcm.update_3dlut_fast_load_select && - mpc->funcs->rmcm.program_lut_read_write_control && - hubp->funcs->hubp_program_3dlut_fl_addr && - mpc->funcs->rmcm.program_bit_depth && - hubp->funcs->hubp_program_3dlut_fl_mode && - hubp->funcs->hubp_program_3dlut_fl_addressing_mode && - hubp->funcs->hubp_program_3dlut_fl_format && - hubp->funcs->hubp_update_3dlut_fl_bias_scale && - mpc->funcs->rmcm.program_bias_scale && - hubp->funcs->hubp_program_3dlut_fl_crossbar && - hubp->funcs->hubp_program_3dlut_fl_width && - mpc->funcs->rmcm.update_3dlut_fast_load_select && - mpc->funcs->rmcm.populate_lut && - mpc->funcs->rmcm.program_lut_mode && - hubp->funcs->hubp_enable_3dlut_fl && - mpc->funcs->rmcm.enable_3dlut_fl) - return true; - - return false; -} - -bool dcn401_program_rmcm_luts( - struct hubp *hubp, - struct pipe_ctx *pipe_ctx, - enum dc_cm2_transfer_func_source lut3d_src, - struct dc_cm2_func_luts *mcm_luts, - struct mpc *mpc, - bool lut_bank_a, - int mpcc_id) -{ - struct dpp *dpp_base = pipe_ctx->plane_res.dpp; - union mcm_lut_params m_lut_params; - enum MCM_LUT_XABLE 
shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable; - enum hubp_3dlut_fl_mode mode; - enum hubp_3dlut_fl_addressing_mode addr_mode; - enum hubp_3dlut_fl_format format = 0; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0; - enum hubp_3dlut_fl_width width = 0; - struct dc *dc = hubp->ctx->dc; - - bool bypass_rmcm_3dlut = false; - bool bypass_rmcm_shaper = false; - - dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); - - /* 3DLUT */ - switch (lut3d_src) { - case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM: - memset(&m_lut_params, 0, sizeof(m_lut_params)); - // Don't know what to do in this case. - //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM: - break; - case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: - dc_get_lut_width(mcm_luts->lut3d_data.gpu_mem_params.size, &width); - if (!dc_is_rmcm_3dlut_supported(hubp, mpc) || - !mpc->funcs->rmcm.is_config_supported(width)) - return false; - - //0. disable fl on mpc - mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xF); - - //1. power down the block - mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false); - - //2. program RMCM - //2a. 3dlut reg programming - mpc->funcs->rmcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, - (!bypass_rmcm_3dlut) && lut3d_xable != MCM_LUT_DISABLE, mpcc_id); - - hubp->funcs->hubp_program_3dlut_fl_addr(hubp, - mcm_luts->lut3d_data.gpu_mem_params.addr); - - mpc->funcs->rmcm.program_bit_depth(mpc, - mcm_luts->lut3d_data.gpu_mem_params.bit_depth, mpcc_id); - - // setting native or transformed mode, - dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode); - - //these program the mcm 3dlut - hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode); - - hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode); - - //seems to be only for the MCM - dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format); - hubp->funcs->hubp_program_3dlut_fl_format(hubp, format); - - mpc->funcs->rmcm.program_bias_scale(mpc, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale, - mpcc_id); - hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale); - - dc_get_lut_xbar( - mcm_luts->lut3d_data.gpu_mem_params.component_order, - &crossbar_bit_slice_cr_r, - &crossbar_bit_slice_y_g, - &crossbar_bit_slice_cb_b); - - hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, - crossbar_bit_slice_cr_r, - crossbar_bit_slice_y_g, - crossbar_bit_slice_cb_b); - - mpc->funcs->rmcm.program_3dlut_size(mpc, width, mpcc_id); - - mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst); - - //2b. 
shaper reg programming - memset(&m_lut_params, 0, sizeof(m_lut_params)); - - if (mcm_luts->shaper->type == TF_TYPE_HWPWL) { - m_lut_params.pwl = &mcm_luts->shaper->pwl; - } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { - ASSERT(false); - cm_helper_translate_curve_to_hw_format( - dc->ctx, - mcm_luts->shaper, - &dpp_base->regamma_params, true); - m_lut_params.pwl = &dpp_base->regamma_params; - } - if (m_lut_params.pwl) { - mpc->funcs->rmcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id); - mpc->funcs->rmcm.program_lut_mode(mpc, !bypass_rmcm_shaper, lut_bank_a, mpcc_id); - } else { - //RMCM 3dlut won't work without its shaper - return false; - } - - //3. Select the hubp connected to this RMCM - hubp->funcs->hubp_enable_3dlut_fl(hubp, true); - mpc->funcs->rmcm.enable_3dlut_fl(mpc, true, mpcc_id); - - //4. power on the block - if (m_lut_params.pwl) - mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true); - - break; - default: - return false; - } - - return true; -} - void dcn401_populate_mcm_luts(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_cm2_func_luts mcm_luts, @@ -664,25 +418,6 @@ void dcn401_populate_mcm_luts(struct dc *dc, dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); - //MCM - setting its location (Before/After) blender - //set to post blend (true) - dcn401_set_mcm_location_post_blend( - dc, - pipe_ctx, - mcm_luts.lut3d_data.mpc_mcm_post_blend); - - //RMCM - 3dLUT+Shaper - if (mcm_luts.lut3d_data.rmcm_3dlut_enable) { - dcn401_program_rmcm_luts( - hubp, - pipe_ctx, - lut3d_src, - &mcm_luts, - mpc, - lut_bank_a, - mpcc_id); - } - /* 1D LUT */ if (mcm_luts.lut1d_func) { memset(&m_lut_params, 0, sizeof(m_lut_params)); @@ -740,15 +475,15 @@ void dcn401_populate_mcm_luts(struct dc *dc, break; case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: switch (mcm_luts.lut3d_data.gpu_mem_params.size) { - case DC_CM2_GPU_MEM_SIZE_333333: - width = hubp_3dlut_fl_width_33; - break; case DC_CM2_GPU_MEM_SIZE_171717: width = hubp_3dlut_fl_width_17; break; case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: width = hubp_3dlut_fl_width_transformed; break; + default: + //TODO: handle default case + break; } //check for support @@ -817,11 +552,14 @@ void dcn401_populate_mcm_luts(struct dc *dc, //navi 4x has a bug where red and blue are swapped and need to be worked around here //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x - dc_get_lut_xbar( - mcm_luts.lut3d_data.gpu_mem_params.component_order, - &crossbar_bit_slice_cr_r, - &crossbar_bit_slice_y_g, - &crossbar_bit_slice_cb_b); + switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) { + case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA: + default: + crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15; + crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; + crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47; + break; + } if (hubp->funcs->hubp_program_3dlut_fl_crossbar) hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, @@ -2269,14 +2007,6 @@ void dcn401_program_pipe( pipe_ctx->plane_state->update_flags.bits.hdr_mult)) hws->funcs.set_hdr_multiplier(pipe_ctx); - if (hws->funcs.populate_mcm_luts) { - if (pipe_ctx->plane_state) { - hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, - pipe_ctx->plane_state->lut_bank_a); - pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; - } - } - if (pipe_ctx->plane_state &&
(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change || @@ -2651,7 +2381,7 @@ bool dcn401_update_bandwidth( struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) return false; /* apply updated bandwidth parameters */ @@ -2902,10 +2632,12 @@ void dcn401_plane_atomic_power_down(struct dc *dc, DC_LOGGER_INIT(dc->ctx->logger); - REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); - if (org_ip_request_cntl == 0) - REG_SET(DC_IP_REQUEST_CNTL, 0, - IP_REQUEST_EN, 1); + if (REG(DC_IP_REQUEST_CNTL)) { + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + if (org_ip_request_cntl == 0) + REG_SET(DC_IP_REQUEST_CNTL, 0, + IP_REQUEST_EN, 1); + } if (hws->funcs.dpp_pg_control) hws->funcs.dpp_pg_control(hws, dpp->inst, false); @@ -2916,7 +2648,7 @@ void dcn401_plane_atomic_power_down(struct dc *dc, hubp->funcs->hubp_reset(hubp); dpp->funcs->dpp_reset(dpp); - if (org_ip_request_cntl == 0) + if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL)) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index ce65b4f6c672..2621b7725267 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -109,12 +109,5 @@ void dcn401_detect_pipe_changes( void dcn401_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp); -bool dcn401_program_rmcm_luts( - struct hubp *hubp, - struct pipe_ctx *pipe_ctx, - enum dc_cm2_transfer_func_source lut3d_src, - struct dc_cm2_func_luts *mcm_luts, - struct mpc *mpc, - bool lut_bank_a, - int mpcc_id); +void dcn401_initialize_min_clocks(struct dc *dc); #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 3a0795045bc6..9df8030e37f7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -502,6 +502,9 @@ void get_hdr_visual_confirm_color( void get_mpctree_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +void get_smartmux_visual_confirm_color( + struct dc *dc, + struct tg_color *color); void get_vabc_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index f3696143590c..82085d9c3f40 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -59,6 +59,7 @@ enum dc_status { DC_FAIL_DP_PAYLOAD_ALLOCATION = 27, DC_FAIL_DP_LINK_BANDWIDTH = 28, DC_FAIL_HW_CURSOR_SUPPORT = 29, + DC_FAIL_DP_TUNNEL_BW_VALIDATE = 30, DC_ERROR_UNEXPECTED = -1 }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 0cf349cafb3e..f0d7185153b2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -67,6 +67,8 @@ struct resource_context; struct clk_bw_params; struct dc_mcache_params; +#define MAX_RMCM_INST 2 + struct resource_funcs { enum engine_id (*get_preferred_eng_id_dpia)(unsigned int 
dpia_index); void (*destroy)(struct resource_pool **pool); @@ -82,7 +84,7 @@ struct resource_funcs { enum dc_status (*validate_bandwidth)( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void (*calculate_wm_and_dlg)( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -107,7 +109,7 @@ struct resource_funcs { struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); /* * Algorithm for assigning available link encoders to links. @@ -223,6 +225,11 @@ struct resource_funcs { const struct dc_stream_state *stream); bool (*program_mcache_pipe_config)(struct dc_state *context, const struct dc_mcache_params *mcache_params); + enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output); }; struct audio_support{ @@ -281,6 +288,7 @@ struct resource_pool { struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS]; struct dc_3dlut *mpc_lut[MAX_PIPES]; struct dc_transfer_func *mpc_shaper[MAX_PIPES]; + struct dc_rmcm_3dlut rmcm_3dlut[MAX_RMCM_INST]; struct { unsigned int xtalin_clock_inKhz; @@ -556,7 +564,10 @@ struct dcn_bw_output { struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES]; struct dmub_cmd_fams2_global_config fams2_global_config; union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES]; - union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES]; + union { + union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES]; + union dmub_fams2_stream_static_sub_state_v2 fams2_stream_sub_params_v2[DML2_MAX_PLANES]; + }; struct dml2_display_arb_regs arb_regs; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index d19a595c2be4..134091d5842d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -622,7 +622,7 @@ extern const struct dcn_ip_params dcn10_ip_defaults; bool dcn_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn_get_soc_clks( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index c14d64687a3d..2c9a4a12bd8a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -100,6 +100,17 @@ struct dcn301_clk_internal { #define MAX_NUM_DPM_LVL 8 #define WM_SET_COUNT 4 +enum clk_type { + CLK_TYPE_DCFCLK, + CLK_TYPE_FCLK, + CLK_TYPE_MCLK, + CLK_TYPE_SOCCLK, + CLK_TYPE_DTBCLK, + CLK_TYPE_DISPCLK, + CLK_TYPE_DPPCLK, + CLK_TYPE_DSCCLK, + CLK_TYPE_COUNT +}; struct clk_limit_table_entry { unsigned int voltage; /* millivolts with 2 fractional bits */ @@ -324,6 +335,11 @@ struct clk_mgr_funcs { int (*get_dispclk_from_dentist)(struct clk_mgr *clk_mgr_base); + bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr); + + uint32_t (*set_smartmux_switch)(struct clk_mgr *clk_mgr, uint32_t pins_to_set); + + unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr_base, enum clk_type clk_type); }; struct clk_mgr { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index e94e9ba60f55..61c4d2a7db1c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ 
b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -211,7 +211,7 @@ struct dccg_funcs { struct dccg *dccg, enum streamclk_source src, uint32_t otg_inst); - void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst); + void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h); void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst); void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index b610beb075d5..cee29e89ec5c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -282,7 +282,7 @@ struct hubp_funcs { void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable); void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode); void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width); - void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled); + void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits); void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 6e303b81bfb0..7641439f6ca0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -190,6 +190,42 @@ struct mpc_grph_gamut_adjustment { enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id; }; +struct mpc_rmcm_regs { + uint32_t rmcm_3dlut_mem_pwr_state; + uint32_t rmcm_3dlut_mem_pwr_force; + uint32_t rmcm_3dlut_mem_pwr_dis; + uint32_t rmcm_3dlut_mem_pwr_mode; + uint32_t rmcm_3dlut_size; + uint32_t rmcm_3dlut_mode; + uint32_t rmcm_3dlut_mode_cur; + uint32_t rmcm_3dlut_read_sel; + uint32_t rmcm_3dlut_30bit_en; + uint32_t rmcm_3dlut_wr_en_mask; + uint32_t rmcm_3dlut_ram_sel; + uint32_t rmcm_3dlut_out_norm_factor; + uint32_t rmcm_3dlut_fl_sel; + uint32_t rmcm_3dlut_out_offset_r; + uint32_t rmcm_3dlut_out_scale_r; + uint32_t rmcm_3dlut_fl_done; + uint32_t rmcm_3dlut_fl_soft_underflow; + uint32_t rmcm_3dlut_fl_hard_underflow; + uint32_t rmcm_cntl; + uint32_t rmcm_shaper_mem_pwr_state; + uint32_t rmcm_shaper_mem_pwr_force; + uint32_t rmcm_shaper_mem_pwr_dis; + uint32_t rmcm_shaper_mem_pwr_mode; + uint32_t rmcm_shaper_lut_mode; + uint32_t rmcm_shaper_mode_cur; + uint32_t rmcm_shaper_lut_write_en_mask; + uint32_t rmcm_shaper_lut_write_sel; + uint32_t rmcm_shaper_offset_b; + uint32_t rmcm_shaper_scale_b; + uint32_t rmcm_shaper_rama_exp_region_start_b; + uint32_t rmcm_shaper_rama_exp_region_start_seg_b; + uint32_t rmcm_shaper_rama_exp_region_end_b; + uint32_t rmcm_shaper_rama_exp_region_end_base_b; +}; + struct mpcc_sm_cfg { bool enable; /* 0-single plane,2-row subsampling,4-column subsampling,6-checkerboard subsampling */ @@ -301,6 +337,7 @@ struct mpcc_state { uint32_t rgam_mode; uint32_t rgam_lut; struct mpc_grph_gamut_adjustment gamut_remap; + struct mpc_rmcm_regs rmcm_regs; }; /** @@ -1038,6 +1075,11 @@ struct mpc_funcs { */ void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id); + /** + * @mcm: + * + * MPC MCM new HW sequential programming functions + */ struct { void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id); void (*program_bias_scale)(struct
mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id); @@ -1050,6 +1092,11 @@ struct mpc_funcs { bool lut_bank_a, int mpcc_id); } mcm; + /** + * @rmcm: + * + * MPC RMCM new HW sequential programming functions + */ struct { void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id); void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h index 00ea3864dd4d..44f86cc2d1d6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h @@ -46,6 +46,8 @@ struct pg_cntl_funcs { void (*opp_pg_control)(struct pg_cntl *pg_cntl, unsigned int opp_inst, bool power_on); void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on); void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on); + void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on); + void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on); void (*init_pg_status)(struct pg_cntl *pg_cntl); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index fe7f3137f228..27f950ae45ee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -117,6 +117,7 @@ struct stream_encoder { uint32_t stream_enc_inst; struct vpg *vpg; struct afmt *afmt; + struct apg *apg; }; struct enc_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index 7d16351bba99..f2503402c10e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -144,9 +144,9 @@ struct link_service { uint32_t (*dp_link_bandwidth_kbps)( const struct dc_link *link, const struct dc_link_settings *link_settings); - bool (*validate_dpia_bandwidth)( - const struct dc_stream_state *stream, - const unsigned int num_streams); + enum dc_status (*validate_dp_tunnel_bandwidth)( + const struct dc *dc, + const struct dc_state *new_ctx); uint32_t (*dp_required_hblank_size_bytes)( const struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 96febabf464a..2956c2b3ad1a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -34,6 +34,7 @@ #include "dm_helpers.h" #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" +#include "clk_mgr.h" #define DC_LOGGER \ link->ctx->logger @@ -67,10 +68,17 @@ static void dp_retrain_link_dp_test(struct dc_link *link, { struct pipe_ctx *pipes[MAX_PIPES]; struct dc_state *state = link->dc->current_state; + struct dc_stream_update stream_update = { 0 }; + bool dpms_off = false; + bool needs_divider_update = false; bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state); bool is_hpo_acquired; uint8_t count; int i; + struct audio_output audio_output[MAX_PIPES]; + + needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) != + link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings)); udelay(100); @@ -83,16 +91,59 @@ static void dp_retrain_link_dp_test(struct dc_link *link, link->dc, state, pipes[i]); + + // Disable OTG and re-enable after updating clocks + 
pipes[i]->stream_res.tg->funcs->disable_crtc(pipes[i]->stream_res.tg); } - if (link->dc->hwss.setup_hpo_hw_control) { - is_hpo_acquired = resource_is_hpo_acquired(state); - if (was_hpo_acquired != is_hpo_acquired) - link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired); + if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) { + link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link, + link_setting, count, + *pipes, &audio_output[0]); + for (i = 0; i < count; i++) { + pipes[i]->clock_source->funcs->program_pix_clk( + pipes[i]->clock_source, + &pipes[i]->stream_res.pix_clk_params, + link->dc->link_srv->dp_get_encoding_format(&pipes[i]->link_config.dp_link_settings), + &pipes[i]->pll_settings); + + if (pipes[i]->stream_res.audio != NULL) { + const struct link_hwss *link_hwss = get_link_hwss( + link, &pipes[i]->link_res); + + link_hwss->setup_audio_output(pipes[i], &audio_output[i], + pipes[i]->stream_res.audio->inst); + + pipes[i]->stream_res.audio->funcs->az_configure( + pipes[i]->stream_res.audio, + pipes[i]->stream->signal, + &audio_output[i].crtc_info, + &pipes[i]->stream->audio_info, + &audio_output[i].dp_link_info); + + if (link->dc->config.disable_hbr_audio_dp2 && + pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio && + link->dc->link_srv->dp_is_128b_132b_signal(pipes[i])) + pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio(pipes[i]->stream_res.audio); + } + } } - for (i = count-1; i >= 0; i--) - link_set_dpms_on(state, pipes[i]); + // Toggle on HPO I/O if necessary + is_hpo_acquired = resource_is_hpo_acquired(state); + if (was_hpo_acquired != is_hpo_acquired && link->dc->hwss.setup_hpo_hw_control) + link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired); + + for (i = 0; i < count; i++) + pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg); + + // Set DPMS on with stream update + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) { + stream_update.stream = state->streams[i]; + stream_update.dpms_off = &dpms_off; + dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, state->streams[i], &stream_update); + } } static void dp_test_send_link_training(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c index 116ff37126e7..55c5148de800 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -74,7 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link, static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link, struct encoder_set_dp_phy_pattern_param *tp_params) { - uint8_t clk_src = 0x4C; + uint8_t clk_src = 0xC4; uint8_t pattern = 0x4F; /* SQ128 */ const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 9655e6fa53a4..827b630daf49 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -593,8 +593,9 @@ static bool detect_dp(struct dc_link *link, if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; - if 
(!detect_dp_sink_caps(link)) + if (!detect_dp_sink_caps(link)) { return false; + } if (is_dp_branch_device(link)) /* DP SST branch */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 273a3be6d593..8c8682f743d6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -140,7 +140,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) } } - if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) + if (((!dc->is_switch_in_progress_dest) && ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)) && + (link->type != dc_connection_none)) dpcd_write_rx_power_ctrl(link, false); } } @@ -842,14 +843,14 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); } @@ -2296,8 +2297,7 @@ static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, i link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw; } - /* get dp overhead for dp tunneling */ - link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link); + link->dpia_bw_alloc_config.dp_overhead = link_dpia_get_dp_overhead(link); req_bw += link->dpia_bw_alloc_config.dp_overhead; link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw); @@ -2537,6 +2537,14 @@ void link_set_dpms_on( !pipe_ctx->next_odm_pipe) { pipe_ctx->stream->dpms_off = false; update_psp_stream_config(pipe_ctx, false); + + if (link->is_dds) { + uint32_t post_oui_delay = 30; // 30ms + + dpcd_set_source_specific_data(link); + msleep(post_oui_delay); + } + return; } @@ -2629,6 +2637,15 @@ void link_set_dpms_on( dp_is_128b_132b_signal(pipe_ctx)) update_sst_payload(pipe_ctx, true); + /* Corruption was observed on systems with display mux when stream gets + * enabled after the mux switch. Having a small delay between link + * training and stream unblank resolves the corruption issue. + * This is a workaround.
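+ * (Concretely: link training completes, a fixed msleep(20) settle delay
+ * runs, and only then does dc->hwss.unblank_stream() below unblank the
+ * stream; the delay is taken only for eDP behind a display mux.)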
+ */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + link->is_display_mux_present) + msleep(20); + dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 1a04f4b74585..de1143dbbd25 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -100,7 +100,7 @@ static void construct_link_service_validation(struct link_service *link_srv) { link_srv->validate_mode_timing = link_validate_mode_timing; link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps; - link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth; + link_srv->validate_dp_tunnel_bandwidth = link_validate_dp_tunnel_bandwidth; link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes; } @@ -539,10 +539,16 @@ static bool construct_phy(struct dc_link *link, break; case CONNECTOR_ID_EDP: + // If smartmux is supported, only create the link on the primary eDP. + // Dual eDP is not supported with smartmux. + if (!(!link->dc->config.smart_mux_version || dc_ctx->dc_edp_id_count == 0)) + goto create_fail; + link->connector_signal = SIGNAL_TYPE_EDP; if (link->hpd_gpio) { - if (!link->dc->config.allow_edp_hotplug_detection) + if (!link->dc->config.allow_edp_hotplug_detection + && !is_smartmux_suported(link)) link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; switch (link->dc->config.allow_edp_hotplug_detection) { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index 29606fda029d..aecaf37eee35 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -86,6 +86,10 @@ static bool dp_active_dongle_validate_timing( if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) return false; break; + case PIXEL_ENCODING_UNDEFINED: + /* This pixel encoding is currently not supported */ + ASSERT(false); + break; default: /* Invalid Pixel Encoding*/ return false; @@ -104,6 +108,10 @@ static bool dp_active_dongle_validate_timing( if (dongle_caps->dp_hdmi_max_bpc < 12) return false; break; + case COLOR_DEPTH_UNDEFINED: + /* These color depths are currently not supported */ + ASSERT(false); + break; case COLOR_DEPTH_141414: case COLOR_DEPTH_161616: default: @@ -255,6 +263,14 @@ uint32_t dp_link_bandwidth_kbps( return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000; } +static uint32_t dp_get_timing_bandwidth_kbps( + const struct dc_crtc_timing *timing, + const struct dc_link *link) +{ + return dc_bandwidth_in_kbps_from_timing(timing, + dc_link_get_highest_encoding_format(link)); +} + static bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing) @@ -351,63 +367,81 @@ enum dc_status link_validate_mode_timing( return DC_OK; } +static const struct dc_tunnel_settings *get_dp_tunnel_settings(const struct dc_state *context, + const struct dc_stream_state *stream) +{ + int i; + const struct dc_tunnel_settings *dp_tunnel_settings = NULL; + + for (i = 0; i < MAX_PIPES; i++) { + if (context->res_ctx.pipe_ctx[i].stream && (context->res_ctx.pipe_ctx[i].stream == stream)) { + dp_tunnel_settings = &context->res_ctx.pipe_ctx[i].link_config.dp_tunnel_settings; + break; + } + } + + return dp_tunnel_settings; +} + /* - * This function calculates the bandwidth required for the stream timing - * and aggregates the stream bandwidth
for the respective dpia link - * - * @stream: pointer to the dc_stream_state struct instance - * @num_streams: number of streams to be validated + * Calculates the DP tunneling bandwidth required for the stream timing + * and aggregates the stream bandwidth for the respective DP tunneling link * - * return: true if validation is succeeded + * return: dc_status */ -bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams) +enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const struct dc_state *new_ctx) { - int bw_needed[MAX_DPIA_NUM] = {0}; - struct dc_link *dpia_link[MAX_DPIA_NUM] = {0}; - int num_dpias = 0; - - for (unsigned int i = 0; i < num_streams; ++i) { - if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) { - /* new dpia sst stream, check whether it exceeds max dpia */ - if (num_dpias >= MAX_DPIA_NUM) - return false; + struct dc_validation_dpia_set dpia_link_sets[MAX_DPIA_NUM] = { 0 }; + uint8_t link_count = 0; + enum dc_status result = DC_OK; - dpia_link[num_dpias] = stream[i].link; - bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, - dc_link_get_highest_encoding_format(dpia_link[num_dpias])); - num_dpias++; - } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - uint8_t j = 0; - /* check whether its a known dpia link */ - for (; j < num_dpias; ++j) { - if (dpia_link[j] == stream[i].link) - break; - } + // Iterate through streams in the new context + for (uint8_t i = 0; (i < MAX_PIPES && i < new_ctx->stream_count); i++) { + const struct dc_stream_state *stream = new_ctx->streams[i]; + const struct dc_link *link; + const struct dc_tunnel_settings *dp_tunnel_settings; + uint32_t timing_bw; + + if (stream == NULL) + continue; + + link = stream->link; + + if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT + || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + && link->hpd_status)) + continue; - if (j == num_dpias) { - /* new dpia mst stream, check whether it exceeds max dpia */ - if (num_dpias >= MAX_DPIA_NUM) - return false; - else { - dpia_link[j] = stream[i].link; - num_dpias++; - } + dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream); + + if ((dp_tunnel_settings == NULL) || (dp_tunnel_settings->should_use_dp_bw_allocation == false)) + continue; + + timing_bw = dp_get_timing_bandwidth_kbps(&stream->timing, link); + + // Find an existing entry for this 'link' in 'dpia_link_sets' + for (uint8_t j = 0; j < MAX_DPIA_NUM; j++) { + bool is_new_slot = false; + + if (dpia_link_sets[j].link == NULL) { + is_new_slot = true; + link_count++; + dpia_link_sets[j].required_bw = 0; + dpia_link_sets[j].link = link; } - bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing, - dc_link_get_highest_encoding_format(dpia_link[j])); + if (is_new_slot || (dpia_link_sets[j].link == link)) { + dpia_link_sets[j].tunnel_settings = dp_tunnel_settings; + dpia_link_sets[j].required_bw += timing_bw; + break; + } } } - /* Include dp overheads */ - for (uint8_t i = 0; i < num_dpias; ++i) { - int dp_overhead = 0; - - dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]); - bw_needed[i] += dp_overhead; - } + if (link_count && link_dpia_validate_dp_tunnel_bandwidth(dpia_link_sets, link_count) == false) + result = DC_FAIL_DP_TUNNEL_BW_VALIDATE; - return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias); + return result; } struct dp_audio_layout_config { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h 
b/drivers/gpu/drm/amd/display/dc/link/link_validation.h index bf398c49c3e8..9553c81053fe 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h @@ -30,9 +30,9 @@ enum dc_status link_validate_mode_timing( const struct dc_stream_state *stream, struct dc_link *link, const struct dc_crtc_timing *timing); -bool link_validate_dpia_bandwidth( - const struct dc_stream_state *stream, - const unsigned int num_streams); +enum dc_status link_validate_dp_tunnel_bandwidth( + const struct dc *dc, + const struct dc_state *new_ctx); uint32_t dp_link_bandwidth_kbps( const struct dc_link *link, const struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 0f965380a9b4..651926e547b9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -1388,6 +1388,21 @@ void dpcd_set_source_specific_data(struct dc_link *link) struct dpcd_amd_signature amd_signature = {0}; struct dpcd_amd_device_id amd_device_id = {0}; + if (link->is_dds) { + uint8_t dpcd_dp_edp_backlight_mode = 0; + + /* + * Write 0 to bits 0:1 for dp_edp_backlight_mode_set register + * if platform is DDS + */ + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &dpcd_dp_edp_backlight_mode, sizeof(uint8_t)); + dpcd_dp_edp_backlight_mode &= ~0x3; + + core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &dpcd_dp_edp_backlight_mode, sizeof(uint8_t)); + } + amd_device_id.device_id_byte1 = (uint8_t)(link->ctx->asic_id.chip_id); amd_device_id.device_id_byte2 = @@ -1543,6 +1558,10 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) return false; link->dpcd_sink_ext_caps.raw = dpcd_data; + if (link->is_dds && !link->dpcd_sink_ext_caps.bits.oled) { + link->dpcd_sink_ext_caps.raw = 0; + return false; + } if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK) return false; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 22bfdced64ab..9b2f1a7da1d1 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -75,12 +75,15 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) { status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY, - dpcd_dp_tun_data, 1); + dpcd_dp_tun_data, 2); if (status != DC_OK) goto err; - link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = dpcd_dp_tun_data[0]; + link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = + dpcd_dp_tun_data[USB4_DRIVER_BW_CAPABILITY - USB4_DRIVER_BW_CAPABILITY]; + link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.raw = + dpcd_dp_tun_data[DP_IN_ADAPTER_TUNNEL_INFO - USB4_DRIVER_BW_CAPABILITY]; } DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) " @@ -155,8 +158,14 @@ void link_decide_dp_tunnel_settings(struct dc_stream_state *stream, link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling; if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc - && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) + && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) { 
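+ // Cache the router identity (cm_id, group_id) and the current bandwidth
+ // bookkeeping in the tunnel settings, so dc_state validation can group
+ // DPIAs per USB4 router later without further DPCD reads.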
dp_tunnel_setting->should_use_dp_bw_allocation = true; + dp_tunnel_setting->cm_id = link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id & 0x0F; + dp_tunnel_setting->group_id = link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.bits.group_id; + dp_tunnel_setting->estimated_bw = link->dpia_bw_alloc_config.estimated_bw; + dp_tunnel_setting->allocated_bw = link->dpia_bw_alloc_config.allocated_bw; + dp_tunnel_setting->bw_granularity = link->dpia_bw_alloc_config.bw_granularity; + } } } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index 3af7564a84f1..819bf2d8ba53 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -35,6 +35,8 @@ #define Kbps_TO_Gbps (1000 * 1000) +#define MST_TIME_SLOT_COUNT 64 + // ------------------------------------------------------------------ // PRIVATE FUNCTIONS // ------------------------------------------------------------------ @@ -160,78 +162,6 @@ static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link) link->dpia_bw_alloc_config.nrd_max_lane_count); } -static uint8_t get_lowest_dpia_index(struct dc_link *link) -{ - const struct dc *dc_struct = link->dc; - uint8_t idx = 0xFF; - int i; - - for (i = 0; i < MAX_LINKS; ++i) { - - if (!dc_struct->links[i] || - dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) - continue; - - if (idx > dc_struct->links[i]->link_index) { - idx = dc_struct->links[i]->link_index; - break; - } - } - - return idx; -} - -/* - * Get the maximum dp tunnel banwidth of host router - * - * @dc: pointer to the dc struct instance - * @hr_index: host router index - * - * return: host router maximum dp tunnel bandwidth - */ -static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index) -{ - uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]); - uint8_t hr_index_temp = 0; - struct dc_link *link_dpia_primary, *link_dpia_secondary; - int total_bw = 0; - - for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) { - - if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) - continue; - - hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2; - - if (hr_index_temp == hr_index) { - link_dpia_primary = dc->links[i]; - link_dpia_secondary = dc->links[i + 1]; - - /** - * If BW allocation enabled on both DPIAs, then - * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary) - * otherwise HR BW = Estimated(bw alloc enabled dpia) - */ - if ((link_dpia_primary->hpd_status && - link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) && - (link_dpia_secondary->hpd_status && - link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) { - total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw + - link_dpia_secondary->dpia_bw_alloc_config.allocated_bw; - } else if (link_dpia_primary->hpd_status && - link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) { - total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw; - } else if (link_dpia_secondary->hpd_status && - link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) { - total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw; - } - break; - } - } - - return total_bw; -} - /* * Cleanup function for when the dpia is unplugged to reset struct * and perform any required clean up @@ -251,32 +181,40 @@ static void dpia_bw_alloc_unplug(struct dc_link *link) static void link_dpia_send_bw_alloc_request(struct dc_link *link, 
int req_bw) { - uint8_t requested_bw; - uint32_t temp; + uint8_t request_reg_val; + uint32_t temp, request_bw; - /* Error check whether request bw greater than allocated */ - if (req_bw > link->dpia_bw_alloc_config.estimated_bw) { - DC_LOG_ERROR("%s: Request BW greater than estimated BW for link(%d)\n", - __func__, link->link_index); - req_bw = link->dpia_bw_alloc_config.estimated_bw; + if (link->dpia_bw_alloc_config.bw_granularity == 0) { + DC_LOG_ERROR("%s: Link[%d]: bw_granularity is zero!", __func__, link->link_index); + return; } temp = req_bw * link->dpia_bw_alloc_config.bw_granularity; - requested_bw = temp / Kbps_TO_Gbps; + request_reg_val = temp / Kbps_TO_Gbps; /* Always make sure to add more to account for floating points */ if (temp % Kbps_TO_Gbps) - ++requested_bw; + ++request_reg_val; - /* Error check whether requested and allocated are equal */ - req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); - if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) { - DC_LOG_ERROR("%s: Request BW equals to allocated BW for link(%d)\n", - __func__, link->link_index); + request_bw = request_reg_val * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + + if (request_bw > link->dpia_bw_alloc_config.estimated_bw) { + DC_LOG_ERROR("%s: Link[%d]: Request BW (%d --> %d) > Estimated BW (%d)... Set to Estimated BW!", + __func__, link->link_index, + req_bw, request_bw, link->dpia_bw_alloc_config.estimated_bw); + req_bw = link->dpia_bw_alloc_config.estimated_bw; + + temp = req_bw * link->dpia_bw_alloc_config.bw_granularity; + request_reg_val = temp / Kbps_TO_Gbps; + if (temp % Kbps_TO_Gbps) + ++request_reg_val; } + link->dpia_bw_alloc_config.allocated_bw = request_bw; + DC_LOG_DC("%s: Link[%d]: Request BW: %d", __func__, link->link_index, request_bw); + core_link_write_dpcd(link, REQUESTED_BW, - &requested_bw, + &request_reg_val, sizeof(uint8_t)); } @@ -304,14 +242,16 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) link->dpia_bw_alloc_config.bw_alloc_enabled = true; ret = true; - /* - * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other - * DPIA. CM release preallocation only when allocation is complete. Do zero alloc - * to make the CM to release preallocation and update estimated BW correctly for - * all DPIAs per host router - */ - // TODO: Zero allocation can be removed once the MSFT CM fix has been released - link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0); + if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) { + /* + * During DP tunnel creation, the CM preallocates BW + * and reduces the estimated BW of other DPIAs. + * The CM releases the preallocation only when the allocation is complete. + * Perform a zero allocation to make the CM release the preallocation + * and correctly update the estimated BW for all DPIAs per host router. 
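+ * A zero-sized request goes through the same DPCD encoding as any other
+ * request in link_dpia_send_bw_alloc_request() above: request_reg_val =
+ * DIV_ROUND_UP(req_bw * bw_granularity, Kbps_TO_Gbps), so e.g. with a
+ * 0.25 Gbps granularity (bw_granularity == 4) req_bw == 0 writes 0 to
+ * REQUESTED_BW and the CM drops its preallocation.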
+ */ + link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0); + } } else DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index); } @@ -329,19 +269,17 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status) { + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) { DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)", __func__, link->link_index); } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw); } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); } @@ -374,9 +312,13 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw) { - DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n", + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + + DC_LOG_DEBUG("%s: ENTER: link[%d] hpd(%d) Allocated_BW: %d Estimated_BW: %d Req_BW: %d", __func__, link->link_index, link->hpd_status, - link->dpia_bw_alloc_config.allocated_bw, req_bw); + link->dpia_bw_alloc_config.allocated_bw, + link->dpia_bw_alloc_config.estimated_bw, + req_bw); if (link_dp_is_bw_alloc_available(link)) link_dpia_send_bw_alloc_request(link, req_bw); @@ -384,73 +326,116 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__); } -bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias) +uint32_t link_dpia_get_dp_overhead(const struct dc_link *link) { - bool ret = true; - int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0; - uint8_t lowest_dpia_index, i, hr_index; + uint32_t link_dp_overhead = 0; - if (!num_dpias || num_dpias > MAX_DPIA_NUM) - return ret; + if ((link->type == dc_connection_mst_branch) && + !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH + * MST overhead is 1/64 of link bandwidth (excluding any overhead) + */ + const struct dc_link_settings *link_cap = dc_link_get_link_cap(link); - lowest_dpia_index = get_lowest_dpia_index(link[0]); + if (link_cap) { + uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate * + (uint32_t)link_cap->lane_count * + LINK_RATE_REF_FREQ_IN_KHZ * 8; + link_dp_overhead = (link_bw_in_kbps / MST_TIME_SLOT_COUNT) + + ((link_bw_in_kbps % MST_TIME_SLOT_COUNT) ? 1 : 0); + } + } - /* get total Host Router BW with granularity for the given modes */ - for (i = 0; i < num_dpias; ++i) { - int granularity_Gbps = 0; - int bw_granularity = 0; + return link_dp_overhead; +} - if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled) - continue; +/* + * Aggregates the DPIA bandwidth usage for the respective USB4 Router. 
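+ * MST (8b/10b) links are charged the extra 1/64 MTPH overhead computed by
+ * link_dpia_get_dp_overhead() above; e.g. HBR2 x4 carries
+ * 20 * 27000 kHz * 8 * 4 lanes = 17,280,000 kbps (assuming the usual
+ * LINK_RATE_REF_FREQ_IN_KHZ of 27000), so 270,000 kbps is added per link.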
+ * It then validates that the required bandwidth is within the router's capacity. + * + * @dc_validation_dpia_set: pointer to the dc_validation_dpia_set + * @count: number of DPIA validation sets + * + * return: true if validation succeeds + */ +bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count) +{ + uint32_t granularity_Gbps; + const struct dc_link *link; + uint32_t link_bw_granularity; + uint32_t link_required_bw; + struct usb4_router_validation_set router_sets[MAX_HOST_ROUTERS_NUM] = { 0 }; + uint8_t i; + bool is_success = true; + uint8_t router_count = 0; + + if ((dpia_link_sets == NULL) || (count == 0)) + return is_success; + + // Iterate through each DP tunneling link (DPIA). + // Aggregate its bandwidth requirements onto the respective USB4 router. + for (i = 0; i < count; i++) { + link = dpia_link_sets[i].link; + link_required_bw = dpia_link_sets[i].required_bw; + const struct dc_tunnel_settings *dp_tunnel_settings = dpia_link_sets[i].tunnel_settings; + + if ((link == NULL) || (dp_tunnel_settings == NULL) || dp_tunnel_settings->bw_granularity == 0) + break; - if (link[i]->link_index < lowest_dpia_index) - continue; + if (link->type == dc_connection_mst_branch) + link_required_bw += link_dpia_get_dp_overhead(link); - granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity); - bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps + - ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0); + granularity_Gbps = (Kbps_TO_Gbps / dp_tunnel_settings->bw_granularity); + link_bw_granularity = (link_required_bw / granularity_Gbps) * granularity_Gbps + + ((link_required_bw % granularity_Gbps) ? granularity_Gbps : 0); - hr_index = (link[i]->link_index - lowest_dpia_index) / 2; - bw_needed_per_hr[hr_index] += bw_granularity; - } + // Find or add the USB4 router associated with the current DPIA link + for (uint8_t j = 0; j < MAX_HOST_ROUTERS_NUM; j++) { + if (router_sets[j].is_valid == false) { + router_sets[j].is_valid = true; + router_sets[j].cm_id = dp_tunnel_settings->cm_id; + router_count++; + } - /* validate against each Host Router max BW */ - for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) { - if (bw_needed_per_hr[hr_index]) { - host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index); - if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) { - ret = false; + if (router_sets[j].cm_id == dp_tunnel_settings->cm_id) { + uint32_t remaining_bw = + dp_tunnel_settings->estimated_bw - dp_tunnel_settings->allocated_bw; + + router_sets[j].allocated_bw += dp_tunnel_settings->allocated_bw; + + if (remaining_bw > router_sets[j].remaining_bw) + router_sets[j].remaining_bw = remaining_bw; + + // Get the max estimated BW within the same CM_ID + if (dp_tunnel_settings->estimated_bw > router_sets[j].estimated_bw) + router_sets[j].estimated_bw = dp_tunnel_settings->estimated_bw; + + router_sets[j].required_bw += link_bw_granularity; + router_sets[j].dpia_count++; break; } } } - return ret; -} + // Validate bandwidth for each unique router found.
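+ // Capacity rule, with hypothetical numbers: a router whose two DPIAs
+ // report allocated_bw = 8,000,000 kbps and remaining headroom
+ // (estimated_bw - allocated_bw) = 9,280,000 kbps accepts a required_bw
+ // of up to 17,280,000 kbps; a router with a single DPIA, or with nothing
+ // allocated yet, is bounded by its estimated_bw alone.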
+ for (i = 0; i < router_count; i++) { + uint32_t total_bw = 0; -int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link) -{ - int dp_overhead = 0, link_mst_overhead = 0; + if (router_sets[i].is_valid == false) + break; - if (!link_dp_is_bw_alloc_available(link)) - return dp_overhead; + // Determine the total available bandwidth for the current router based on aggregated data + if ((router_sets[i].dpia_count == 1) || (router_sets[i].allocated_bw == 0)) + total_bw = router_sets[i].estimated_bw; + else + total_bw = router_sets[i].allocated_bw + router_sets[i].remaining_bw; - /* if its mst link, add MTPH overhead */ - if ((link->type == dc_connection_mst_branch) && - !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { - /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH - * MST overhead is 1/64 of link bandwidth (excluding any overhead) - */ - const struct dc_link_settings *link_cap = - dc_link_get_link_cap(link); - uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate * - (uint32_t)link_cap->lane_count * - LINK_RATE_REF_FREQ_IN_KHZ * 8; - link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0); + if (router_sets[i].required_bw > total_bw) { + is_success = false; + break; + } } - /* add all the overheads */ - dp_overhead = link_mst_overhead; - - return dp_overhead; + return is_success; } + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h index 801965b5f9a4..41efcb3e44e2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h @@ -28,10 +28,6 @@ #include "link.h" -/* Number of Host Routers per motherboard is 2 */ -#define MAX_HR_NUM 2 -/* Number of DPIA per host router is 2 */ -#define MAX_DPIA_NUM (MAX_HR_NUM * 2) /* * Host Router BW type @@ -42,6 +38,16 @@ enum bw_type { HOST_ROUTER_BW_INVALID, }; +struct usb4_router_validation_set { + bool is_valid; + uint8_t cm_id; + uint8_t dpia_count; + uint32_t required_bw; + uint32_t allocated_bw; + uint32_t estimated_bw; + uint32_t remaining_bw; +}; + /* * Enable USB4 DP BW allocation mode * @@ -74,25 +80,13 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw); /* - * Handle the validation of total BW here and confirm that the bw used by each - * DPIA doesn't exceed available BW for each host router (HR) - * - * @link[]: array of link pointer to all possible DPIA links - * @bw_needed[]: bw needed for each DPIA link based on timing - * @num_dpias: Number of DPIAs for the above 2 arrays. 
Should always be <= MAX_DPIA_NUM - * - * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE - */ -bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias); - -/* * Obtain all the DP overheads in dp tunneling for the dpia link * * @link: pointer to the dc_link struct instance * * return: DP overheads in DP tunneling */ -int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link); +uint32_t link_dpia_get_dp_overhead(const struct dc_link *link); /* * Handle DP BW allocation status register @@ -104,4 +98,15 @@ int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link); */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status); +/* + * Aggregates the DPIA bandwidth usage for the respective USB4 Router. + * + * @dc_validation_dpia_set: pointer to the dc_validation_dpia_set + * @count: number of DPIA validation sets + * + * return: true if validation succeeds + */ +bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count); + #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */ + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index da74c2b5854f..e7927b8f5ba3 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -161,6 +161,9 @@ bool edp_set_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds && !link->dpcd_caps.panel_luminance_control) + return true; + // use internal backlight control if dmub capabilities are not present if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX && !link->dc->caps.dmub_caps.aux_backlight_support) { @@ -173,6 +176,15 @@ bool edp_set_backlight_level_nits(struct dc_link *link, target_luminance = (struct target_luminance_value *)&backlight_millinits; + // Make sure we disable AMD ABC first. + core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_enable, sizeof(uint8_t)); + if (backlight_enable) { + backlight_enable = 0; + core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_enable, 1); + } + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &backlight_enable, sizeof(uint8_t)); @@ -193,10 +205,22 @@ bool edp_set_backlight_level_nits(struct dc_link *link, *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; uint8_t backlight_control = isHDR ? 1 : 0; + uint8_t backlight_enable = 0; + // OLEDs have no PWM, they can only use AUX if (link->dpcd_sink_ext_caps.bits.oled == 1) backlight_control = 1; + // Make sure we disable VESA ABC first.
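+ // The AMD (DP_SOURCE_BACKLIGHT_CONTROL) and VESA
+ // (DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE) backlight interfaces are
+ // mutually exclusive: each path clears the other's enable bit before
+ // programming its own, mirroring the AMD-ABC disable above.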
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, sizeof(uint8_t)); + + if (backlight_enable & DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE) { + backlight_enable &= ~DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, sizeof(backlight_enable)); + } + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, (uint8_t *)(&dpcd_backlight_set), sizeof(dpcd_backlight_set)) != DC_OK) @@ -222,6 +246,8 @@ bool edp_get_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds) + return false; if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, dpcd_backlight_get.raw, sizeof(union dpcd_source_backlight_get))) @@ -248,6 +274,8 @@ bool edp_backlight_enable_aux(struct dc_link *link, bool enable) link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds) + return true; if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE, &backlight_enable, 1) != DC_OK) return false; @@ -916,7 +944,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, // TODO: Handle mux change case if force_static is set // If force_static is set, just change the replay_allow_active state directly if (replay != NULL && link->replay_settings.replay_feature_enabled) - replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst); + replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link); link->replay_settings.replay_allow_active = *allow_active; } @@ -1173,6 +1201,16 @@ int edp_get_target_backlight_pwm(const struct dc_link *link) return (int) abm->funcs->get_target_backlight(abm); } +bool is_smartmux_suported(struct dc_link *link) +{ + if (link->dc->caps.is_apu) + return false; + if (!link->dc->config.smart_mux_version) + return false; + + return true; +} + static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, struct link_resource *link_res, bool enable) { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index bcfa6ac5d4e7..4a475d5b9dde 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -30,6 +30,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool set_default_brightness_aux(struct dc_link *link); +bool is_smartmux_suported(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); int edp_get_backlight_level(const struct dc_link *link); bool edp_get_backlight_level_nits(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile index 1e2e66508192..5402c3529f5e 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile @@ -68,5 +68,5 @@ MPC_DCN401 = dcn401_mpc.o AMD_DAL_MPC_DCN401 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn401/,$(MPC_DCN401)) AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN401) -endif +endif diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c index b4cea2b8cb2a..6f0e017a8ae2 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c @@ -30,7 +30,6 
@@ #include "basics/conversion.h" #include "dcn10/dcn10_cm_common.h" #include "dc.h" -#include "dcn401/dcn401_mpc.h" #define REG(reg)\ mpc30->mpc_regs->reg @@ -879,7 +878,7 @@ void mpc32_set3dlut_ram10( } -static void mpc32_set_3dlut_mode( +void mpc32_set_3dlut_mode( struct mpc *mpc, enum dc_lut_mode mode, bool is_color_channel_12bits, @@ -1022,8 +1021,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, .set_bg_color = mpc1_set_bg_color, - .set_movable_cm_location = mpc401_set_movable_cm_location, - .populate_lut = mpc401_populate_lut, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h index 9622518826c9..8c9b20bcca85 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h @@ -391,4 +391,12 @@ void mpc32_select_3dlut_ram( enum dc_lut_mode mode, bool is_color_channel_12bits, uint32_t mpcc_id); + +void mpc32_set_3dlut_mode( + struct mpc *mpc, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + bool is_lut_size17x17x17, + uint32_t mpcc_id); + #endif //__DC_MPCC_DCN32_H__ diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c index 98cf0cbd59ba..f3fb3fe13757 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c @@ -294,7 +294,7 @@ void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id) REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 0 : 1); } -static void program_gamut_remap( +void mpc_program_gamut_remap( struct mpc *mpc, unsigned int mpcc_id, const uint16_t *regval, @@ -426,7 +426,7 @@ void mpc401_set_gamut_remap( if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) { /* Bypass / Disable if type is bypass or hw */ - program_gamut_remap(mpc, mpcc_id, NULL, + mpc_program_gamut_remap(mpc, mpcc_id, NULL, adjust->mpcc_gamut_remap_block_id, MPCC_GAMUT_REMAP_MODE_SELECT_0); } else { struct fixed31_32 arr_matrix[12]; @@ -460,12 +460,12 @@ void mpc401_set_gamut_remap( else mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_2; - program_gamut_remap(mpc, mpcc_id, arr_reg_val, + mpc_program_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, mode_select); } } -static void read_gamut_remap(struct mpc *mpc, +void mpc_read_gamut_remap(struct mpc *mpc, int mpcc_id, uint16_t *regval, enum mpcc_gamut_remap_id gamut_remap_block_id, @@ -561,9 +561,9 @@ void mpc401_get_gamut_remap(struct mpc *mpc, struct mpc_grph_gamut_adjustment *adjust) { uint16_t arr_reg_val[12] = {0}; - uint32_t mode_select; + uint32_t mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_0; - read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select); + mpc_read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select); if (mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) { adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h index 8e35ebc603a9..eb0c68d0b0c7 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h @@ -241,6 +241,19 @@ void mpc401_update_3dlut_fast_load_select( int mpcc_id, int hubp_idx); +void mpc_program_gamut_remap( 
+ struct mpc *mpc, + unsigned int mpcc_id, + const uint16_t *regval, + enum mpcc_gamut_remap_id gamut_remap_block_id, + enum mpcc_gamut_remap_mode_select mode_select); + +void mpc_read_gamut_remap(struct mpc *mpc, + int mpcc_id, + uint16_t *regval, + enum mpcc_gamut_remap_id gamut_remap_block_id, + uint32_t *mode_select); + void mpc401_update_3dlut_fast_load_select( struct mpc *mpc, int mpcc_id, diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index f2ba76c1e0c0..782316348941 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -31,6 +31,7 @@ #include <linux/kgdb.h> #include <linux/delay.h> #include <linux/mm.h> +#include <linux/vmalloc.h> #include <asm/byteorder.h> diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index 84f73fdb0f95..3a51be63f020 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -839,7 +839,7 @@ static enum dc_status build_mapped_resource( static enum dc_status dce100_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i; bool at_least_one_pipe = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index f3d5baac11bf..cccde5a6f3cd 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -963,7 +963,7 @@ static enum dc_status build_mapped_resource( static enum dc_status dce110_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool result = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 4225cae68c10..164ba796f64c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -886,7 +886,7 @@ static enum dc_status build_mapped_resource( enum dc_status dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool result = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h index 6221d749246d..3efc4c55d2d2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h @@ -45,7 +45,7 @@ enum dc_status dce112_validate_with_context( enum dc_status dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); enum dc_status dce112_add_stream_to_ctx( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index d9ffdded5ce1..58b59d52dc9d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -866,7 +866,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool) static enum dc_status dce60_validate_bandwidth( struct dc *dc, 
struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i; bool at_least_one_pipe = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index bd5811f97531..3e8b0ac11d90 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -872,7 +872,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool) static enum dc_status dce80_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i; bool at_least_one_pipe = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index be4ade0853e9..652c05c35494 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -1129,12 +1129,12 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool) static enum dc_status dcn10_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool voltage_supported; DC_FP_START(); - voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate); + voltage_supported = dcn_validate_bandwidth(dc, context, validate_mode); DC_FP_END(); return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 3405be07f5e3..f9cbdad3ef37 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -2007,7 +2007,7 @@ bool dcn20_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; int split[MAX_PIPES] = { 0 }; @@ -2021,7 +2021,7 @@ bool dcn20_fast_validate_bw( dcn20_merge_pipes_for_validate(dc, context); DC_FP_START(); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); DC_FP_END(); *pipe_cnt_out = pipe_cnt; @@ -2125,7 +2125,7 @@ validate_out: } enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool voltage_supported; display_e2e_pipe_params_st *pipes; @@ -2135,7 +2135,7 @@ enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, return DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); - voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes); + voltage_supported = dcn20_validate_bandwidth_fp(dc, context, validate_mode, pipes); DC_FP_END(); kfree(pipes); @@ -2736,6 +2736,8 @@ static bool dcn20_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h index c0e062c7407d..e997d35a8b86 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h @@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params( struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt); -enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode); void dcn20_merge_pipes_for_validate( struct dc *dc, struct dc_state *context); @@ -158,7 +158,7 @@ bool dcn20_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index 43fa2cb117f3..e4a1338d21e0 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -1285,6 +1285,8 @@ static bool dcn201_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 9ab01b65b177..918742a42ded 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -769,7 +769,7 @@ bool dcn21_fast_validate_bw(struct dc *dc, int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; int split[MAX_PIPES] = { 0 }; @@ -783,7 +783,7 @@ bool dcn21_fast_validate_bw(struct dc *dc, dcn20_merge_pipes_for_validate(dc, context); DC_FP_START(); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); DC_FP_END(); *pipe_cnt_out = pipe_cnt; @@ -924,7 +924,7 @@ validate_out: * dcn20_validate_bandwidth in dcn20_resource.c. 
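 * (Throughout this series, the old fast_validate == false maps to
 * DC_VALIDATE_MODE_AND_PROGRAMMING, and former fast-validate checks become
 * validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING, as in the dcn30
 * hunks below.)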
*/ static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool voltage_supported; display_e2e_pipe_params_st *pipes; @@ -934,7 +934,7 @@ static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *c return DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); - voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes); + voltage_supported = dcn21_validate_bandwidth_fp(dc, context, validate_mode, pipes); DC_FP_END(); kfree(pipes); @@ -1684,6 +1684,8 @@ static bool dcn21_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h index f7ecc002c2f7..a017fd9854d1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h @@ -51,6 +51,6 @@ bool dcn21_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif /* _DCN21_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index f631ae34e320..895349d9ca07 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -1319,13 +1319,13 @@ static struct clock_source *dcn30_clock_source_create( int dcn30_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; DC_FP_START(); - dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1627,7 +1627,7 @@ noinline bool dcn30_internal_validate_bw( display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate, + enum dc_validate_mode validate_mode, bool allow_self_refresh_only) { bool out = false; @@ -1646,7 +1646,7 @@ noinline bool dcn30_internal_validate_bw( context->bw_ctx.dml.vba.VoltageLevel = 0; context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; dc->res_pool->funcs->update_soc_for_wm_a(dc, context); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!pipe_cnt) { out = true; @@ -1655,7 +1655,7 @@ noinline bool dcn30_internal_validate_bw( dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); - if (!fast_validate || !allow_self_refresh_only) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING || !allow_self_refresh_only) { /* * DML favors voltage over p-state, but we're more interested in * supporting p-state over voltage. 
We can't support p-state in @@ -1669,7 +1669,7 @@ noinline bool dcn30_internal_validate_bw( vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); } if (allow_self_refresh_only && - (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || + (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || vlevel == context->bw_ctx.dml.soc.num_states || vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) { /* * If mode is unsupported or there's still no p-state support @@ -1678,7 +1678,7 @@ noinline bool dcn30_internal_validate_bw( * We don't actually support prefetch mode 2, so require that we * at least support prefetch mode 1. */ - context->bw_ctx.dml.validate_max_state = fast_validate; + context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING); context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = dm_allow_self_refresh; @@ -1865,7 +1865,7 @@ noinline bool dcn30_internal_validate_bw( } if (repopulate_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); context->bw_ctx.dml.vba.VoltageLevel = vlevel; *vlevel_out = vlevel; *pipe_cnt_out = pipe_cnt; @@ -2037,7 +2037,7 @@ void dcn30_calculate_wm_and_dlg( enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -2055,7 +2055,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc, goto validate_fail; DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true); DC_FP_END(); if (pipe_cnt == 0) @@ -2066,7 +2066,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -2586,6 +2586,8 @@ static bool dcn30_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h index 689d9bdace81..2c967fe55712 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h @@ -57,14 +57,14 @@ unsigned int dcn30_calc_max_scaled_time( unsigned int urgent_watermark); enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dcn30_internal_validate_bw( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate, + enum dc_validate_mode validate_mode, bool allow_self_refresh_only); void dcn30_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, @@ -78,7 +78,7 @@ void dcn30_populate_dml_writeback_from_context( int dcn30_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dcn30_acquire_post_bldn_3dlut( struct resource_context *res_ctx, 
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 121a86a59833..82a205a7c25c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -1706,6 +1706,8 @@ static bool dcn301_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 012c5fd52cb1..3345068a878c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -1481,6 +1481,8 @@ static bool dcn302_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index a8d0b4686f9a..3479e1eab4cd 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -1414,6 +1414,8 @@ static bool dcn303_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 51ca0b2959fc..3ed7f50554e2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -1616,14 +1616,14 @@ static bool is_dual_plane(enum surface_pixel_format format) int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { uint32_t pipe_cnt; int i; dc_assert_fp_enabled(); - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0; i < pipe_cnt; i++) { pipes[i].pipe.src.gpuvm = 1; @@ -1641,7 +1641,7 @@ int dcn31x_populate_dml_pipes_from_context(struct dc *dc, int dcn31_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1649,7 +1649,7 @@ int dcn31_populate_dml_pipes_from_context( bool upscaled = false; DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1760,7 +1760,7 @@ dcn31_set_mcif_arb_params(struct dc *dc, enum dc_status dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -1778,19 +1778,19 @@ enum dc_status dcn31_validate_bandwidth(struct dc *dc, goto 
validate_fail; DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true); DC_FP_END(); - // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) - fast_validate = false; + validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1850,7 +1850,9 @@ static struct resource_funcs dcn31_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn31_get_panel_config_defaults, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static struct clock_source *dcn30_clock_source_create( @@ -2202,6 +2204,8 @@ static bool dcn31_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp; @@ -2231,3 +2235,35 @@ struct resource_pool *dcn31_create_resource_pool( kfree(pool); return NULL; } + +enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output) +{ + struct dc_state *state = link->dc->current_state; + int i; + +#if defined(CONFIG_DRM_AMD_DC_FP) + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) + link->dc->hwss.calculate_pix_rate_divider((struct dc *)link->dc, state, state->streams[i]); + + for (i = 0; i < pipe_count; i++) { + link->dc->res_pool->funcs->build_pipe_pix_clk_params(&pipes[i]); + + // Setup audio + if (pipes[i].stream_res.audio != NULL) + build_audio_output(state, &pipes[i], &audio_output[i]); + } +#else + /* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching, + * but the above will not compile on architectures without an FPU. 
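
The dcn31 implementation above recomputes pixel-rate dividers, rebuilds pixel clock parameters through the new build_pipe_pix_clk_params hook, and reassembles the audio outputs for every pipe driven by the link. A hypothetical caller (the real call site is not part of this diff) would reach it through resource_funcs, treating the hook as optional since only some DCNs wire it up:

/* Hypothetical caller, sketching how the new optional hook slots into a
 * DP1<-->DP2 link-rate switch. */
static enum dc_status switch_encoder_state(struct dc_link *link,
		struct dc_link_settings *link_setting,
		uint8_t pipe_count, struct pipe_ctx *pipes,
		struct audio_output *audio_output)
{
	const struct resource_funcs *funcs = link->dc->res_pool->funcs;

	if (!funcs->update_dc_state_for_encoder_switch)
		return DC_ERROR_UNEXPECTED; /* hook not wired up on this DCN */

	return funcs->update_dc_state_for_encoder_switch(link, link_setting,
			pipe_count, pipes, audio_output);
}
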
+ */ + DC_LOG_WARNING("%s: DP1<-->DP2 link retraining will not work on this DCN on non-FPU platforms", __func__); + ASSERT(0); +#endif + + return DC_OK; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h index dd82815d7efe..c32c85ef0ba4 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h @@ -39,7 +39,7 @@ struct dcn31_resource_pool { enum dc_status dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn31_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -48,7 +48,7 @@ void dcn31_calculate_wm_and_dlg( int dcn31_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn31_populate_dml_writeback_from_context(struct dc *dc, struct resource_context *res_ctx, @@ -66,6 +66,12 @@ struct resource_pool *dcn31_create_resource_pool( unsigned int dcn31_get_det_buffer_size( const struct dc_state *context); +enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output); + /*temp: B0 specific before switch to dcn313 headers*/ #ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL #define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 8383e2e59be5..de708fdc1e80 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -1667,12 +1667,12 @@ static struct clock_source *dcn31_clock_source_create( static int dcn314_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt; DC_FP_START(); - pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate); + pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode); DC_FP_END(); return pipe_cnt; @@ -1696,7 +1696,7 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi enum dc_status dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -1715,19 +1715,19 @@ enum dc_status dcn314_validate_bandwidth(struct dc *dc, DC_FP_START(); // do not support self refresh only - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, false); DC_FP_END(); - // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) - fast_validate = false; + validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1779,7 +1779,9 @@ static struct 
resource_funcs dcn314_res_pool_funcs = { .get_panel_config_defaults = dcn314_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static struct clock_source *dcn30_clock_source_create( @@ -2117,6 +2119,8 @@ static bool dcn314_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h index f8ba531d6342..ac9bb7f097d5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h @@ -41,7 +41,7 @@ struct dcn314_resource_pool { enum dc_status dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); struct resource_pool *dcn314_create_resource_pool( const struct dc_init_data *init_data, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 6c2bb3f63be1..82cc78c291d8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -1664,7 +1664,7 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) static int dcn315_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt, crb_idx, crb_pipes; struct resource_context *res_ctx = &context->res_ctx; @@ -1674,7 +1674,7 @@ static int dcn315_populate_dml_pipes_from_context( bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) { @@ -1844,7 +1844,9 @@ static struct resource_funcs dcn315_res_pool_funcs = { .get_panel_config_defaults = dcn315_get_panel_config_defaults, .get_power_profile = dcn315_get_power_profile, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn315_resource_construct( @@ -2140,6 +2142,8 @@ static bool dcn315_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 568094827212..636110e48d01 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -1610,7 +1610,7 @@ static bool is_dual_plane(enum surface_pixel_format format) static int dcn316_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1618,7 +1618,7 @@ static int dcn316_populate_dml_pipes_from_context( const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB; DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1720,7 +1720,9 @@ static struct resource_funcs dcn316_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn316_get_panel_config_defaults, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn316_resource_construct( @@ -2008,6 +2010,8 @@ static bool dcn316_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index bb0dae0be5b8..9917b366f00c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1742,7 +1742,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, } } -static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate) +static bool dml1_validate(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode) { bool out = false; @@ -1767,7 +1767,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val goto validate_fail; DC_FP_START(); - out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode); DC_FP_END(); if (pipe_cnt == 0) @@ -1778,7 +1778,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1809,7 +1809,7 @@ validate_out: enum dc_status dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { unsigned int i; enum dc_status status; @@ -1827,11 +1827,11 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; else - status = dml1_validate(dc, context, fast_validate) ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; - if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) { /* check new stream configuration still supports cursor if subvp used */ for (i = 0; i < context->stream_count; i++) { stream = context->streams[i]; @@ -1846,14 +1846,14 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, }; } - if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { /* attempt to validate again with subvp disabled due to cursor */ if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; else - status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } return status; @@ -1862,7 +1862,7 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, int dcn32_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1878,7 +1878,7 @@ int dcn32_populate_dml_pipes_from_context( int num_subvp_none = 0; int odm_slice_count; - dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); /* For single display subvp, look for subvp main so if we have phantom * pipe, we can set odm policy to match main pipe @@ -1960,7 +1960,7 @@ int dcn32_populate_dml_pipes_from_context( /* Only populate DML input with subvp info for full updates. * This is just a workaround -- needs a proper fix. 
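
The dcn32 validator above becomes a two-pass flow around SubVP and hardware cursor support: a full validation that ends with SubVP active re-checks cursor limits per stream, and a DC_FAIL_HW_CURSOR_SUPPORT result triggers exactly one retry. A condensed sketch, where dml_validate() and cursor_ok() are hypothetical shorthand for the DML2/DML1 dispatch and the per-stream cursor check in the hunk:

enum dc_status status = dml_validate(dc, context, validate_mode);

/* Full validation only: SubVP and the HW cursor can conflict. */
if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK &&
    dc_state_is_subvp_in_use(context) && !cursor_ok(context))
	status = DC_FAIL_HW_CURSOR_SUPPORT;

/* Retry once: with the cursor failure recorded, SubVP is left disabled. */
if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING &&
    status == DC_FAIL_HW_CURSOR_SUPPORT)
	status = dml_validate(dc, context, validate_mode);
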
*/ - if (!fast_validate) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { switch (dc_state_get_pipe_subvp_type(context, pipe)) { case SUBVP_MAIN: pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; @@ -2061,21 +2061,15 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - DC_FP_START(); dcn32_update_bw_bounding_box_fpu(dc, bw_params); - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -2257,7 +2251,7 @@ static bool dcn32_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN2 and later ASICs dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -2276,6 +2270,7 @@ static bool dcn32_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; @@ -2505,6 +2500,8 @@ static bool dcn32_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2519,7 +2516,6 @@ static bool dcn32_resource_construct( } dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; @@ -2551,6 +2547,10 @@ static bool dcn32_resource_construct( if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) dc->config.sdpif_request_limit_words_per_umc = 16; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index d60ed77eda80..82f966cf4ed2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -100,12 +100,12 @@ void dcn32_add_phantom_pipes(struct dc *dc, enum dc_status dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); int dcn32_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode 
validate_mode); void dcn32_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 7db1f7a5613f..061c0907d802 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1580,21 +1580,15 @@ static struct dc_cap_funcs cap_funcs = { static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - DC_FP_START(); dcn321_update_bw_bounding_box_fpu(dc, bw_params); - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -1761,8 +1755,8 @@ static bool dcn321_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; + dc->caps.color.dpp.hw_3d_lut = 0; + dc->caps.color.dpp.ogam_ram = 0; // no OGAM ROM on DCN2 and later ASICs dc->caps.color.dpp.ogam_rom_caps.srgb = 0; dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; @@ -1780,6 +1774,7 @@ static bool dcn321_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; @@ -2004,6 +1999,8 @@ static bool dcn321_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2018,7 +2015,6 @@ static bool dcn321_resource_construct( } dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; @@ -2046,6 +2042,10 @@ static bool dcn321_resource_construct( dc->dml2_options.max_segments_per_hubp = 18; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index e01aa2f2e13e..8475c6eec547 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -1734,15 +1734,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config static enum dc_status dcn35_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { 
bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); @@ -1786,7 +1786,9 @@ static struct resource_funcs dcn35_res_pool_funcs = { .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn35_resource_construct( @@ -1874,7 +1876,7 @@ static bool dcn35_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1893,6 +1895,10 @@ static bool dcn35_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; + + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; dc->caps.num_of_host_routers = 2; dc->caps.num_of_dpias_per_host_router = 2; @@ -2151,12 +2157,13 @@ static bool dcn35_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 4ebe4e00a4f8..0971c0f74186 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1714,15 +1714,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config static enum dc_status dcn351_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); @@ -1758,7 +1758,9 @@ static struct resource_funcs dcn351_res_pool_funcs = { .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn351_resource_construct( @@ -1846,7 +1848,7 @@ static bool dcn351_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1865,6 +1867,10 @@ static bool dcn351_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; + + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; dc->caps.num_of_host_routers = 2; dc->caps.num_of_dpias_per_host_router = 2; @@ -2122,13 +2128,14 @@ static bool dcn351_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index db36b8f9ce65..8bae7fcedc22 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -1715,15 +1715,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config static enum dc_status dcn35_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); @@ -1759,7 +1759,9 @@ static struct resource_funcs dcn36_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn36_resource_construct( @@ -1847,7 +1849,7 @@ static bool dcn36_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1866,6 +1868,10 @@ static bool dcn36_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; + + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; dc->caps.num_of_host_routers = 2; dc->caps.num_of_dpias_per_host_router = 2; @@ -2124,12 +2130,13 @@ static bool dcn36_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index f420c4dafa03..b3988e38d0a6 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -70,7 +70,6 @@ #include "dml/dcn30/display_mode_vba_30.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" -#include "dml/dcn401/dcn401_fpu.h" #include "dc_state_priv.h" @@ -1608,10 +1607,6 @@ static struct dc_cap_funcs cap_funcs = { static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - /* re-calculate the available MALL size if required */ if (bw_params->num_channels > 0) { dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall( @@ -1622,15 +1617,11 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b DC_FP_START(); - dcn401_update_bw_bounding_box_fpu(dc, bw_params); - - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -1644,7 
+1635,7 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta enum dc_status dcn401_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { unsigned int i; enum dc_status status = DC_OK; @@ -1662,9 +1653,9 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc, if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; - if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) { /* check new stream configuration still supports cursor if subvp used */ for (i = 0; i < context->stream_count; i++) { stream = context->streams[i]; @@ -1679,12 +1670,12 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc, }; } - if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { /* attempt to validate again with subvp disabled due to cursor */ if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } return status; @@ -1957,8 +1948,30 @@ static bool dcn401_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; dc->config.use_spl = true; dc->config.prefer_easf = true; + + dc->config.dcn_sharpness_range.sdr_rgb_min = 0; + dc->config.dcn_sharpness_range.sdr_rgb_max = 1750; + dc->config.dcn_sharpness_range.sdr_rgb_mid = 750; + dc->config.dcn_sharpness_range.sdr_yuv_min = 0; + dc->config.dcn_sharpness_range.sdr_yuv_max = 3500; + dc->config.dcn_sharpness_range.sdr_yuv_mid = 1500; + dc->config.dcn_sharpness_range.hdr_rgb_min = 0; + dc->config.dcn_sharpness_range.hdr_rgb_max = 2750; + dc->config.dcn_sharpness_range.hdr_rgb_mid = 1500; + + dc->config.dcn_override_sharpness_range.sdr_rgb_min = 0; + dc->config.dcn_override_sharpness_range.sdr_rgb_max = 3250; + dc->config.dcn_override_sharpness_range.sdr_rgb_mid = 1250; + dc->config.dcn_override_sharpness_range.sdr_yuv_min = 0; + dc->config.dcn_override_sharpness_range.sdr_yuv_max = 3500; + dc->config.dcn_override_sharpness_range.sdr_yuv_mid = 1500; + dc->config.dcn_override_sharpness_range.hdr_rgb_min = 0; + dc->config.dcn_override_sharpness_range.hdr_rgb_max = 2750; + dc->config.dcn_override_sharpness_range.hdr_rgb_mid = 1500; + dc->config.dc_mode_clk_limit_support = true; dc->config.enable_windowed_mpo_odm = true; dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */ @@ -2177,6 +2190,8 @@ static bool dcn401_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2195,7 +2210,6 @@ static bool dcn401_resource_construct( dc->config.sdpif_request_limit_words_per_umc = 16; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; 
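
dcn32, dcn321 and dcn401 all receive the same refactor here: the dml2_tmp scratch copy that was memcpy'd and mutated on every bounding-box update is dropped in favor of a second persistent option set, dc->dml2_dc_power_options, cloned once at construct time with DC (battery) clock limits enabled. A condensed sketch using only names from these hunks, with the surrounding code elided:

/* at resource construct time: clone the options once for the DC power source */
memcpy(&dc->dml2_dc_power_options, &dc->dml2_options,
       sizeof(struct dml2_configuration_options));
dc->dml2_dc_power_options.use_clock_dc_limits = true;

/* at bounding-box update time: each DML2 instance reinits from its own
 * persistent options instead of a shared, mutated temporary */
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
	dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
if (dc->debug.using_dml2 && dc->current_state &&
    dc->current_state->bw_ctx.dml2_dc_power_source)
	dml2_reinit(dc, &dc->dml2_dc_power_options,
		    &dc->current_state->bw_ctx.dml2_dc_power_source);
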
dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.map_dc_pipes_with_callbacks = true; @@ -2228,6 +2242,10 @@ static bool dcn401_resource_construct( /* SPL */ dc->caps.scl_caps.sharpener_support = true; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index dc52a30991af..2ae6831c31ef 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -24,7 +24,7 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta enum dc_status dcn401_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c index e0008c5f08ad..55b929ca7982 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c @@ -196,7 +196,12 @@ static struct spl_rect calculate_mpc_slice_in_timing_active( int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1; struct spl_rect mpc_rec; - if (use_recout_width_aligned) { + if (spl_in->basic_in.custom_width != 0) { + mpc_rec.width = spl_in->basic_in.custom_width; + mpc_rec.x = spl_in->basic_in.custom_x; + mpc_rec.height = plane_clip_rec->height; + mpc_rec.y = plane_clip_rec->y; + } else if (use_recout_width_aligned) { mpc_rec.width = recout_width_align; if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) { mpc_rec.width = plane_clip_rec->width % recout_width_align; @@ -219,7 +224,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active( /* extra pixels in the division remainder need to go to pipes after * the extra pixel index minus one(epimo) defined here as: */ - if (mpc_slice_idx > epimo) { + if (mpc_slice_idx > epimo && spl_in->basic_in.custom_width == 0) { mpc_rec.x += mpc_slice_idx - epimo - 1; mpc_rec.width += 1; } @@ -252,10 +257,10 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i odm_rec.x = odm_slice_width * odm_slice_idx; odm_rec.width = is_last_odm_slice ? 
- /* last slice width is the remainder of h_active */ - h_active - odm_slice_width * (odm_slice_count - 1) : - /* odm slice width is the floor of h_active / count */ - odm_slice_width; + /* last slice width is the remainder of h_active */ + h_active - odm_slice_width * (odm_slice_count - 1) : + /* odm slice width is the floor of h_active / count */ + odm_slice_width; odm_rec.y = 0; odm_rec.height = v_active; @@ -884,7 +889,9 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, /* Calculate number of taps with adaptive scaling off */ static void spl_get_taps_non_adaptive_scaler( - struct spl_scratch *spl_scratch, const struct spl_taps *in_taps, bool always_scale) + struct spl_scratch *spl_scratch, + const struct spl_taps *in_taps, + bool is_subsampled) { bool check_max_downscale = false; @@ -945,14 +952,15 @@ static void spl_get_taps_non_adaptive_scaler( SPL_ASSERT(check_max_downscale); - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) spl_scratch->scl_data.taps.h_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert)) spl_scratch->scl_data.taps.v_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled) spl_scratch->scl_data.taps.h_taps_c = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled) spl_scratch->scl_data.taps.v_taps_c = 1; + } /* Calculate optimal number of taps */ @@ -965,15 +973,13 @@ static bool spl_get_optimal_number_of_taps( unsigned int max_taps_y, max_taps_c; unsigned int min_taps_y, min_taps_c; enum lb_memory_config lb_config; - bool skip_easf = false; - bool always_scale = spl_in->basic_out.always_scale; + bool skip_easf = false; bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format); - if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active && max_downscale_src_width != 0 && spl_scratch->scl_data.viewport.width > max_downscale_src_width) { - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale); + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); *enable_easf_v = false; *enable_easf_h = false; *enable_isharp = false; @@ -982,7 +988,7 @@ static bool spl_get_optimal_number_of_taps( /* Disable adaptive scaler and sharpener when integer scaling is enabled */ if (spl_in->scaling_quality.integer_scaling) { - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale); + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); *enable_easf_v = false; *enable_easf_h = false; *enable_isharp = false; @@ -997,8 +1003,9 @@ static bool spl_get_optimal_number_of_taps( * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling * taps = 4 for upscaling */ - if (skip_easf) - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale); + if (skip_easf) { + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); + } else { if (spl_is_video_format(spl_in->basic_in.format)) { spl_scratch->scl_data.taps.h_taps = 6; @@ -1124,7 +1131,6 @@ static bool spl_get_optimal_number_of_taps( (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) { spl_scratch->scl_data.taps.h_taps = 1; spl_scratch->scl_data.taps.v_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1; @@ -1149,6 +1155,7 @@ static bool spl_get_optimal_number_of_taps( if ((!*enable_easf_v) && !is_subsampled && (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))) spl_scratch->scl_data.taps.v_taps_c = 1; + } } return true; diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h index 36a284305a70..23d254dea18f 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h @@ -460,6 +460,8 @@ struct basic_in { enum spl_color_space color_space; // Color Space unsigned int max_luminance; // Max Luminance TODO: Is determined in dc_hw_sequencer.c is_sdr bool film_grain_applied; // Film Grain Applied // TODO: To check from where to get this? + int custom_width; // Width for non-standard segmentation - used when != 0 + int custom_x; // Start x for non-standard segmentation - used when custom_width != 0 }; // Basic output information
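
For the dc_spl.c changes above, a worked example helps show what the new custom_width path bypasses. In the even-split path of calculate_mpc_slice_in_timing_active(), remainder pixels are distributed to the slices after index epimo; the numbers below are invented for illustration, and the base width assumes the non-aligned branch floors plane width over slice count, which is outside the hunk shown:

/* 1000 px plane clip across 3 MPC slices:
 *   base width = 1000 / 3           = 333
 *   epimo      = 3 - (1000 % 3) - 1 = 1
 * Slices with mpc_slice_idx > epimo take one extra pixel, giving widths
 * 333, 333, 334 (sum = 1000). When basic_in.custom_width is non-zero the
 * slice instead takes custom_width/custom_x verbatim, and the added
 * "&& spl_in->basic_in.custom_width == 0" guard skips this redistribution.
 */
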