Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/clk_mgr')
39 files changed, 2943 insertions, 391 deletions
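Several hunks below (dcn315, dcn316, dcn35) share one DISPCLK-programming pattern: a zero request is skipped unless the active display count has dropped to zero, the value actually sent to PMFW is clamped to the dc->debug.min_disp_clk_khz floor, and the unclamped request is cached so later should_set_clock() comparisons stay consistent. What follows is a minimal, self-contained C sketch of that flow, not the driver's real API: apart from should_set_clock(), whose raise-immediately/lower-only-when-safe semantics match the DC helper, the type and function names are illustrative stand-ins.

#include <stdbool.h>

/* Stand-in for the relevant slice of clock-manager state. */
struct clk_state {
	int dispclk_khz;
};

/* Same semantics as the DC helper: raise immediately, lower only when
 * the caller has flagged that lowering is safe. */
static bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	return (safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk;
}

/* The recurring pattern: skip a zero request unless no displays remain,
 * clamp the value sent to firmware to the debug floor, then cache the
 * raw (unclamped) request. smu_set_dispclk() stands in for the
 * per-ASIC SMU message. */
static void update_dispclk(struct clk_state *clks, int new_dispclk_khz,
			   int min_disp_clk_khz, bool safe_to_lower,
			   int display_count)
{
	if (should_set_clock(safe_to_lower, new_dispclk_khz, clks->dispclk_khz) &&
	    (new_dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
		int requested_dispclk_khz = new_dispclk_khz;

		if (min_disp_clk_khz > 0 &&
		    requested_dispclk_khz < min_disp_clk_khz)
			requested_dispclk_khz = min_disp_clk_khz;

		/* smu_set_dispclk(requested_dispclk_khz); */
		clks->dispclk_khz = new_dispclk_khz;
	}
}

Keeping the cached value at the raw request (rather than the clamped one) means a later, larger request still compares against what the caller asked for, so the clamp never masks a needed raise.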
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index 1c443e549afa..d9955c5d2e5e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -83,7 +83,6 @@ CLK_MGR_DCN10 = rv1_clk_mgr.o rv1_clk_mgr_vbios_smu.o rv2_clk_mgr.o AMD_DAL_CLK_MGR_DCN10 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn10/,$(CLK_MGR_DCN10)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN10) - ############################################################################### # DCN20 ############################################################################### @@ -175,9 +174,18 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN32) ############################################################################### # DCN35 ############################################################################### -CLK_MGR_DCN35 = dcn35_smu.o dcn35_clk_mgr.o +CLK_MGR_DCN35 = dcn35_smu.o dcn351_clk_mgr.o dcn35_clk_mgr.o AMD_DAL_CLK_MGR_DCN35 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn35/,$(CLK_MGR_DCN35)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN35) + +############################################################################### +# DCN401 +############################################################################### +CLK_MGR_DCN401 = dcn401_clk_mgr.o dcn401_clk_mgr_smu_msg.o + +AMD_DAL_CLK_MGR_DCN401 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn401/,$(CLK_MGR_DCN401)) + +AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN401) endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 9f0f25aee426..4c3e58c730b1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -48,6 +48,7 @@ #include "dcn316/dcn316_clk_mgr.h" #include "dcn32/dcn32_clk_mgr.h" #include "dcn35/dcn35_clk_mgr.h" +#include "dcn401/dcn401_clk_mgr.h" int clk_mgr_helper_get_active_display_cnt( struct dc *dc, @@ -58,6 +59,7 @@ int clk_mgr_helper_get_active_display_cnt( display_count = 0; for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; + const struct dc_stream_status *stream_status = &context->stream_status[i]; /* Don't count SubVP phantom pipes as part of active * display count @@ -65,13 +67,7 @@ int clk_mgr_helper_get_active_display_cnt( if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) continue; - /* - * Only notify active stream or virtual stream. - * Need to notify virtual stream to work around - * headless case. HPD does not fire when system is in - * S0i2. 
- */ - if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL) + if (!stream->dpms_off || (stream_status && stream_status->plane_count)) display_count++; } @@ -272,7 +268,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } - if (asic_id.chip_id == DEVICE_ID_NV_13FE) { + if (ctx->dce_version == DCN_VERSION_2_01) { dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } @@ -329,15 +325,14 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p } break; case AMDGPU_FAMILY_GC_11_0_0: { - struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); - - if (clk_mgr == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; - } + struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); - dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); - return &clk_mgr->base; + if (clk_mgr == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + return &clk_mgr->base; } case AMDGPU_FAMILY_GC_11_0_1: { @@ -360,12 +355,26 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p BREAK_TO_DEBUGGER(); return NULL; } + if (ctx->dce_version == DCN_VERSION_3_51) + dcn351_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + else + dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); - dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base.base; } break; + case AMDGPU_FAMILY_GC_12_0_0: { + struct clk_mgr_internal *clk_mgr = dcn401_clk_mgr_construct(ctx, dccg); + + if (clk_mgr == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + return &clk_mgr->base; + } + break; #endif /* CONFIG_DRM_AMD_DC_FP */ default: ASSERT(0); /* Unknown Asic */ @@ -420,6 +429,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base) case AMDGPU_FAMILY_GC_11_5_0: dcn35_clk_mgr_destroy(clk_mgr); break; + case AMDGPU_FAMILY_GC_12_0_0: + dcn401_clk_mgr_destroy(clk_mgr); + break; default: break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index b77804cfde0f..26feefbb8990 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -132,7 +132,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) int dprefclk_wdivider; int dprefclk_src_sel; int dp_ref_clk_khz; - int target_div = 600000; + int target_div; /* ASSERT DP Reference Clock source is from DFS*/ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index 78df96882d6e..f8409453434c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -195,7 +195,7 @@ void dce11_pplib_apply_display_requirements( * , then change minimum memory clock based on real-time bandwidth * limitation. 
*/ - if ((dc->ctx->asic_id.chip_family == FAMILY_AI) && + if (dc->bw_vbios && (dc->ctx->asic_id.chip_family == FAMILY_AI) && ASICREV_IS_VEGA20_P(dc->ctx->asic_id.hw_internal_rev) && (context->stream_count >= 2)) { pp_display_cfg->min_memory_clock_khz = max(pp_display_cfg->min_memory_clock_khz, (uint32_t) div64_s64( diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 2a74e2d74909..369421e46c52 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -23,7 +23,6 @@ * */ -#include "reg_helper.h" #include "core_types.h" #include "clk_mgr_internal.h" #include "rv1_clk_mgr.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 89b79dd39628..d82a52319088 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -26,7 +26,6 @@ #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" -#include <linux/delay.h> #include "rv1_clk_mgr_vbios_smu.h" @@ -143,17 +142,3 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di return actual_dispclk_set_mhz * 1000; } - -int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr) -{ - int actual_dprefclk_set_mhz = -1; - - actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param( - clk_mgr, - VBIOSSMC_MSG_SetDprefclkFreq, - khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz)); - - /* TODO: add code for programing DP DTO, currently this is down by command table */ - - return actual_dprefclk_set_mhz * 1000; -} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h index 083cb3158859..81d7c912549c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h @@ -27,6 +27,5 @@ #define DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz); -int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 5ee87965a078..bb4f3bd7532e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -503,7 +503,7 @@ static void dcn2_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c index 9c90090e7351..76c612ecfe3c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c @@ -34,8 +34,8 @@ #include "dm_services.h" #include "cyan_skillfish_ip_offset.h" -#include "dcn/dcn_2_0_3_offset.h" 
-#include "dcn/dcn_2_0_3_sh_mask.h" +#include "dcn/dcn_2_0_1_offset.h" +#include "dcn/dcn_2_0_1_sh_mask.h" #include "clk/clk_11_0_1_offset.h" #include "clk/clk_11_0_1_sh_mask.h" @@ -100,13 +100,19 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, if (clk_mgr_base->clks.dispclk_khz == 0 || dc->debug.force_clock_mode & 0x1) { + /* this is from resume or boot up, if forced_clock cfg option + * used, we bypass program dispclk and DPPCLK, but need set them + * for S3. + */ + force_reset = true; + /* force_clock_mode 0x1: force reset the clock even it is the + * same clock as long as it is in Passive level. + */ dcn2_read_clocks_from_hw_dentist(clk_mgr_base); } - clk_mgr_helper_get_active_display_cnt(dc, context); - if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz; @@ -150,11 +156,14 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { if (dpp_clock_lowered) { + // if clock is being lowered, increase DTO before lowering refclk dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn20_update_clocks_update_dentist(clk_mgr, context); } else { + // if clock is being raised, increase refclk before lowering DTO if (update_dppclk || update_dispclk) dcn20_update_clocks_update_dentist(clk_mgr, context); + // always update dtos unless clock is lowered and not safe to lower dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index e3e1940198a9..e18097f82091 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -320,16 +320,16 @@ static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10; regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4) + if (regs_and_bypass->dppclk_bypass > 4) regs_and_bypass->dppclk_bypass = 0; regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4) + if (regs_and_bypass->dcfclk_bypass > 4) regs_and_bypass->dcfclk_bypass = 0; regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4) + if (regs_and_bypass->dispclk_bypass > 4) regs_and_bypass->dispclk_bypass = 0; regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4) + if (regs_and_bypass->dprefclk_bypass > 4) regs_and_bypass->dprefclk_bypass = 0; if (log_info->enabled) { @@ -484,7 +484,8 @@ static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_sm ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; /* Modify previous watermark range to cover up to max */ - ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + if (num_valid_sets > 0) + ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; } num_valid_sets++; } @@ -548,7 +549,7 @@ static 
void rn_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_l clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } @@ -642,7 +643,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params j = -1; - ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported FCLK DPM levels exceed maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ @@ -770,7 +772,7 @@ void rn_clk_mgr_construct( status = pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table); if (status == PP_SMU_RESULT_OK && - ctx->dc_bios && ctx->dc_bios->integrated_info) { + ctx->dc_bios->integrated_info) { rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info); /* treat memory config as single channel if memory is asymmetrics. */ if (ctx->dc->config.is_asymmetric_memory) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 23b390245b5d..5a633333dbb5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -164,20 +164,6 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis return actual_dispclk_set_mhz * 1000; } -int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr) -{ - int actual_dprefclk_set_mhz = -1; - - actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param( - clk_mgr, - VBIOSSMC_MSG_SetDprefclkFreq, - khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz)); - - /* TODO: add code for programing DP DTO, currently this is down by command table */ - - return actual_dprefclk_set_mhz * 1000; -} - int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz) { int actual_dcfclk_set_mhz = -1; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h index 1ce19d875358..f76fad87f0e1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h @@ -30,7 +30,6 @@ enum dcn_pwr_state; int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr); int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz); -int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr); int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz); int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz); void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 3271c8c7905d..8083a553c60e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -474,7 +474,7 @@ static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct d clk_mgr->cur_phyclk_req_table[link->link_index] = 
link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } @@ -560,11 +560,19 @@ void dcn3_clk_mgr_construct( dce_clock_read_ss_info(clk_mgr); clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); + if (!clk_mgr->base.bw_params) { + BREAK_TO_DEBUGGER(); + return; + } /* need physical address of table to give to PMFW */ clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t), &clk_mgr->wm_range_table_addr); + if (!clk_mgr->wm_range_table) { + BREAK_TO_DEBUGGER(); + return; + } } void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c index bdbf18306698..3253115a153d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c @@ -23,7 +23,6 @@ * */ -#include <linux/delay.h> #include "dcn30_clk_mgr_smu_msg.h" #include "clk_mgr_internal.h" @@ -54,6 +53,7 @@ */ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) { + const uint32_t initial_max_retries = max_retries; uint32_t reg = 0; do { @@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un /* handle DALSMC_Result_CmdRejectedBusy? */ - /* Log? */ + TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); return reg; } @@ -89,6 +89,8 @@ static bool dcn30_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + result = dcn30_smu_wait_for_response(clk_mgr, 10, 200000); if (IS_SMU_TIMEOUT(result)) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h index 61bb1d86182e..1bfd6f66f035 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// This is a stripped-down version of the smu11_driver_if.h file for the relevant DAL interfaces. +/* Copyright © 2022-2024 Advanced Micro Devices, Inc. All rights reserved. 
*/ #define SMU11_DRIVER_IF_VERSION 0x40 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index aa9fd1dc550a..9e2ef0e724fc 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -252,16 +252,16 @@ static void vg_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10; regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4) + if (regs_and_bypass->dppclk_bypass > 4) regs_and_bypass->dppclk_bypass = 0; regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4) + if (regs_and_bypass->dcfclk_bypass > 4) regs_and_bypass->dcfclk_bypass = 0; regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4) + if (regs_and_bypass->dispclk_bypass > 4) regs_and_bypass->dispclk_bypass = 0; regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4) + if (regs_and_bypass->dprefclk_bypass > 4) regs_and_bypass->dprefclk_bypass = 0; if (log_info->enabled) { @@ -566,7 +566,8 @@ static void vg_clk_mgr_helper_populate_bw_params( j = -1; - ASSERT(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported FCLK DPM levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ @@ -730,7 +731,7 @@ void vg_clk_mgr_construct( clk_mgr->base.base.bw_params = &vg_bw_params; vg_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks); - if (ctx->dc_bios && ctx->dc_bios->integrated_info) { + if (ctx->dc_bios->integrated_info) { vg_clk_mgr_helper_populate_bw_params( &clk_mgr->base, ctx->dc_bios->integrated_info, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index ce1386e22576..bc123f1884da 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -50,12 +50,13 @@ #include "link.h" #include "logger_types.h" + + +#include "yellow_carp_offset.h" #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger -#include "yellow_carp_offset.h" - #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 @@ -562,7 +563,8 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk j = -1; - ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported pstate levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ @@ -784,7 +786,7 @@ void dcn31_clk_mgr_construct( i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk, i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage); } - if (ctx->dc_bios && ctx->dc_bios->integrated_info) { + if (ctx->dc_bios->integrated_info) { dcn31_clk_mgr_helper_populate_bw_params( &clk_mgr->base, ctx->dc_bios->integrated_info, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 6904e95113c1..f201628e4e98 100644 --- 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -23,7 +23,6 @@ * */ -#include <linux/delay.h> #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index a84f1e376dee..91d872d6d392 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -53,9 +53,6 @@ #include "logger_types.h" -#undef DC_LOGGER -#define DC_LOGGER \ - clk_mgr->base.base.ctx->logger #define MAX_INSTANCE 7 @@ -77,6 +74,9 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } }, { { 0x0001B400, 0x0242E000, 0, 0, 0, 0, 0, 0 } } } }; +#undef DC_LOGGER +#define DC_LOGGER \ + clk_mgr->base.base.ctx->logger #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 @@ -896,7 +896,7 @@ void dcn314_clk_mgr_construct( i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage); } - if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) { + if (ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) { dcn314_clk_mgr_helper_populate_bw_params( &clk_mgr->base, ctx->dc_bios->integrated_info, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h index 047d19ea919c..78ca1e5c5e9e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h @@ -37,34 +37,34 @@ typedef enum { } WCK_RATIO_e; typedef struct { - uint32_t FClk; - uint32_t MemClk; - uint32_t Voltage; - uint8_t WckRatio; - uint8_t Spare[3]; + uint32_t FClk; + uint32_t MemClk; + uint32_t Voltage; + uint8_t WckRatio; + uint8_t Spare[3]; } DfPstateTable314_t; //Freq in MHz //Voltage in milli volts with 2 fractional bits typedef struct { - uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; - uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; - uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; - uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; - uint32_t VClocks[NUM_VCN_DPM_LEVELS]; - uint32_t DClocks[NUM_VCN_DPM_LEVELS]; - uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; - DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t VClocks[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks[NUM_VCN_DPM_LEVELS]; + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; - uint8_t NumDcfClkLevelsEnabled; - uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk - uint8_t NumSocClkLevelsEnabled; - uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk - uint8_t NumDfPstatesEnabled; - uint8_t spare[3]; + uint8_t NumDcfClkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk + uint8_t NumSocClkLevelsEnabled; + uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk + uint8_t NumDfPstatesEnabled; + uint8_t spare[3]; - uint32_t MinGfxClk; - uint32_t MaxGfxClk; + uint32_t MinGfxClk; + uint32_t MaxGfxClk; } DpmClocks314_t; struct dcn314_watermarks { diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index 644da4637320..e4d22f74f986 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -130,7 +130,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; + int display_count = 0; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; @@ -145,6 +145,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, */ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; if (safe_to_lower) { + if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { + dcn315_smu_set_dtbclk(clk_mgr, false); + clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + } /* check that we're not already in lower */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { display_count = dcn315_get_active_display_cnt_wa(dc, context); @@ -160,6 +164,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, } } } else { + if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) { + dcn315_smu_set_dtbclk(clk_mgr, true); + clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + } /* check that we're not already in D0 */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { union display_idle_optimization_u idle_info = { 0 }; @@ -186,8 +194,6 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK) new_clocks->dppclk_khz = MIN_DPP_DISP_CLK; - if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK) - new_clocks->dispclk_khz = MIN_DPP_DISP_CLK; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -196,15 +202,19 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, update_dppclk = true; } - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - /* No need to apply the w/a if we haven't taken over from bios yet */ - if (clk_mgr_base->clks.dispclk_khz) - dcn315_disable_otg_wa(clk_mgr_base, context, true); + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && + (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { + int requested_dispclk_khz = new_clocks->dispclk_khz; + + dcn315_disable_otg_wa(clk_mgr_base, context, true); + + /* Clamp the requested clock to PMFW based on their limit. 
*/ + if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) + requested_dispclk_khz = dc->debug.min_disp_clk_khz; + dcn315_smu_set_dispclk(clk_mgr, requested_dispclk_khz); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - if (clk_mgr_base->clks.dispclk_khz) - dcn315_disable_otg_wa(clk_mgr_base, context, false); + dcn315_disable_otg_wa(clk_mgr_base, context, false); update_dispclk = true; } @@ -704,7 +714,7 @@ void dcn315_clk_mgr_construct( i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage); } - if (ctx->dc_bios && ctx->dc_bios->integrated_info) { + if (ctx->dc_bios->integrated_info) { dcn315_clk_mgr_helper_populate_bw_params( &clk_mgr->base, ctx->dc_bios->integrated_info, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c index 879f1494c4cd..478b4d6a3544 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -29,6 +29,7 @@ #include "dm_helpers.h" #include "dcn315_smu.h" #include "mp/mp_13_0_5_offset.h" +#include "logger_types.h" #define MAX_INSTANCE 6 #define MAX_SEGMENT 6 @@ -48,12 +49,9 @@ static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E0000 { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } } } }; + +#define CTX clk_mgr->base.ctx +#define IND_REG(offset) offset #define regBIF_BX_PF2_RSMU_INDEX 0x0000 #define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1 @@ -66,10 +64,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D #define FN(reg_name, field) \ FD(reg_name##__##field) -#define REG_NBIO(reg_name) \ - (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name) - -#include "logger_types.h" #undef DC_LOGGER #define DC_LOGGER \ CTX->logger @@ -77,6 +71,13 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D #define mmMP1_C2PMSG_3 0x3B1050C +#define reg__MP1_C2PMSG_3_MASK (0xFFFFFFFF) +#define reg__MP1_C2PMSG_3__SHIFT (0) + + +#define data_reg_name__MP1_C2PMSG_3_MASK (0xFFFFFFFF) +#define data_reg_name__MP1_C2PMSG_3__SHIFT (0) + #define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. 
Requirement specified by PMFW team #define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version #define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0 @@ -153,12 +154,10 @@ static int dcn315_smu_send_msg_with_param( for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) { /* Trigger the message transaction by writing the message ID */ - generic_write_indirect_reg(CTX, - REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA), - mmMP1_C2PMSG_3, msg_id); - read_back_data = generic_read_indirect_reg(CTX, - REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA), - mmMP1_C2PMSG_3); + IX_REG_SET_SYNC(mmMP1_C2PMSG_3, 0, + MP1_C2PMSG_3, msg_id); + IX_REG_GET_SYNC(mmMP1_C2PMSG_3, + MP1_C2PMSG_3, &read_back_data); if (read_back_data == msg_id) break; udelay(2); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 6ad4f4efec5d..49efea0c8fcf 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -140,7 +140,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; + int display_count = 0; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; @@ -201,8 +201,6 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. if (new_clocks->dppclk_khz < 100000) new_clocks->dppclk_khz = 100000; - if (new_clocks->dispclk_khz < 100000) - new_clocks->dispclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -211,11 +209,18 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, update_dppclk = true; } - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && + (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { + int requested_dispclk_khz = new_clocks->dispclk_khz; + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); + /* Clamp the requested clock to PMFW based on their limit. 
*/ + if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) + requested_dispclk_khz = dc->debug.min_disp_clk_khz; + + dcn316_smu_set_dispclk(clk_mgr, requested_dispclk_khz); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; @@ -485,7 +490,8 @@ static void dcn316_clk_mgr_helper_populate_bw_params( j = -1; - ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported pstate levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ @@ -651,7 +657,7 @@ void dcn316_clk_mgr_construct( if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { dcn316_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks); - if (ctx->dc_bios && ctx->dc_bios->integrated_info) { + if (ctx->dc_bios->integrated_info) { dcn316_clk_mgr_helper_populate_bw_params( &clk_mgr->base, ctx->dc_bios->integrated_info, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index bec252e1dd27..084994c650c4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -29,18 +29,18 @@ #include "dcn20/dcn20_clk_mgr.h" #include "dce100/dce_clk_mgr.h" #include "dcn31/dcn31_clk_mgr.h" +#include "dcn32/dcn32_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" #include "link.h" #include "dc_state_priv.h" #include "atomfirmware.h" -#include "smu13_driver_if.h" +#include "dcn32_smu13_driver_if.h" #include "dcn/dcn_3_2_0_offset.h" #include "dcn/dcn_3_2_0_sh_mask.h" -#include "dcn32/dcn32_clk_mgr.h" #include "dml/dcn32/dcn32_fpu.h" #define DCN_BASE__INST0_SEG1 0x000000C0 @@ -163,9 +163,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); unsigned int num_levels; - struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk; + struct clk_limit_num_entries *num_entries_per_clk; unsigned int i; + if (!clk_mgr_base->bw_params) + return; + + num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk; + memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks)); clk_mgr_base->clks.p_state_change_support = true; clk_mgr_base->clks.prev_p_state_change_support = true; @@ -173,9 +178,6 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) clk_mgr->smu_present = false; clk_mgr->dpm_present = false; - if (!clk_mgr_base->bw_params) - return; - if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver)) clk_mgr->smu_present = true; @@ -553,7 +555,7 @@ static void dcn32_auto_dpm_test_log( // // AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n" //////////////////////////////////////////////////////////////////////////// - if (new_clocks && active_pipe_count > 0 && + if (active_pipe_count > 0 && new_clocks->dramclk_khz > 0 && new_clocks->fclk_khz > 0 && new_clocks->dcfclk_khz > 0 && @@ -574,7 +576,7 @@ static void dcn32_auto_dpm_test_log( p_state_list[i] = curr_pipe_ctx->p_state_type; refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 + - curr_pipe_ctx->stream->timing.v_total * curr_pipe_ctx->stream->timing.h_total - (uint64_t)1); + curr_pipe_ctx->stream->timing.v_total * 
(uint64_t)curr_pipe_ctx->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total); refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total); disp_src_refresh_list[i] = refresh_rate; @@ -712,8 +714,12 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, * since we calculate mode support based on softmax being the max UCLK * frequency. */ - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, - dc->clk_mgr->bw_params->dc_mode_softmax_memclk); + if (dc->debug.disable_dc_mode_overwrite) { + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); + } else + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, + dc->clk_mgr->bw_params->dc_mode_softmax_memclk); } else { dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); } @@ -746,8 +752,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */ if (clk_mgr_base->clks.p_state_change_support && (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) && - !dc->work_arounds.clock_update_disable_mask.uclk) + !dc->work_arounds.clock_update_disable_mask.uclk) { + if (dc->clk_mgr->dc_mode_softmax_enabled && dc->debug.disable_dc_mode_overwrite) + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, + max((int)dc->clk_mgr->bw_params->dc_mode_softmax_memclk, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz))); + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)); + } if (clk_mgr_base->clks.num_ways != new_clocks->num_ways && clk_mgr_base->clks.num_ways > new_clocks->num_ways) { @@ -829,7 +840,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, dmcu->funcs->set_psr_wait_loop(dmcu, clk_mgr_base->clks.dispclk_khz / 1000 / 7); - if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) { + if (dc->config.enable_auto_dpm_test_logs) { dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context); } } @@ -1199,11 +1210,19 @@ void dcn32_clk_mgr_construct( clk_mgr->smu_present = false; clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); + if (!clk_mgr->base.bw_params) { + BREAK_TO_DEBUGGER(); + return; + } /* need physical address of table to give to PMFW */ clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t), &clk_mgr->wm_range_table_addr); + if (!clk_mgr->wm_range_table) { + BREAK_TO_DEBUGGER(); + return; + } } void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c index df244b175fdb..cf2d35363e8b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c @@ -28,7 +28,7 @@ #include "clk_mgr_internal.h" #include "reg_helper.h" #include "dalsmc.h" -#include "smu13_driver_if.h" +#include "dcn32_smu13_driver_if.h" #define mmDAL_MSG_REG 0x1628A #define mmDAL_ARG_REG 0x16273 @@ -49,6 +49,7 @@ */ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) { + const uint32_t initial_max_retries = max_retries; uint32_t reg = 
0; do { @@ -62,6 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un udelay(delay_us); } while (max_retries--); + TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); + return reg; } @@ -79,6 +82,8 @@ static bool dcn32_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + /* Wait for response */ if (dcn32_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) { if (param_out) @@ -115,6 +120,8 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m *total_delay_us += delay_us; } while (max_retries--); + TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx); + return reg; } @@ -135,6 +142,8 @@ static bool dcn32_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mgr /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + /* Wait for response */ if (dcn32_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) { if (param_out) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h index c76352a817de..5c44ab0e8667 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h @@ -37,10 +37,9 @@ #define DALSMC_Result_OK 0x1 void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable); -void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); -void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways); void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); +void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz); void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_smu13_driver_if.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_smu13_driver_if.h index d3d5a8caccf8..8d54865bbd5d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_smu13_driver_if.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_smu13_driver_if.h @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -// This is a stripped-down version of the smu13_driver_if.h file for the relevant DAL interfaces. +/* Copyright © 2022-2024 Advanced Micro Devices, Inc. All rights reserved. */ #define SMU13_DRIVER_IF_VERSION 0x18 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/smu13_driver_if.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/smu13_driver_if.h deleted file mode 100644 index deeb85047e7b..000000000000 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/smu13_driver_if.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ -#ifndef SMU13_DRIVER_IF_DCN32_H -#define SMU13_DRIVER_IF_DCN32_H - -// *** IMPORTANT *** -// PMFW TEAM: Always increment the interface version on any change to this file -#define SMU13_DRIVER_IF_VERSION 0x18 - -//Only Clks that have DPM descriptors are listed here -typedef enum { - PPCLK_GFXCLK = 0, - PPCLK_SOCCLK, - PPCLK_UCLK, - PPCLK_FCLK, - PPCLK_DCLK_0, - PPCLK_VCLK_0, - PPCLK_DCLK_1, - PPCLK_VCLK_1, - PPCLK_DISPCLK, - PPCLK_DPPCLK, - PPCLK_DPREFCLK, - PPCLK_DCFCLK, - PPCLK_DTBCLK, - PPCLK_COUNT, -} PPCLK_e; - -typedef enum { - UCLK_DIV_BY_1 = 0, - UCLK_DIV_BY_2, - UCLK_DIV_BY_4, - UCLK_DIV_BY_8, -} UCLK_DIV_e; - -typedef struct { - uint8_t WmSetting; - uint8_t Flags; - uint8_t Padding[2]; - -} WatermarkRowGeneric_t; - -#define NUM_WM_RANGES 4 - -typedef enum { - WATERMARKS_CLOCK_RANGE = 0, - WATERMARKS_DUMMY_PSTATE, - WATERMARKS_MALL, - WATERMARKS_COUNT, -} WATERMARKS_FLAGS_e; - -typedef struct { - // Watermarks - WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES]; -} Watermarks_t; - -typedef struct { - Watermarks_t Watermarks; - uint32_t Spare[16]; - - uint32_t MmHubPadding[8]; // SMU internal use -} WatermarksExternal_t; - -// These defines are used with the following messages: -// SMC_MSG_TransferTableDram2Smu -// SMC_MSG_TransferTableSmu2Dram - -// Table transfer status -#define TABLE_TRANSFER_OK 0x0 -#define TABLE_TRANSFER_FAILED 0xFF -#define TABLE_TRANSFER_PENDING 0xAB - -// Table types -#define TABLE_PMFW_PPTABLE 0 -#define TABLE_COMBO_PPTABLE 1 -#define TABLE_WATERMARKS 2 -#define TABLE_AVFS_PSM_DEBUG 3 -#define TABLE_PMSTATUSLOG 4 -#define TABLE_SMU_METRICS 5 -#define TABLE_DRIVER_SMU_CONFIG 6 -#define TABLE_ACTIVITY_MONITOR_COEFF 7 -#define TABLE_OVERDRIVE 8 -#define TABLE_I2C_COMMANDS 9 -#define TABLE_DRIVER_INFO 10 -#define TABLE_COUNT 11 - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c new file mode 100644 index 000000000000..4607eff07253 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c @@ -0,0 +1,141 @@ +/* + * Copyright 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dcn35_clk_mgr.h" + +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define mmCLK1_CLK_PLL_REQ 0x16E37 + +#define mmCLK1_CLK0_DFS_CNTL 0x16E69 +#define mmCLK1_CLK1_DFS_CNTL 0x16E6C +#define mmCLK1_CLK2_DFS_CNTL 0x16E6F +#define mmCLK1_CLK3_DFS_CNTL 0x16E72 +#define mmCLK1_CLK4_DFS_CNTL 0x16E75 +#define mmCLK1_CLK5_DFS_CNTL 0x16E78 + +#define mmCLK1_CLK0_CURRENT_CNT 0x16EFC +#define mmCLK1_CLK1_CURRENT_CNT 0x16EFD +#define mmCLK1_CLK2_CURRENT_CNT 0x16EFE +#define mmCLK1_CLK3_CURRENT_CNT 0x16EFF +#define mmCLK1_CLK4_CURRENT_CNT 0x16F00 +#define mmCLK1_CLK5_CURRENT_CNT 0x16F01 + +#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A +#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93 +#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C +#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5 +#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE +#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7 + +#define mmCLK1_CLK0_DS_CNTL 0x16E83 +#define mmCLK1_CLK1_DS_CNTL 0x16E8C +#define mmCLK1_CLK2_DS_CNTL 0x16E95 +#define mmCLK1_CLK3_DS_CNTL 0x16E9E +#define mmCLK1_CLK4_DS_CNTL 0x16EA7 +#define mmCLK1_CLK5_DS_CNTL 0x16EB0 + +#define mmCLK1_CLK0_ALLOW_DS 0x16E84 +#define mmCLK1_CLK1_ALLOW_DS 0x16E8D +#define mmCLK1_CLK2_ALLOW_DS 0x16E96 +#define mmCLK1_CLK3_ALLOW_DS 0x16E9F +#define mmCLK1_CLK4_ALLOW_DS 0x16EA8 +#define mmCLK1_CLK5_ALLOW_DS 0x16EB1 + +#define mmCLK5_spll_field_8 0x1B04B +#define mmCLK6_spll_field_8 0x1B24B +#define mmDENTIST_DISPCLK_CNTL 0x0124 +#define regDENTIST_DISPCLK_CNTL 0x0064 +#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1 + +#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 +#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc +#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 +#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL +#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L +#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L + +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L + +// DENTIST_DISPCLK_CNTL +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L +#define 
DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L + +#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L + +#define REG(reg) \ + (clk_mgr->regs->reg) + +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define CLK_SR_DCN35(reg_name)\ + .reg_name = mm ## reg_name + +static const struct clk_mgr_registers clk_mgr_regs_dcn351 = { + CLK_REG_LIST_DCN35() +}; + +static const struct clk_mgr_shift clk_mgr_shift_dcn351 = { + CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct clk_mgr_mask clk_mgr_mask_dcn351 = { + CLK_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + +#define TO_CLK_MGR_DCN35(clk_mgr)\ + container_of(clk_mgr, struct clk_mgr_dcn35, base) + + +void dcn351_clk_mgr_construct( + struct dc_context *ctx, + struct clk_mgr_dcn35 *clk_mgr, + struct pp_smu_funcs *pp_smu, + struct dccg *dccg) +{ + /*register offset changed*/ + clk_mgr->base.regs = &clk_mgr_regs_dcn351; + clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn351; + clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn351; + + dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + +} + + diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index d9c5692c86c2..bb1ac12a2b09 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -36,15 +36,11 @@ #include "dcn20/dcn20_clk_mgr.h" - - #include "reg_helper.h" #include "core_types.h" #include "dcn35_smu.h" #include "dm_helpers.h" -/* TODO: remove this include once we ported over remaining clk mgr functions*/ -#include "dcn30/dcn30_clk_mgr.h" #include "dcn31/dcn31_clk_mgr.h" #include "dc_dmub_srv.h" @@ -55,34 +51,104 @@ #define DC_LOGGER \ clk_mgr->base.base.ctx->logger -#define regCLK1_CLK_PLL_REQ 0x0237 -#define regCLK1_CLK_PLL_REQ_BASE_IDX 0 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define mmCLK1_CLK_PLL_REQ 0x16E37 + +#define mmCLK1_CLK0_DFS_CNTL 0x16E69 +#define mmCLK1_CLK1_DFS_CNTL 0x16E6C +#define mmCLK1_CLK2_DFS_CNTL 0x16E6F +#define mmCLK1_CLK3_DFS_CNTL 0x16E72 +#define mmCLK1_CLK4_DFS_CNTL 0x16E75 +#define mmCLK1_CLK5_DFS_CNTL 0x16E78 + +#define mmCLK1_CLK0_CURRENT_CNT 0x16EFB +#define mmCLK1_CLK1_CURRENT_CNT 0x16EFC +#define mmCLK1_CLK2_CURRENT_CNT 0x16EFD +#define mmCLK1_CLK3_CURRENT_CNT 0x16EFE +#define mmCLK1_CLK4_CURRENT_CNT 0x16EFF +#define mmCLK1_CLK5_CURRENT_CNT 0x16F00 + +#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A +#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93 +#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C +#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5 +#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE +#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7 + +#define mmCLK1_CLK0_DS_CNTL 0x16E83 +#define mmCLK1_CLK1_DS_CNTL 0x16E8C +#define mmCLK1_CLK2_DS_CNTL 0x16E95 +#define mmCLK1_CLK3_DS_CNTL 0x16E9E +#define mmCLK1_CLK4_DS_CNTL 0x16EA7 +#define mmCLK1_CLK5_DS_CNTL 0x16EB0 + +#define mmCLK1_CLK0_ALLOW_DS 0x16E84 +#define mmCLK1_CLK1_ALLOW_DS 0x16E8D +#define mmCLK1_CLK2_ALLOW_DS 0x16E96 +#define mmCLK1_CLK3_ALLOW_DS 0x16E9F +#define mmCLK1_CLK4_ALLOW_DS 0x16EA8 +#define mmCLK1_CLK5_ALLOW_DS 0x16EB1 + +#define mmCLK5_spll_field_8 0x1B24B +#define mmCLK6_spll_field_8 0x1B24B +#define mmDENTIST_DISPCLK_CNTL 0x0124 +#define regDENTIST_DISPCLK_CNTL 0x0064 
+#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1 + +#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 +#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc +#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 +#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL +#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L +#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L + +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L +// DENTIST_DISPCLK_CNTL +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L + +#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L +#define CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L -#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 -#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc -#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 -#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL -#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L -#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L +#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0 +#undef FN +#define FN(reg_name, field_name) \ + clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name -#define regCLK1_CLK2_BYPASS_CNTL 0x029c -#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0 +#define REG(reg) \ + (clk_mgr->regs->reg) -#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0 -#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10 -#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L -#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg -#define regCLK5_0_CLK5_spll_field_8 0x464b -#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0 +#define BASE(seg) BASE_INNER(seg) -#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd -#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name -#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0 +#define CLK_SR_DCN35(reg_name)\ + .reg_name = mm ## reg_name + +static const struct clk_mgr_registers clk_mgr_regs_dcn35 = { + CLK_REG_LIST_DCN35() +}; + +static const struct clk_mgr_shift clk_mgr_shift_dcn35 = { + CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; -#define REG(reg_name) \ - (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) +static const struct clk_mgr_mask clk_mgr_mask_dcn35 = { + CLK_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; #define TO_CLK_MGR_DCN35(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn35, base) @@ -120,22 +186,59 @@ static int dcn35_get_active_display_cnt_wa( return display_count; } - static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; + if (dc->ctx->dce_environment == DCE_ENV_DIAG) + return; + for (i = 0; i < dc->res_pool->pipe_count; ++i) { + 
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dccg *dccg = clk_mgr_internal->dccg; struct pipe_ctx *pipe = safe_to_lower ? &context->res_ctx.pipe_ctx[i] : &dc->current_state->res_ctx.pipe_ctx[i]; + struct link_encoder *new_pipe_link_enc = new_pipe->link_res.dio_link_enc; + struct link_encoder *pipe_link_enc = pipe->link_res.dio_link_enc; + bool stream_changed_otg_dig_on = false; + bool has_active_hpo = false; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || - !pipe->stream->link_enc)) { + + if (!dc->config.unify_link_enc_assignment) { + if (new_pipe->stream) + new_pipe_link_enc = new_pipe->stream->link_enc; + if (pipe->stream) + pipe_link_enc = pipe->stream->link_enc; + } + + stream_changed_otg_dig_on = old_pipe->stream && new_pipe->stream && + old_pipe->stream != new_pipe->stream && + old_pipe->stream_res.tg == new_pipe->stream_res.tg && + new_pipe_link_enc && !new_pipe->stream->dpms_off && + new_pipe_link_enc->funcs->is_dig_enabled && + new_pipe_link_enc->funcs->is_dig_enabled( + new_pipe_link_enc) && + new_pipe->stream_res.stream_enc && + new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled && + new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc); + + if (old_pipe->stream && new_pipe->stream && old_pipe->stream == new_pipe->stream) { + has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) && + dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe); + + } + + if (!has_active_hpo && !stream_changed_otg_dig_on && pipe->stream && + (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || !pipe_link_enc) && + !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe)) { + /* This w/a should not trigger when we have a dig active */ if (disable) { if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); @@ -218,6 +321,59 @@ static void dcn35_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, } } +static uint8_t get_lowest_dpia_index(const struct dc_link *link) +{ + const struct dc *dc_struct = link->dc; + uint8_t idx = 0xFF; + int i; + + for (i = 0; i < MAX_PIPES * 2; ++i) { + if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + continue; + + if (idx > dc_struct->links[i]->link_index) + idx = dc_struct->links[i]->link_index; + } + + return idx; +} + +static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_state *context, + bool safe_to_lower) +{ + struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 }; + int i; + for (i = 0; i < context->stream_count; ++i) { + const struct dc_stream_state *stream = context->streams[i]; + const struct dc_link *link = stream->link; + uint8_t lowest_dpia_index = 0; + unsigned int hr_index = 0; + + if (!link) + continue; + + lowest_dpia_index = get_lowest_dpia_index(link); + if (link->link_index < lowest_dpia_index) + continue; + + hr_index = (link->link_index - lowest_dpia_index) / 2; + if (hr_index >= MAX_HOST_ROUTERS_NUM) + continue; + host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing( + 
&stream->timing, dc_link_get_highest_encoding_format(link)); + } + + for (i = 0; i < MAX_HOST_ROUTERS_NUM; ++i) { + new_clocks->host_router_bw_kbps[i] = host_router_bw_kbps[i]; + if (should_set_clock(safe_to_lower, new_clocks->host_router_bw_kbps[i], clk_mgr_base->clks.host_router_bw_kbps[i])) { + clk_mgr_base->clks.host_router_bw_kbps[i] = new_clocks->host_router_bw_kbps[i]; + dcn35_smu_notify_host_router_bw(clk_mgr, i, new_clocks->host_router_bw_kbps[i]); + } + } +} + void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) @@ -252,7 +408,9 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, } if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { - dcn35_smu_set_dtbclk(clk_mgr, false); + if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk) + dcn35_smu_set_dtbclk(clk_mgr, false); + clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; } /* check that we're not already in lower */ @@ -270,11 +428,17 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, } if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) { - dcn35_smu_set_dtbclk(clk_mgr, true); - clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + int actual_dtbclk = 0; dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz); - clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz; + dcn35_smu_set_dtbclk(clk_mgr, true); + + actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT); + + if (actual_dtbclk) { + clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz; + clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + } } /* check that we're not already in D0 */ @@ -312,11 +476,19 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, update_dppclk = true; } - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && + (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { + int requested_dispclk_khz = new_clocks->dispclk_khz; + dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); + /* Clamp the requested clock to PMFW based on their limit. 
*/ + if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) + requested_dispclk_khz = dc->debug.min_disp_clk_khz; + + dcn35_smu_set_dispclk(clk_mgr, requested_dispclk_khz); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); + dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; @@ -341,6 +513,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } + // notify PMFW of bandwidth per DPIA tunnel + if (dc->debug.notify_dpia_hr_bw) + dcn35_notify_host_router_bw(clk_mgr_base, context, safe_to_lower); + // notify DMCUB of latest clocks memset(&cmd, 0, sizeof(cmd)); cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR; @@ -360,7 +536,6 @@ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) struct fixed31_32 pll_req; unsigned int fbmult_frac_val = 0; unsigned int fbmult_int_val = 0; - struct dc_context *ctx = clk_mgr->base.ctx; /* * Register value of fbmult is in 8.16 format, we are converting to 314.32 @@ -420,22 +595,24 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc_context *ctx = clk_mgr->base.ctx; + uint32_t ssc_enable; - REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable); + if (clk_mgr_base->ctx->dce_version == DCN_VERSION_3_51) { + ssc_enable = REG_READ(CLK6_spll_field_8) & CLK6_spll_field_8__spll_ssc_en_MASK; + } else { + ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK; + } - return ssc_enable == 1; + return ssc_enable != 0; } static void init_clk_states(struct clk_mgr *clk_mgr) { - struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); - if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD) - clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk clk_mgr->clks.p_state_change_support = true; clk_mgr->clks.prev_p_state_change_support = true; @@ -446,6 +623,7 @@ static void init_clk_states(struct clk_mgr *clk_mgr) void dcn35_init_clocks(struct clk_mgr *clk_mgr) { struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + init_clk_states(clk_mgr); // to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk @@ -540,6 +718,7 @@ static struct wm_table lpddr5_wm_table = { }; static DpmClocks_t_dcn35 dummy_clocks; +static DpmClocks_t_dcn351 dummy_clocks_dcn351; static struct dcn35_watermarks dummy_wms = { 0 }; @@ -550,10 +729,10 @@ static struct dcn35_ss_info_table ss_info_table = { static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) { - struct dc_context *ctx = clk_mgr->base.ctx; - uint32_t clock_source; + uint32_t clock_source = 0; + + clock_source = REG_READ(CLK1_CLK2_BYPASS_CNTL) & CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK; - REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source); // If it's DFS mode, clock_source is 0. 
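The 8.16 fixed-point layout that get_vco_frequency_from_reg's comment describes converts compactly. A sketch assuming the 48 MHz DFS reference programmed by this driver (dfs_ref_freq_khz = 48000) and the CLK1_CLK_PLL_REQ fields defined above; the helper itself is hypothetical:

#include <stdint.h>

/* FbMult is 8.16 fixed point split across CLK1_CLK_PLL_REQ:
 * integer part in bits 8:0, fractional part in bits 31:16. */
static uint32_t vco_freq_khz_from_pll_req(uint32_t pll_req)
{
	uint32_t fb_int  = pll_req & 0x000001FF;         /* FbMult_int  */
	uint32_t fb_frac = (pll_req & 0xFFFF0000) >> 16; /* FbMult_frac */

	/* VCO = ref_khz * (int + frac / 2^16) */
	return (uint32_t)((48000ULL *
			(((uint64_t)fb_int << 16) + fb_frac)) >> 16);
}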
if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) { clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; @@ -663,6 +842,22 @@ static void dcn35_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr); } +static void dcn351_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, + struct dcn351_smu_dpm_clks *smu_dpm_clks) +{ + DpmClocks_t_dcn351 *table = smu_dpm_clks->dpm_clks; + + if (!clk_mgr->smu_ver) + return; + if (!table || smu_dpm_clks->mc_address.quad_part == 0) + return; + memset(table, 0, sizeof(*table)); + dcn35_smu_set_dram_addr_high(clk_mgr, + smu_dpm_clks->mc_address.high_part); + dcn35_smu_set_dram_addr_low(clk_mgr, + smu_dpm_clks->mc_address.low_part); + dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr); +} static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks) { uint32_t max = 0; @@ -889,35 +1084,6 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base) } } -static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr); - - if (dc->config.disable_ips == DMUB_IPS_ENABLE || - dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } - - if (!allow_idle) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } - - dcn35_smu_write_ips_scratch(clk_mgr, val); -} - static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -930,18 +1096,8 @@ static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base) static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - bool ips_supported = true; - ips_supported = dcn35_smu_get_ips_supported(clk_mgr) ? true : false; - - return ips_supported; -} - -static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - - return dcn35_smu_read_ips_scratch(clk_mgr); + return dcn35_smu_get_ips_supported(clk_mgr) ? 
true : false; } static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr) @@ -1031,8 +1187,6 @@ static struct clk_mgr_funcs dcn35_funcs = { .set_low_power_state = dcn35_set_low_power_state, .exit_low_power_state = dcn35_exit_low_power_state, .is_ips_supported = dcn35_is_ips_supported, - .set_idle_state = dcn35_set_ips_idle_state, - .get_idle_state = dcn35_get_ips_idle_state }; struct clk_mgr_funcs dcn35_fpga_funcs = { @@ -1042,6 +1196,57 @@ struct clk_mgr_funcs dcn35_fpga_funcs = { .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, }; +static void translate_to_DpmClocks_t_dcn35(struct dcn351_smu_dpm_clks *smu_dpm_clks_a, + struct dcn35_smu_dpm_clks *smu_dpm_clks_b) +{ + /*translate two structures and only take need clock tables*/ + uint8_t i; + + if (smu_dpm_clks_a == NULL || smu_dpm_clks_b == NULL || + smu_dpm_clks_a->dpm_clks == NULL || smu_dpm_clks_b->dpm_clks == NULL) + return; + + for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) + smu_dpm_clks_b->dpm_clks->DcfClocks[i] = smu_dpm_clks_a->dpm_clks->DcfClocks[i]; + + for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++) + smu_dpm_clks_b->dpm_clks->DispClocks[i] = smu_dpm_clks_a->dpm_clks->DispClocks[i]; + + for (i = 0; i < NUM_DPPCLK_DPM_LEVELS; i++) + smu_dpm_clks_b->dpm_clks->DppClocks[i] = smu_dpm_clks_a->dpm_clks->DppClocks[i]; + + for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { + smu_dpm_clks_b->dpm_clks->FclkClocks_Freq[i] = smu_dpm_clks_a->dpm_clks->FclkClocks_Freq[i]; + smu_dpm_clks_b->dpm_clks->FclkClocks_Voltage[i] = smu_dpm_clks_a->dpm_clks->FclkClocks_Voltage[i]; + } + for (i = 0; i < NUM_MEM_PSTATE_LEVELS; i++) { + smu_dpm_clks_b->dpm_clks->MemPstateTable[i].MemClk = + smu_dpm_clks_a->dpm_clks->MemPstateTable[i].MemClk; + smu_dpm_clks_b->dpm_clks->MemPstateTable[i].UClk = + smu_dpm_clks_a->dpm_clks->MemPstateTable[i].UClk; + smu_dpm_clks_b->dpm_clks->MemPstateTable[i].Voltage = + smu_dpm_clks_a->dpm_clks->MemPstateTable[i].Voltage; + smu_dpm_clks_b->dpm_clks->MemPstateTable[i].WckRatio = + smu_dpm_clks_a->dpm_clks->MemPstateTable[i].WckRatio; + } + smu_dpm_clks_b->dpm_clks->MaxGfxClk = smu_dpm_clks_a->dpm_clks->MaxGfxClk; + smu_dpm_clks_b->dpm_clks->MinGfxClk = smu_dpm_clks_a->dpm_clks->MinGfxClk; + smu_dpm_clks_b->dpm_clks->NumDcfClkLevelsEnabled = + smu_dpm_clks_a->dpm_clks->NumDcfClkLevelsEnabled; + smu_dpm_clks_b->dpm_clks->NumDispClkLevelsEnabled = + smu_dpm_clks_a->dpm_clks->NumDispClkLevelsEnabled; + smu_dpm_clks_b->dpm_clks->NumFclkLevelsEnabled = + smu_dpm_clks_a->dpm_clks->NumFclkLevelsEnabled; + smu_dpm_clks_b->dpm_clks->NumMemPstatesEnabled = + smu_dpm_clks_a->dpm_clks->NumMemPstatesEnabled; + smu_dpm_clks_b->dpm_clks->NumSocClkLevelsEnabled = + smu_dpm_clks_a->dpm_clks->NumSocClkLevelsEnabled; + + for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) { + smu_dpm_clks_b->dpm_clks->SocClocks[i] = smu_dpm_clks_a->dpm_clks->SocClocks[i]; + smu_dpm_clks_b->dpm_clks->SocVoltage[i] = smu_dpm_clks_a->dpm_clks->SocVoltage[i]; + } +} void dcn35_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn35 *clk_mgr, @@ -1049,6 +1254,7 @@ void dcn35_clk_mgr_construct( struct dccg *dccg) { struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 }; + struct dcn351_smu_dpm_clks smu_dpm_clks_dcn351 = { 0 }; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn35_funcs; @@ -1061,10 +1267,16 @@ void dcn35_clk_mgr_construct( clk_mgr->base.dprefclk_ss_divider = 1000; clk_mgr->base.ss_on_dprefclk = false; clk_mgr->base.dfs_ref_freq_khz = 48000; + if (ctx->dce_version != DCN_VERSION_3_51) { + clk_mgr->base.regs = &clk_mgr_regs_dcn35; + 
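translate_to_DpmClocks_t_dcn35 above follows a deliberate pattern: copy only the clock tables the shared DCN3.5 path consumes out of the DCN3.51 layout, and drop fields with no DCN3.5 equivalent (the VCN/VPE levels). A minimal sketch of the same pattern with two hypothetical layouts:

#include <stdint.h>

struct dpm_layout_a {                  /* producer, dcn351-style */
	uint32_t disp_clocks[8];
	uint32_t vpe_clocks[4];        /* no consumer-side field */
	uint8_t  num_disp_levels;
};

struct dpm_layout_b {                  /* consumer, dcn35-style */
	uint32_t disp_clocks[8];
	uint8_t  num_disp_levels;
};

static void translate_a_to_b(const struct dpm_layout_a *a,
			     struct dpm_layout_b *b)
{
	int i;

	for (i = 0; i < 8; i++)
		b->disp_clocks[i] = a->disp_clocks[i];
	b->num_disp_levels = a->num_disp_levels;
	/* vpe_clocks intentionally not carried over */
}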
clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35; + clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35; + } + clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem( clk_mgr->base.base.ctx, - DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + DC_MEM_ALLOC_TYPE_GART, sizeof(struct dcn35_watermarks), &clk_mgr->smu_wm_set.mc_address.quad_part); @@ -1076,17 +1288,27 @@ void dcn35_clk_mgr_construct( smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem( clk_mgr->base.base.ctx, - DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + DC_MEM_ALLOC_TYPE_GART, sizeof(DpmClocks_t_dcn35), &smu_dpm_clks.mc_address.quad_part); - if (smu_dpm_clks.dpm_clks == NULL) { smu_dpm_clks.dpm_clks = &dummy_clocks; smu_dpm_clks.mc_address.quad_part = 0; } - ASSERT(smu_dpm_clks.dpm_clks); + if (ctx->dce_version == DCN_VERSION_3_51) { + smu_dpm_clks_dcn351.dpm_clks = (DpmClocks_t_dcn351 *)dm_helpers_allocate_gpu_mem( + clk_mgr->base.base.ctx, + DC_MEM_ALLOC_TYPE_GART, + sizeof(DpmClocks_t_dcn351), + &smu_dpm_clks_dcn351.mc_address.quad_part); + if (smu_dpm_clks_dcn351.dpm_clks == NULL) { + smu_dpm_clks_dcn351.dpm_clks = &dummy_clocks_dcn351; + smu_dpm_clks_dcn351.mc_address.quad_part = 0; + } + } + clk_mgr->base.smu_ver = dcn35_smu_get_smu_version(&clk_mgr->base); if (clk_mgr->base.smu_ver) @@ -1115,7 +1337,11 @@ void dcn35_clk_mgr_construct( if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { int i; - dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks); + if (ctx->dce_version == DCN_VERSION_3_51) { + dcn351_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks_dcn351); + translate_to_DpmClocks_t_dcn35(&smu_dpm_clks_dcn351, &smu_dpm_clks); + } else + dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks); DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n" "NumDispClkLevelsEnabled: %d\n" "NumSocClkLevelsEnabled: %d\n" @@ -1164,7 +1390,7 @@ void dcn35_clk_mgr_construct( i, smu_dpm_clks.dpm_clks->MemPstateTable[i].Voltage); } - if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) { + if (ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) { dcn35_clk_mgr_helper_populate_bw_params( &clk_mgr->base, ctx->dc_bios->integrated_info, @@ -1173,9 +1399,13 @@ void dcn35_clk_mgr_construct( } if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) - dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART, smu_dpm_clks.dpm_clks); + if (smu_dpm_clks_dcn351.dpm_clks && smu_dpm_clks_dcn351.mc_address.quad_part != 0) + dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART, + smu_dpm_clks_dcn351.dpm_clks); + if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) { bool ips_support = false; @@ -1186,6 +1416,12 @@ void dcn35_clk_mgr_construct( ctx->dc->debug.disable_dpp_power_gate = false; ctx->dc->debug.disable_hubp_power_gate = false; ctx->dc->debug.disable_dsc_power_gate = false; + + /* Disable dynamic IPS2 in older PMFW (93.12) for Z8 interop. 
*/ + if (ctx->dc->config.disable_ips == DMUB_IPS_ENABLE && + ctx->dce_version != DCN_VERSION_3_51 && + ((clk_mgr->base.smu_ver & 0x00FFFFFF) <= 0x005d0c00)) + ctx->dc->config.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; } else { /*let's reset the config control flag*/ ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h index 1203dc605b12..a12a9bf90806 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h @@ -60,4 +60,8 @@ void dcn35_clk_mgr_construct(struct dc_context *ctx, void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); +void dcn351_clk_mgr_construct(struct dc_context *ctx, + struct clk_mgr_dcn35 *clk_mgr, + struct pp_smu_funcs *pp_smu, + struct dccg *dccg); #endif //__DCN35_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index 9e588c56c570..604d256cb47a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -84,12 +84,13 @@ #define VBIOSSMC_MSG_AllowZstatesEntry 0x15 #define VBIOSSMC_MSG_DisallowZstatesEntry 0x16 #define VBIOSSMC_MSG_SetDtbClk 0x17 -#define VBIOSSMC_MSG_DispPsrEntry 0x18 ///< Display PSR entry, DMU -#define VBIOSSMC_MSG_DispPsrExit 0x19 ///< Display PSR exit, DMU +#define VBIOSSMC_MSG_DispIPS2Entry 0x18 ///< Display IPS2 entry, DMU +#define VBIOSSMC_MSG_DispIPS2Exit 0x19 ///< Display IPS2 exit, DMU #define VBIOSSMC_MSG_DisableLSdma 0x1A ///< Disable LSDMA; only sent by VBIOS #define VBIOSSMC_MSG_DpControllerPhyStatus 0x1B ///< Inform PMFW about the pre conditions for turning SLDO2 on/off . 
bit[0]==1 precondition is met, bit[1-2] are for DPPHY number #define VBIOSSMC_MSG_QueryIPS2Support 0x1C ///< Return 1: support; else not supported -#define VBIOSSMC_Message_Count 0x1D +#define VBIOSSMC_MSG_NotifyHostRouterBW 0x1D +#define VBIOSSMC_Message_Count 0x1E #define VBIOSSMC_Status_BUSY 0x0 #define VBIOSSMC_Result_OK 0x1 @@ -98,6 +99,14 @@ #define VBIOSSMC_Result_CmdRejectedPrereq 0xFD #define VBIOSSMC_Result_CmdRejectedBusy 0xFC +union dcn35_dpia_host_router_bw { + struct { + uint32_t hr_id : 16; + uint32_t bw_mbps : 16; + } bits; + uint32_t all; +}; + /* * Function to be used instead of REG_WAIT macro because the wait ends when * the register is NOT EQUAL to zero, and because `the translation in msg_if.h @@ -466,7 +475,7 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) retv = dcn35_smu_send_msg_with_param( clk_mgr, - VBIOSSMC_MSG_DispPsrExit, + VBIOSSMC_MSG_DispIPS2Exit, 0); smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv); return retv; @@ -488,23 +497,12 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) return retv; } -void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param) -{ - if (!clk_mgr->smu_present) - return; - - REG_WRITE(MP1_SMN_C2PMSG_71, param); - //smu_print("%s: write_ips_scratch = %x\n", __func__, param); -} - -uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr) +void dcn35_smu_notify_host_router_bw(struct clk_mgr_internal *clk_mgr, uint32_t hr_id, uint32_t bw_kbps) { - uint32_t retv; + union dcn35_dpia_host_router_bw msg_data = { 0 }; - if (!clk_mgr->smu_present) - return 0; + msg_data.bits.hr_id = hr_id; + msg_data.bits.bw_mbps = bw_kbps / 1000; - retv = REG_READ(MP1_SMN_C2PMSG_71); - //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv); - return retv; + dcn35_smu_send_msg_with_param(clk_mgr, VBIOSSMC_MSG_NotifyHostRouterBW, msg_data.all); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h index 2b8e6959a03d..ab9d21ba0c43 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h @@ -126,18 +126,31 @@ typedef struct { uint32_t MaxGfxClk; } DpmClocks_t_dcn35; - -// Throttler Status Bitmask - - - - - - - - - - +typedef struct { + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t VClocks0[NUM_VCN_DPM_LEVELS]; + uint32_t VClocks1[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks0[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks1[NUM_VCN_DPM_LEVELS]; + uint32_t VPEClocks[NUM_VPE_DPM_LEVELS]; + uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS]; + uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS]; + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS]; + uint8_t NumDcfClkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; // Applies to both Dispclk and Dppclk + uint8_t NumSocClkLevelsEnabled; + uint8_t Vcn0ClkLevelsEnabled; // Applies to both Vclk0 and Dclk0 + uint8_t Vcn1ClkLevelsEnabled; // Applies to both Vclk1 and Dclk1 + uint8_t VpeClkLevelsEnabled; + uint8_t NumMemPstatesEnabled; + uint8_t NumFclkLevelsEnabled; + uint32_t MinGfxClk; + uint32_t MaxGfxClk; +} DpmClocks_t_dcn351; #define TABLE_BIOS_IF 0 // Called by BIOS #define TABLE_WATERMARKS 1 // Called by DAL through VBIOS @@ -163,6 +176,10 @@ struct dcn35_smu_dpm_clks { union 
large_integer mc_address; }; +struct dcn351_smu_dpm_clks { + DpmClocks_t_dcn351 *dpm_clks; + union large_integer mc_address; +}; /* TODO: taken from vgh, may not be correct */ struct display_idle_optimization { unsigned int df_request_disabled : 1; @@ -198,6 +215,6 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr); -void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param); -uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr); +void dcn35_smu_notify_host_router_bw(struct clk_mgr_internal *clk_mgr, uint32_t hr_id, uint32_t bw_kbps); + #endif /* DAL_DC_35_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h new file mode 100644 index 000000000000..2e0d34fd7512 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. + +#ifndef DALSMC_H +#define DALSMC_H + +#define DALSMC_VERSION 0x1 + +// SMU Response Codes: +#define DALSMC_Result_OK 0x1 +#define DALSMC_Result_Failed 0xFF +#define DALSMC_Result_UnknownCmd 0xFE +#define DALSMC_Result_CmdRejectedPrereq 0xFD +#define DALSMC_Result_CmdRejectedBusy 0xFC + + + +// Message Definitions: +#define DALSMC_MSG_TestMessage 0x1 +#define DALSMC_MSG_GetSmuVersion 0x2 +#define DALSMC_MSG_GetDriverIfVersion 0x3 +#define DALSMC_MSG_GetMsgHeaderVersion 0x4 +#define DALSMC_MSG_SetDalDramAddrHigh 0x5 +#define DALSMC_MSG_SetDalDramAddrLow 0x6 +#define DALSMC_MSG_TransferTableSmu2Dram 0x7 +#define DALSMC_MSG_TransferTableDram2Smu 0x8 +#define DALSMC_MSG_SetHardMinByFreq 0x9 +#define DALSMC_MSG_SetHardMaxByFreq 0xA +#define DALSMC_MSG_GetDpmFreqByIndex 0xB +#define DALSMC_MSG_GetDcModeMaxDpmFreq 0xC +#define DALSMC_MSG_SetMinDeepSleepDcfclk 0xD +#define DALSMC_MSG_NumOfDisplays 0xE +#define DALSMC_MSG_SetExternalClientDfCstateAllow 0xF +#define DALSMC_MSG_BacoAudioD3PME 0x10 +#define DALSMC_MSG_SetFclkSwitchAllow 0x11 +#define DALSMC_MSG_SetCabForUclkPstate 0x12 +#define DALSMC_MSG_SetWorstCaseUclkLatency 0x13 +#define DALSMC_MSG_DcnExitReset 0x14 +#define DALSMC_MSG_ReturnHardMinStatus 0x15 +#define DALSMC_MSG_SetAlwaysWaitDmcubResp 0x16 +#define DALSMC_MSG_IndicateDrrStatus 0x17 // PMFW 15811 +#define DALSMC_MSG_ActiveUclkFclk 0x18 +#define DALSMC_MSG_IdleUclkFclk 0x19 +#define DALSMC_MSG_SetUclkPstateAllow 0x1A +#define DALSMC_MSG_SubvpUclkFclk 0x1B +#define DALSMC_MSG_GetNumUmcChannels 0x1C +#define DALSMC_Message_Count 0x1D + +typedef enum { + FCLK_SWITCH_DISALLOW, + FCLK_SWITCH_ALLOW, +} FclkSwitchAllow_e; + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c new file mode 100644 index 000000000000..a3b8e3d4a429 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -0,0 +1,1593 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. 
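The dcn35_smu.c change above packs both arguments of the new host-router message into the single 32-bit SMU parameter via union dcn35_dpia_host_router_bw. A sketch of that packing, assuming the usual little-endian bitfield layout with hr_id in the low half:

#include <stdint.h>

/* Mirrors union dcn35_dpia_host_router_bw from the hunk above. */
union dpia_hr_bw {
	struct {
		uint32_t hr_id   : 16; /* low 16 bits on little-endian */
		uint32_t bw_mbps : 16; /* high 16 bits */
	} bits;
	uint32_t all;
};

static uint32_t pack_hr_bw_param(uint32_t hr_id, uint32_t bw_kbps)
{
	union dpia_hr_bw msg = { 0 };

	msg.bits.hr_id = hr_id;
	msg.bits.bw_mbps = bw_kbps / 1000; /* PMFW takes Mbps */
	return msg.all;                    /* one message parameter */
}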
+ +#include "dccg.h" +#include "clk_mgr_internal.h" +#include "dcn401/dcn401_clk_mgr_smu_msg.h" +#include "dcn20/dcn20_clk_mgr.h" +#include "dce100/dce_clk_mgr.h" +#include "dcn31/dcn31_clk_mgr.h" +#include "dcn32/dcn32_clk_mgr.h" +#include "dcn401/dcn401_clk_mgr.h" +#include "reg_helper.h" +#include "core_types.h" +#include "dm_helpers.h" +#include "link.h" +#include "dc_state_priv.h" +#include "atomfirmware.h" + +#include "dcn401_smu14_driver_if.h" + +#include "dcn/dcn_4_1_0_offset.h" +#include "dcn/dcn_4_1_0_sh_mask.h" + +#include "dml/dcn401/dcn401_fpu.h" + +#define DCN_BASE__INST0_SEG1 0x000000C0 + +#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37 +#define mmCLK01_CLK0_CLK0_DFS_CNTL 0x16E69 +#define mmCLK01_CLK0_CLK1_DFS_CNTL 0x16E6C +#define mmCLK01_CLK0_CLK2_DFS_CNTL 0x16E6F +#define mmCLK01_CLK0_CLK3_DFS_CNTL 0x16E72 +#define mmCLK01_CLK0_CLK4_DFS_CNTL 0x16E75 +#define mmCLK20_CLK2_CLK2_DFS_CNTL 0x1B051 + +#define CLK0_CLK_PLL_REQ__FbMult_int_MASK 0x000001ffUL +#define CLK0_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000f000UL +#define CLK0_CLK_PLL_REQ__FbMult_frac_MASK 0xffff0000UL +#define CLK0_CLK_PLL_REQ__FbMult_int__SHIFT 0x00000000 +#define CLK0_CLK_PLL_REQ__PllSpineDiv__SHIFT 0x0000000c +#define CLK0_CLK_PLL_REQ__FbMult_frac__SHIFT 0x00000010 + +#undef FN +#define FN(reg_name, field_name) \ + clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name + +#define REG(reg) \ + (clk_mgr->regs->reg) + +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define CLK_SR_DCN401(reg_name, block, inst)\ + .reg_name = mm ## block ## _ ## reg_name + +static const struct clk_mgr_registers clk_mgr_regs_dcn401 = { + CLK_REG_LIST_DCN401() +}; + +static const struct clk_mgr_shift clk_mgr_shift_dcn401 = { + CLK_COMMON_MASK_SH_LIST_DCN401(__SHIFT) +}; + +static const struct clk_mgr_mask clk_mgr_mask_dcn401 = { + CLK_COMMON_MASK_SH_LIST_DCN401(_MASK) +}; + +#define TO_DCN401_CLK_MGR(clk_mgr)\ + container_of(clk_mgr, struct dcn401_clk_mgr, base) + +static bool dcn401_is_ppclk_dpm_enabled(struct clk_mgr_internal *clk_mgr, PPCLK_e clk) +{ + bool ppclk_dpm_enabled = false; + + switch (clk) { + case PPCLK_SOCCLK: + ppclk_dpm_enabled = + clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_socclk_levels > 1; + break; + case PPCLK_UCLK: + ppclk_dpm_enabled = + clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_memclk_levels > 1; + break; + case PPCLK_FCLK: + ppclk_dpm_enabled = + clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels > 1; + break; + case PPCLK_DISPCLK: + ppclk_dpm_enabled = + clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels > 1; + break; + case PPCLK_DPPCLK: + ppclk_dpm_enabled = + clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels > 1; + break; + case PPCLK_DPREFCLK: + ppclk_dpm_enabled = false; + break; + case PPCLK_DCFCLK: + ppclk_dpm_enabled = + clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels > 1; + break; + case PPCLK_DTBCLK: + ppclk_dpm_enabled = + clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels > 1; + break; + default: + ppclk_dpm_enabled = false; + } + + ppclk_dpm_enabled &= clk_mgr->smu_present; + + return ppclk_dpm_enabled; +} + +static bool dcn401_is_ppclk_idle_dpm_enabled(struct clk_mgr_internal *clk_mgr, PPCLK_e clk) +{ + bool ppclk_idle_dpm_enabled = false; + + switch (clk) { + case PPCLK_UCLK: + case PPCLK_FCLK: + if 
(ASICREV_IS_GC_12_0_0_A0(clk_mgr->base.ctx->asic_id.hw_internal_rev) && + clk_mgr->smu_ver >= 0x681800) { + ppclk_idle_dpm_enabled = true; + } else if (ASICREV_IS_GC_12_0_1_A0(clk_mgr->base.ctx->asic_id.hw_internal_rev) && + clk_mgr->smu_ver >= 0x661300) { + ppclk_idle_dpm_enabled = true; + } + break; + default: + ppclk_idle_dpm_enabled = false; + } + + ppclk_idle_dpm_enabled &= clk_mgr->smu_present; + + return ppclk_idle_dpm_enabled; +} + +static bool dcn401_is_df_throttle_opt_enabled(struct clk_mgr_internal *clk_mgr) +{ + bool is_df_throttle_opt_enabled = false; + + if (ASICREV_IS_GC_12_0_1_A0(clk_mgr->base.ctx->asic_id.hw_internal_rev) && + clk_mgr->smu_ver >= 0x663500) { + is_df_throttle_opt_enabled = !clk_mgr->base.ctx->dc->debug.force_subvp_df_throttle; + } + + is_df_throttle_opt_enabled &= clk_mgr->smu_present; + + return is_df_throttle_opt_enabled; +} + +/* Query SMU for all clock states for a particular clock */ +static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0, + unsigned int *num_levels) +{ + unsigned int i; + char *entry_i = (char *)entry_0; + + uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF); + + if (ret & (1 << 31)) + /* fine-grained, only min and max */ + *num_levels = 2; + else + /* discrete, a number of fixed states */ + /* will set num_levels to 0 on failure */ + *num_levels = ret & 0xFF; + + /* if the initial message failed, num_levels will be 0 */ + for (i = 0; i < *num_levels && i < ARRAY_SIZE(clk_mgr->base.bw_params->clk_table.entries); i++) { + *((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF); + entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]); + } +} + +static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr) +{ + /* legacy */ + DC_FP_START(); + dcn401_build_wm_range_table_fpu(clk_mgr); + DC_FP_END(); + + if (clk_mgr->ctx->dc->debug.using_dml21) { + /* For min clocks use as reported by PM FW and report those as min */ + uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; + uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; + + /* Set A - Normal - default values */ + clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; + + /* Set B - Unused on dcn4 */ + clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false; + + /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */ + /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ + if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz; + 
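dcn401_init_single_clock above leans on a response convention for dcn30_smu_get_dpm_freq_by_index with index 0xFF: discrete clocks return a level count in the low byte, while fine-grained clocks return a value with bit 31 set and expose only min and max. A sketch of that decode; a zero count on failure makes the fill loop above a no-op:

#include <stdint.h>

static unsigned int dpm_num_levels(uint32_t response)
{
	if (response & (1u << 31))
		return 2;           /* fine-grained: min and max only */

	return response & 0xFF;     /* discrete: count of fixed states,
				     * 0 if the query failed */
}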
clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF; + } else { + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false; + } + + /* Set 1B - Unused on dcn4 */ + clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false; + } +} + +void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct clk_limit_num_entries *num_entries_per_clk; + unsigned int i; + + if (!clk_mgr_base->bw_params) + return; + + num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk; + + memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks)); + clk_mgr_base->clks.p_state_change_support = true; + clk_mgr_base->clks.prev_p_state_change_support = true; + clk_mgr_base->clks.fclk_prev_p_state_change_support = true; + clk_mgr->smu_present = false; + clk_mgr->dpm_present = false; + + if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver)) + clk_mgr->smu_present = true; + + if (!clk_mgr->smu_present) + return; + + dcn30_smu_check_driver_if_version(clk_mgr); + dcn30_smu_check_msg_header_version(clk_mgr); + + /* DCFCLK */ + dcn401_init_single_clock(clk_mgr, PPCLK_DCFCLK, + &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz, + &num_entries_per_clk->num_dcfclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK); + if (num_entries_per_clk->num_dcfclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz == + clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dcfclk_levels - 1].dcfclk_mhz) + clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = 0; + + /* SOCCLK */ + dcn401_init_single_clock(clk_mgr, PPCLK_SOCCLK, + &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz, + &num_entries_per_clk->num_socclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK); + if (num_entries_per_clk->num_socclk_levels && clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz == + clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_socclk_levels - 1].socclk_mhz) + clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = 0; + + /* DTBCLK */ + if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch) { + dcn401_init_single_clock(clk_mgr, PPCLK_DTBCLK, + &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz, + &num_entries_per_clk->num_dtbclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK); + if (num_entries_per_clk->num_dtbclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz == + clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dtbclk_levels - 1].dtbclk_mhz) + clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = 0; + } + + /* DISPCLK */ + dcn401_init_single_clock(clk_mgr, PPCLK_DISPCLK, + &clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz, + &num_entries_per_clk->num_dispclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK); + if (num_entries_per_clk->num_dispclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz == + clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dispclk_levels - 1].dispclk_mhz) + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 0; + + /* DPPCLK */ + dcn401_init_single_clock(clk_mgr, PPCLK_DPPCLK, + &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz, + 
&num_entries_per_clk->num_dppclk_levels); + + if (num_entries_per_clk->num_dcfclk_levels && + num_entries_per_clk->num_dtbclk_levels && + num_entries_per_clk->num_dispclk_levels) + clk_mgr->dpm_present = true; + + if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) { + for (i = 0; i < num_entries_per_clk->num_dispclk_levels; i++) + if (clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz + < khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_disp_clk_khz)) + clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz + = khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_disp_clk_khz); + } + + if (clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz) { + for (i = 0; i < num_entries_per_clk->num_dppclk_levels; i++) + if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz + < khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz)) + clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz + = khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz); + } + + /* Get UCLK, update bounding box */ + clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base); + + /* WM range table */ + dcn401_build_wm_range_table(clk_mgr_base); +} + +static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, + struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + uint32_t dprefclk_did = 0; + uint32_t dcfclk_did = 0; + uint32_t dtbclk_did = 0; + uint32_t dispclk_did = 0; + uint32_t dppclk_did = 0; + uint32_t fclk_did = 0; + uint32_t target_div = 0; + + /* DFS Slice 0 is used for DISPCLK */ + dispclk_did = REG_READ(CLK0_CLK0_DFS_CNTL); + /* DFS Slice 1 is used for DPPCLK */ + dppclk_did = REG_READ(CLK0_CLK1_DFS_CNTL); + /* DFS Slice 2 is used for DPREFCLK */ + dprefclk_did = REG_READ(CLK0_CLK2_DFS_CNTL); + /* DFS Slice 3 is used for DCFCLK */ + dcfclk_did = REG_READ(CLK0_CLK3_DFS_CNTL); + /* DFS Slice 4 is used for DTBCLK */ + dtbclk_did = REG_READ(CLK0_CLK4_DFS_CNTL); + /* DFS Slice _ is used for FCLK */ + fclk_did = REG_READ(CLK2_CLK2_DFS_CNTL); + + /* Convert DISPCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(dispclk_did); + //Get dispclk in khz + regs_and_bypass->dispclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + /* Convert DISPCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(dppclk_did); + //Get dppclk in khz + regs_and_bypass->dppclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + /* Convert DPREFCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(dprefclk_did); + //Get dprefclk in khz + regs_and_bypass->dprefclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + /* Convert DCFCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(dcfclk_did); + //Get dcfclk in khz + regs_and_bypass->dcfclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + /* Convert DTBCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(dtbclk_did); + //Get dtbclk in khz + regs_and_bypass->dtbclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + /* Convert DTBCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(fclk_did); + //Get fclk in khz + regs_and_bypass->fclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) 
/ target_div; +} + +static bool dcn401_check_native_scaling(struct pipe_ctx *pipe) +{ + bool is_native_scaling = false; + int width = pipe->plane_state->src_rect.width; + int height = pipe->plane_state->src_rect.height; + + if (pipe->stream->timing.h_addressable == width && + pipe->stream->timing.v_addressable == height && + pipe->plane_state->dst_rect.width == width && + pipe->plane_state->dst_rect.height == height) + is_native_scaling = true; + + return is_native_scaling; +} + +static void dcn401_auto_dpm_test_log( + struct dc_clocks *new_clocks, + struct clk_mgr_internal *clk_mgr, + struct dc_state *context) +{ + unsigned int mall_ss_size_bytes; + int dramclk_khz_override, fclk_khz_override, num_fclk_levels; + + struct pipe_ctx *pipe_ctx_list[MAX_PIPES]; + int active_pipe_count = 0; + + for (int i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { + pipe_ctx_list[active_pipe_count] = pipe_ctx; + active_pipe_count++; + } + } + + msleep(5); + + mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes; + + struct clk_log_info log_info = {0}; + struct clk_state_registers_and_bypass clk_register_dump; + + dcn401_dump_clk_registers(&clk_register_dump, &clk_mgr->base, &log_info); + + // Overrides for these clocks in case there is no p_state change support + dramclk_khz_override = new_clocks->dramclk_khz; + fclk_khz_override = new_clocks->fclk_khz; + + num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1; + + if (!new_clocks->p_state_change_support) + dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000; + + if (!new_clocks->fclk_p_state_change_support) + fclk_khz_override = clk_mgr->base.bw_params->clk_table.entries[num_fclk_levels].fclk_mhz * 1000; + + + //////////////////////////////////////////////////////////////////////////// + // IMPORTANT: When adding more clocks to these logs, do NOT put a newline + // anywhere other than at the very end of the string. 
+ // + // Formatting example (make sure to have " - " between each entry): + // + // AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n" + //////////////////////////////////////////////////////////////////////////// + if (active_pipe_count > 0 && + new_clocks->dramclk_khz > 0 && + new_clocks->fclk_khz > 0 && + new_clocks->dcfclk_khz > 0 && + new_clocks->dppclk_khz > 0) { + + uint32_t pix_clk_list[MAX_PIPES] = {0}; + int p_state_list[MAX_PIPES] = {0}; + int disp_src_width_list[MAX_PIPES] = {0}; + int disp_src_height_list[MAX_PIPES] = {0}; + uint64_t disp_src_refresh_list[MAX_PIPES] = {0}; + bool is_scaled_list[MAX_PIPES] = {0}; + + for (int i = 0; i < active_pipe_count; i++) { + struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i]; + uint64_t refresh_rate; + + pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz; + p_state_list[i] = curr_pipe_ctx->p_state_type; + + refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 + + curr_pipe_ctx->stream->timing.v_total + * (uint64_t) curr_pipe_ctx->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total); + disp_src_refresh_list[i] = refresh_rate; + + if (curr_pipe_ctx->plane_state) { + is_scaled_list[i] = !(dcn401_check_native_scaling(curr_pipe_ctx)); + disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width; + disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height; + } + } + + DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - " + "dcfclk:%d - dppclk:%d - dispclk_hw:%d - " + "dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - " + "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - " + "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - " + "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - " + "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - " + "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - " + "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - " + "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n", + dramclk_khz_override, + fclk_khz_override, + new_clocks->dcfclk_khz, + new_clocks->dppclk_khz, + clk_register_dump.dispclk, + clk_register_dump.dppclk, + clk_register_dump.dprefclk, + clk_register_dump.dcfclk, + clk_register_dump.dtbclk, + clk_register_dump.fclk, + pix_clk_list[0], pix_clk_list[1], pix_clk_list[3], pix_clk_list[2], + mall_ss_size_bytes, + p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3], + disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0], + disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1], + disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2], + disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]); + } +} + +static void dcn401_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, + struct dc_state *context, + int ref_dtbclk_khz) +{ + int i; + struct dccg *dccg = clk_mgr->dccg; + struct pipe_ctx *otg_master; + bool use_hpo_encoder; + + + for (i = 0; i < context->stream_count; i++) { + otg_master = resource_get_otg_master_for_stream( + &context->res_ctx, context->streams[i]); + ASSERT(otg_master); + ASSERT(otg_master->clock_source); + ASSERT(otg_master->clock_source->funcs->program_pix_clk); + 
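The refresh-rate computation in dcn401_auto_dpm_test_log above is a ceiling division: dividing by v_total and then h_total with div_u64 (which takes a 32-bit divisor) yields the same result as dividing once by their product. A condensed equivalent sketch:

#include <stdint.h>

/* rate_hz = ceil(pix_clk_hz / (h_total * v_total)), where the pixel
 * clock is supplied in units of 100 Hz. */
static uint64_t refresh_rate_hz(uint32_t pix_clk_100hz,
				uint32_t h_total, uint32_t v_total)
{
	uint64_t htotal_vtotal = (uint64_t)h_total * v_total;

	return ((uint64_t)pix_clk_100hz * 100 + htotal_vtotal - 1)
			/ htotal_vtotal;
}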
ASSERT(otg_master->stream_res.pix_clk_params.controller_id >= CONTROLLER_ID_D0); + + use_hpo_encoder = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(otg_master); + if (!use_hpo_encoder) + continue; + + if (otg_master->stream_res.pix_clk_params.controller_id > CONTROLLER_ID_UNDEFINED) + otg_master->clock_source->funcs->program_pix_clk( + otg_master->clock_source, + &otg_master->stream_res.pix_clk_params, + dccg->ctx->dc->link_srv->dp_get_encoding_format( + &otg_master->link_config.dp_link_settings), + &otg_master->pll_settings); + } +} + +static void dcn401_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, + struct dc_state *context, bool safe_to_lower, int ref_dppclk_khz) +{ + int i; + + clk_mgr->dccg->ref_dppclk = ref_dppclk_khz; + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { + int dpp_inst = 0, dppclk_khz, prev_dppclk_khz; + + dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + + if (context->res_ctx.pipe_ctx[i].plane_res.dpp) + dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; + else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) { + /* dpp == NULL && dppclk_khz == 0 is valid because of pipe harvesting. + * In this case just continue in loop + */ + continue; + } else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) { + /* The software state is not valid if dpp resource is NULL and + * dppclk_khz > 0. + */ + ASSERT(false); + continue; + } + + prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i]; + + if (safe_to_lower || prev_dppclk_khz < dppclk_khz) + clk_mgr->dccg->funcs->update_dpp_dto( + clk_mgr->dccg, dpp_inst, dppclk_khz); + } +} + +static int dcn401_set_hard_min_by_freq_optimized(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, int requested_clk_khz) +{ + if (!clk_mgr->smu_present || !dcn401_is_ppclk_dpm_enabled(clk_mgr, clk)) + return 0; + + /* + * SMU set hard min interface takes requested clock in mhz and return + * actual clock configured in khz. If we floor requested clk to mhz, + * there is a chance that the actual clock configured in khz is less + * than requested. If we ceil it to mhz, there is a chance that it + * unnecessarily dumps up to a higher dpm level, which burns more power. + * The solution is to set by flooring it to mhz first. If the actual + * clock returned is less than requested, then we will ceil the + * requested value to mhz and call it again. 
+ */ + int actual_clk_khz = dcn401_smu_set_hard_min_by_freq(clk_mgr, clk, khz_to_mhz_floor(requested_clk_khz)); + + if (actual_clk_khz < requested_clk_khz) + actual_clk_khz = dcn401_smu_set_hard_min_by_freq(clk_mgr, clk, khz_to_mhz_ceil(requested_clk_khz)); + + return actual_clk_khz; +} + +static void dcn401_update_clocks_update_dentist( + struct clk_mgr_internal *clk_mgr, + struct dc_state *context) +{ + uint32_t new_disp_divider = 0; + uint32_t new_dispclk_wdivider = 0; + uint32_t dentist_dispclk_wdivider_readback = 0; + struct dc *dc = clk_mgr->base.ctx->dc; + + if (clk_mgr->base.clks.dispclk_khz == 0) + return; + + new_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz; + + new_dispclk_wdivider = dentist_get_did_from_divider(new_disp_divider); + + if (dc->debug.override_dispclk_programming) { + REG_GET(DENTIST_DISPCLK_CNTL, + DENTIST_DISPCLK_WDIVIDER, &dentist_dispclk_wdivider_readback); + + if (dentist_dispclk_wdivider_readback > new_dispclk_wdivider) { + REG_UPDATE(DENTIST_DISPCLK_CNTL, + DENTIST_DISPCLK_WDIVIDER, new_dispclk_wdivider); + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000); + } + } + +} + +static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned int num_steps) +{ + struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal); + + unsigned int i; + union dcn401_clk_mgr_block_sequence_params *params; + + /* execute sequence */ + for (i = 0; i < num_steps; i++) { + params = &clk_mgr401->block_sequence[i].params; + + switch (clk_mgr401->block_sequence[i].func) { + case CLK_MGR401_READ_CLOCKS_FROM_DENTIST: + dcn2_read_clocks_from_hw_dentist(clk_mgr_base); + break; + case CLK_MGR401_UPDATE_NUM_DISPLAYS: + dcn401_smu_set_num_of_displays(clk_mgr_internal, + params->update_num_displays_params.num_displays); + break; + case CLK_MGR401_UPDATE_HARDMIN_PPCLK: + if (params->update_hardmin_params.response) + *params->update_hardmin_params.response = dcn401_smu_set_hard_min_by_freq( + clk_mgr_internal, + params->update_hardmin_params.ppclk, + params->update_hardmin_params.freq_mhz); + else + dcn401_smu_set_hard_min_by_freq(clk_mgr_internal, + params->update_hardmin_params.ppclk, + params->update_hardmin_params.freq_mhz); + break; + case CLK_MGR401_UPDATE_HARDMIN_PPCLK_OPTIMIZED: + if (params->update_hardmin_optimized_params.response) + *params->update_hardmin_optimized_params.response = dcn401_set_hard_min_by_freq_optimized( + clk_mgr_internal, + params->update_hardmin_optimized_params.ppclk, + params->update_hardmin_optimized_params.freq_khz); + else + dcn401_set_hard_min_by_freq_optimized(clk_mgr_internal, + params->update_hardmin_optimized_params.ppclk, + params->update_hardmin_optimized_params.freq_khz); + break; + case CLK_MGR401_UPDATE_ACTIVE_HARDMINS: + dcn401_smu_set_active_uclk_fclk_hardmin( + clk_mgr_internal, + params->update_idle_hardmin_params.uclk_mhz, + params->update_idle_hardmin_params.fclk_mhz); + break; + case CLK_MGR401_UPDATE_IDLE_HARDMINS: + dcn401_smu_set_idle_uclk_fclk_hardmin( + clk_mgr_internal, + params->update_idle_hardmin_params.uclk_mhz, + params->update_idle_hardmin_params.fclk_mhz); + break; + case CLK_MGR401_UPDATE_SUBVP_HARDMINS: + dcn401_smu_set_subvp_uclk_fclk_hardmin( + clk_mgr_internal, + params->update_idle_hardmin_params.uclk_mhz, + params->update_idle_hardmin_params.fclk_mhz); + break; + case CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK: + 
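The floor-then-ceil strategy that dcn401_set_hard_min_by_freq_optimized's comment explains can be sketched independently of the SMU plumbing; set_hard_min_mhz below is a stand-in for the real call and returns the clock PMFW actually configured, in kHz:

/* Try the floored MHz value first; retry with the ceiled value (which
 * may bump a DPM level and cost power) only if PMFW granted less than
 * requested. */
static int set_hard_min_khz_optimized(int requested_khz,
				      int (*set_hard_min_mhz)(int freq_mhz))
{
	int actual_khz = set_hard_min_mhz(requested_khz / 1000);

	if (actual_khz < requested_khz)
		actual_khz = set_hard_min_mhz((requested_khz + 999) / 1000);

	return actual_khz;
}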
dcn401_smu_set_min_deep_sleep_dcef_clk( + clk_mgr_internal, + params->update_deep_sleep_dcfclk_params.freq_mhz); + break; + case CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT: + dcn401_smu_send_fclk_pstate_message( + clk_mgr_internal, + params->update_pstate_support_params.support); + break; + case CLK_MGR401_UPDATE_UCLK_PSTATE_SUPPORT: + dcn401_smu_send_uclk_pstate_message( + clk_mgr_internal, + params->update_pstate_support_params.support); + break; + case CLK_MGR401_UPDATE_CAB_FOR_UCLK: + dcn401_smu_send_cab_for_uclk_message( + clk_mgr_internal, + params->update_cab_for_uclk_params.num_ways); + break; + case CLK_MGR401_UPDATE_WAIT_FOR_DMUB_ACK: + dcn401_smu_wait_for_dmub_ack_mclk( + clk_mgr_internal, + params->update_wait_for_dmub_ack_params.enable); + break; + case CLK_MGR401_INDICATE_DRR_STATUS: + dcn401_smu_indicate_drr_status( + clk_mgr_internal, + params->indicate_drr_status_params.mod_drr_for_pstate); + break; + case CLK_MGR401_UPDATE_DPPCLK_DTO: + dcn401_update_clocks_update_dpp_dto( + clk_mgr_internal, + params->update_dppclk_dto_params.context, + params->update_dppclk_dto_params.safe_to_lower, + *params->update_dppclk_dto_params.ref_dppclk_khz); + break; + case CLK_MGR401_UPDATE_DTBCLK_DTO: + dcn401_update_clocks_update_dtb_dto( + clk_mgr_internal, + params->update_dtbclk_dto_params.context, + *params->update_dtbclk_dto_params.ref_dtbclk_khz); + break; + case CLK_MGR401_UPDATE_DENTIST: + dcn401_update_clocks_update_dentist( + clk_mgr_internal, + params->update_dentist_params.context); + break; + case CLK_MGR401_UPDATE_PSR_WAIT_LOOP: + params->update_psr_wait_loop_params.dmcu->funcs->set_psr_wait_loop( + params->update_psr_wait_loop_params.dmcu, + params->update_psr_wait_loop_params.wait); + break; + default: + /* this should never happen */ + BREAK_TO_DEBUGGER(); + break; + } + } +} + +static unsigned int dcn401_build_update_bandwidth_clocks_sequence( + struct clk_mgr *clk_mgr_base, + struct dc_state *context, + struct dc_clocks *new_clocks, + bool safe_to_lower) +{ + struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal); + struct dc *dc = clk_mgr_base->ctx->dc; + struct dcn401_clk_mgr_block_sequence *block_sequence = clk_mgr401->block_sequence; + bool enter_display_off = false; + bool update_active_fclk = false; + bool update_active_uclk = false; + bool update_idle_fclk = false; + bool update_idle_uclk = false; + bool update_subvp_prefetch_dramclk = false; + bool update_subvp_prefetch_fclk = false; + bool is_idle_dpm_enabled = dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) && + dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK) && + dcn401_is_ppclk_idle_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) && + dcn401_is_ppclk_idle_dpm_enabled(clk_mgr_internal, PPCLK_FCLK); + bool is_df_throttle_opt_enabled = is_idle_dpm_enabled && + dcn401_is_df_throttle_opt_enabled(clk_mgr_internal); + int total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context); + int active_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz); + int active_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.fclk_khz); + int idle_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_dramclk_khz); + int idle_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_fclk_khz); + int subvp_prefetch_dramclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_dramclk_khz); + int subvp_prefetch_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_fclk_khz); + + unsigned int num_steps = 0; + + int 
display_count; + bool fclk_p_state_change_support, uclk_p_state_change_support; + + /* CLK_MGR401_UPDATE_NUM_DISPLAYS */ + if (clk_mgr_internal->smu_present) { + display_count = clk_mgr_helper_get_active_display_cnt(dc, context); + + if (display_count == 0) + enter_display_off = true; + + if (enter_display_off == safe_to_lower) { + block_sequence[num_steps].params.update_num_displays_params.num_displays = display_count; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_NUM_DISPLAYS; + num_steps++; + } + } + + /* CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT */ + clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support; + fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0); + if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_prev_p_state_change_support)) { + clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support; + update_active_fclk = true; + update_idle_fclk = true; + + /* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW (message not supported on DCN401)*/ + // if (clk_mgr_base->clks.fclk_p_state_change_support) { + // /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */ + // if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) { + // block_sequence[num_steps].params.update_pstate_support_params.support = true; + // block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT; + // num_steps++; + // } + // } + } + + if (!clk_mgr_base->clks.fclk_p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) { + /* when P-State switching disabled, set UCLK min = max */ + idle_fclk_mhz = + clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1].fclk_mhz; + active_fclk_mhz = idle_fclk_mhz; + } + + /* UPDATE DCFCLK */ + if (dc->debug.force_min_dcfclk_mhz > 0) + new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ? 
+ new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000); + + if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) { + clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz; + if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DCFCLK)) { + block_sequence[num_steps].params.update_hardmin_params.ppclk = PPCLK_DCFCLK; + block_sequence[num_steps].params.update_hardmin_params.freq_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz); + block_sequence[num_steps].params.update_hardmin_params.response = NULL; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK; + num_steps++; + } + } + + /* CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK */ + if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) { + clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; + if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DCFCLK)) { + block_sequence[num_steps].params.update_deep_sleep_dcfclk_params.freq_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz); + block_sequence[num_steps].func = CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK; + num_steps++; + } + } + + /* SOCCLK */ + if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz)) + /* We don't actually care about socclk, don't notify SMU of hard min */ + clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz; + + /* UCLK */ + if (new_clocks->fw_based_mclk_switching != clk_mgr_base->clks.fw_based_mclk_switching && + new_clocks->fw_based_mclk_switching) { + /* enable FAMS features */ + clk_mgr_base->clks.fw_based_mclk_switching = new_clocks->fw_based_mclk_switching; + + block_sequence[num_steps].params.update_wait_for_dmub_ack_params.enable = clk_mgr_base->clks.fw_based_mclk_switching; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_WAIT_FOR_DMUB_ACK; + num_steps++; + + block_sequence[num_steps].params.indicate_drr_status_params.mod_drr_for_pstate = clk_mgr_base->clks.fw_based_mclk_switching; + block_sequence[num_steps].func = CLK_MGR401_INDICATE_DRR_STATUS; + num_steps++; + } + + /* CLK_MGR401_UPDATE_CAB_FOR_UCLK */ + clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways; + if (clk_mgr_base->clks.num_ways != new_clocks->num_ways && + clk_mgr_base->clks.num_ways < new_clocks->num_ways) { + /* increase num ways for subvp */ + clk_mgr_base->clks.num_ways = new_clocks->num_ways; + if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK)) { + block_sequence[num_steps].params.update_cab_for_uclk_params.num_ways = clk_mgr_base->clks.num_ways; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_CAB_FOR_UCLK; + num_steps++; + } + } + + clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; + uclk_p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0); + if (should_update_pstate_support(safe_to_lower, uclk_p_state_change_support, clk_mgr_base->clks.prev_p_state_change_support)) { + clk_mgr_base->clks.p_state_change_support = uclk_p_state_change_support; + update_active_uclk = true; + update_idle_uclk = true; + + if (clk_mgr_base->clks.p_state_change_support) { + /* enable UCLK switching */ + if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK)) { + block_sequence[num_steps].params.update_pstate_support_params.support = true; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_UCLK_PSTATE_SUPPORT; + num_steps++; + } + } + } + + if (!clk_mgr_base->clks.p_state_change_support && 
dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK)) { + /* when P-State switching disabled, set UCLK min = max */ + if (dc->clk_mgr->dc_mode_softmax_enabled) { + /* will never have the functional UCLK min above the softmax + * since we calculate mode support based on softmax being the max UCLK + * frequency. + */ + active_uclk_mhz = clk_mgr_base->bw_params->dc_mode_softmax_memclk; + } else { + active_uclk_mhz = clk_mgr_base->bw_params->max_memclk_mhz; + } + idle_uclk_mhz = active_uclk_mhz; + } + + /* Always update saved value, even if new value not set due to P-State switching unsupported */ + if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) { + clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz; + + if (clk_mgr_base->clks.p_state_change_support) { + update_active_uclk = true; + active_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz); + } + } + + if (should_set_clock(safe_to_lower, new_clocks->idle_dramclk_khz, clk_mgr_base->clks.idle_dramclk_khz)) { + clk_mgr_base->clks.idle_dramclk_khz = new_clocks->idle_dramclk_khz; + + if (clk_mgr_base->clks.p_state_change_support) { + update_idle_uclk = true; + idle_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_dramclk_khz); + } + } + + if (should_set_clock(safe_to_lower, new_clocks->subvp_prefetch_dramclk_khz, clk_mgr_base->clks.subvp_prefetch_dramclk_khz)) { + clk_mgr_base->clks.subvp_prefetch_dramclk_khz = new_clocks->subvp_prefetch_dramclk_khz; + update_subvp_prefetch_dramclk = true; + subvp_prefetch_dramclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_dramclk_khz); + } + + /* FCLK */ + /* Always update saved value, even if new value not set due to P-State switching unsupported */ + if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) { + clk_mgr_base->clks.fclk_khz = new_clocks->fclk_khz; + + if (clk_mgr_base->clks.fclk_p_state_change_support) { + update_active_fclk = true; + active_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.fclk_khz); + } + } + + if (should_set_clock(safe_to_lower, new_clocks->idle_fclk_khz, clk_mgr_base->clks.idle_fclk_khz)) { + clk_mgr_base->clks.idle_fclk_khz = new_clocks->idle_fclk_khz; + + if (clk_mgr_base->clks.fclk_p_state_change_support) { + update_idle_fclk = true; + idle_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_fclk_khz); + } + } + + if (should_set_clock(safe_to_lower, new_clocks->subvp_prefetch_fclk_khz, clk_mgr_base->clks.subvp_prefetch_fclk_khz)) { + clk_mgr_base->clks.subvp_prefetch_fclk_khz = new_clocks->subvp_prefetch_fclk_khz; + update_subvp_prefetch_fclk = true; + subvp_prefetch_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_fclk_khz); + } + + /* When idle DPM is enabled, need to send active and idle hardmins separately */ + /* CLK_MGR401_UPDATE_ACTIVE_HARDMINS */ + if ((update_active_uclk || update_active_fclk) && is_idle_dpm_enabled) { + block_sequence[num_steps].params.update_idle_hardmin_params.uclk_mhz = active_uclk_mhz; + block_sequence[num_steps].params.update_idle_hardmin_params.fclk_mhz = active_fclk_mhz; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_ACTIVE_HARDMINS; + num_steps++; + } + + /* CLK_MGR401_UPDATE_IDLE_HARDMINS */ + if ((update_idle_uclk || update_idle_fclk) && is_idle_dpm_enabled) { + block_sequence[num_steps].params.update_idle_hardmin_params.uclk_mhz = idle_uclk_mhz; + block_sequence[num_steps].params.update_idle_hardmin_params.fclk_mhz = idle_fclk_mhz; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_IDLE_HARDMINS; + num_steps++; + 
} + + /* CLK_MGR401_UPDATE_SUBVP_HARDMINS */ + if ((update_subvp_prefetch_dramclk || update_subvp_prefetch_fclk) && is_df_throttle_opt_enabled) { + block_sequence[num_steps].params.update_idle_hardmin_params.uclk_mhz = subvp_prefetch_dramclk_mhz; + block_sequence[num_steps].params.update_idle_hardmin_params.fclk_mhz = subvp_prefetch_fclk_mhz; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_SUBVP_HARDMINS; + num_steps++; + } + + /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */ + if (update_active_uclk || update_idle_uclk) { + if (!is_idle_dpm_enabled) { + block_sequence[num_steps].params.update_hardmin_params.ppclk = PPCLK_UCLK; + block_sequence[num_steps].params.update_hardmin_params.freq_mhz = active_uclk_mhz; + block_sequence[num_steps].params.update_hardmin_params.response = NULL; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK; + num_steps++; + } + + /* disable UCLK P-State support if needed */ + if (!uclk_p_state_change_support && + should_update_pstate_support(safe_to_lower, uclk_p_state_change_support, clk_mgr_base->clks.prev_p_state_change_support) && + dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK)) { + block_sequence[num_steps].params.update_pstate_support_params.support = false; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_UCLK_PSTATE_SUPPORT; + num_steps++; + } + } + + /* set FCLK to requested value if P-State switching is supported, or to re-enable P-State switching */ + if (update_active_fclk || update_idle_fclk) { + /* No need to send active FCLK hardmin, automatically set based on DCFCLK */ + // if (!is_idle_dpm_enabled) { + // block_sequence[*num_steps].update_hardmin_params.clk_mgr = clk_mgr; + // block_sequence[*num_steps].update_hardmin_params.ppclk = PPCLK_FCLK; + // block_sequence[*num_steps].update_hardmin_params.freq_mhz = active_fclk_mhz; + // block_sequence[*num_steps].update_hardmin_params.response = NULL; + // block_sequence[*num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK; + // (*num_steps)++; + // } + + /* disable FCLK P-State support if needed (message not supported on DCN401)*/ + // if (!fclk_p_state_change_support && + // should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_prev_p_state_change_support) && + // dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) { + // block_sequence[num_steps].params.update_pstate_support_params.support = false; + // block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT; + // num_steps++; + // } + } + + if (new_clocks->fw_based_mclk_switching != clk_mgr_base->clks.fw_based_mclk_switching && + safe_to_lower && !new_clocks->fw_based_mclk_switching) { + /* disable FAMS features */ + clk_mgr_base->clks.fw_based_mclk_switching = new_clocks->fw_based_mclk_switching; + + block_sequence[num_steps].params.update_wait_for_dmub_ack_params.enable = clk_mgr_base->clks.fw_based_mclk_switching; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_WAIT_FOR_DMUB_ACK; + num_steps++; + + block_sequence[num_steps].params.indicate_drr_status_params.mod_drr_for_pstate = clk_mgr_base->clks.fw_based_mclk_switching; + block_sequence[num_steps].func = CLK_MGR401_INDICATE_DRR_STATUS; + num_steps++; + } + + /* CLK_MGR401_UPDATE_CAB_FOR_UCLK */ + if (clk_mgr_base->clks.num_ways != new_clocks->num_ways && + safe_to_lower && clk_mgr_base->clks.num_ways > new_clocks->num_ways) { + /* decrease num ways for subvp */ + clk_mgr_base->clks.num_ways = new_clocks->num_ways; + if 
(dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK)) { + block_sequence[num_steps].params.update_cab_for_uclk_params.num_ways = clk_mgr_base->clks.num_ways; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_CAB_FOR_UCLK; + num_steps++; + } + } + + return num_steps; +} + +static unsigned int dcn401_build_update_display_clocks_sequence( + struct clk_mgr *clk_mgr_base, + struct dc_state *context, + struct dc_clocks *new_clocks, + bool safe_to_lower) +{ + struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal); + struct dc *dc = clk_mgr_base->ctx->dc; + struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; + struct dcn401_clk_mgr_block_sequence *block_sequence = clk_mgr401->block_sequence; + bool force_reset = false; + bool update_dispclk = false; + bool update_dppclk = false; + bool dppclk_lowered = false; + + unsigned int num_steps = 0; + + /* CLK_MGR401_READ_CLOCKS_FROM_DENTIST */ + if (clk_mgr_base->clks.dispclk_khz == 0 || + (dc->debug.force_clock_mode & 0x1)) { + /* This is from resume or boot up; if the forced_clock cfg option is used, + * we bypass programming DISPCLK and DPPCLK, but still need to set them for S3. + * Force_clock_mode 0x1: force reset the clock even if it is the same clock, + * as long as it is in Passive level. + */ + force_reset = true; + + clk_mgr_base->clks.dispclk_khz = clk_mgr_base->boot_snapshot.dispclk; + clk_mgr_base->clks.actual_dispclk_khz = clk_mgr_base->clks.dispclk_khz; + + clk_mgr_base->clks.dppclk_khz = clk_mgr_base->boot_snapshot.dppclk; + clk_mgr_base->clks.actual_dppclk_khz = clk_mgr_base->clks.dppclk_khz; + } + + /* DTBCLK */ + if (!new_clocks->dtbclk_en && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DTBCLK)) { + new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; + } + + /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */ + if (!dc->debug.disable_dtb_ref_clk_switch && + should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000) && //TODO these should be ceiled + dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DTBCLK)) { + /* DCCG requires KHz precision for DTBCLK */ + block_sequence[num_steps].params.update_hardmin_params.ppclk = PPCLK_DTBCLK; + block_sequence[num_steps].params.update_hardmin_params.freq_mhz = khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz); + block_sequence[num_steps].params.update_hardmin_params.response = &clk_mgr_base->clks.ref_dtbclk_khz; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK; + num_steps++; + + /* Update DTO in DCCG */ + block_sequence[num_steps].params.update_dtbclk_dto_params.context = context; + block_sequence[num_steps].params.update_dtbclk_dto_params.ref_dtbclk_khz = &clk_mgr_base->clks.ref_dtbclk_khz; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_DTBCLK_DTO; + num_steps++; + } + + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) { + if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz) + dppclk_lowered = true; + + clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; + clk_mgr_base->clks.actual_dppclk_khz = new_clocks->dppclk_khz; + + update_dppclk = true; + } + + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; + + block_sequence[num_steps].params.update_hardmin_optimized_params.ppclk = 
PPCLK_DISPCLK; + block_sequence[num_steps].params.update_hardmin_optimized_params.freq_khz = clk_mgr_base->clks.dispclk_khz; + block_sequence[num_steps].params.update_hardmin_optimized_params.response = &clk_mgr_base->clks.actual_dispclk_khz; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK_OPTIMIZED; + num_steps++; + + update_dispclk = true; + } + + if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { + if (dppclk_lowered) { + /* if clock is being lowered, increase DTO before lowering refclk */ + block_sequence[num_steps].params.update_dppclk_dto_params.context = context; + block_sequence[num_steps].params.update_dppclk_dto_params.ref_dppclk_khz = &clk_mgr_base->clks.dppclk_khz; + block_sequence[num_steps].params.update_dppclk_dto_params.safe_to_lower = safe_to_lower; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_DPPCLK_DTO; + num_steps++; + + block_sequence[num_steps].params.update_dentist_params.context = context; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_DENTIST; + num_steps++; + + if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DPPCLK)) { + block_sequence[num_steps].params.update_hardmin_optimized_params.ppclk = PPCLK_DPPCLK; + block_sequence[num_steps].params.update_hardmin_optimized_params.freq_khz = clk_mgr_base->clks.dppclk_khz; + block_sequence[num_steps].params.update_hardmin_optimized_params.response = &clk_mgr_base->clks.actual_dppclk_khz; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK_OPTIMIZED; + num_steps++; + + block_sequence[num_steps].params.update_dppclk_dto_params.context = context; + block_sequence[num_steps].params.update_dppclk_dto_params.ref_dppclk_khz = &clk_mgr_base->clks.actual_dppclk_khz; + block_sequence[num_steps].params.update_dppclk_dto_params.safe_to_lower = safe_to_lower; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_DPPCLK_DTO; + num_steps++; + } + } else { + /* if clock is being raised, increase refclk before lowering DTO */ + if (update_dppclk && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_DPPCLK)) { + block_sequence[num_steps].params.update_hardmin_optimized_params.ppclk = PPCLK_DPPCLK; + block_sequence[num_steps].params.update_hardmin_optimized_params.freq_khz = clk_mgr_base->clks.dppclk_khz; + block_sequence[num_steps].params.update_hardmin_optimized_params.response = &clk_mgr_base->clks.actual_dppclk_khz; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_HARDMIN_PPCLK_OPTIMIZED; + num_steps++; + } + + if (update_dppclk || update_dispclk) { + block_sequence[num_steps].params.update_dentist_params.context = context; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_DENTIST; + num_steps++; + } + + block_sequence[num_steps].params.update_dppclk_dto_params.context = context; + block_sequence[num_steps].params.update_dppclk_dto_params.ref_dppclk_khz = &clk_mgr_base->clks.actual_dppclk_khz; + block_sequence[num_steps].params.update_dppclk_dto_params.safe_to_lower = safe_to_lower; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_DPPCLK_DTO; + num_steps++; + } + } + + if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + /* update dmcu for wait_loop count */ + block_sequence[num_steps].params.update_psr_wait_loop_params.dmcu = dmcu; + block_sequence[num_steps].params.update_psr_wait_loop_params.wait = clk_mgr_base->clks.dispclk_khz / 1000 / 7; + block_sequence[num_steps].func = CLK_MGR401_UPDATE_PSR_WAIT_LOOP; + num_steps++; + } + + return num_steps; +} + +static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base, + struct 
dc_state *context, + bool safe_to_lower) +{ + struct dc *dc = clk_mgr_base->ctx->dc; + + unsigned int num_steps = 0; + + /* build bandwidth related clocks update sequence */ + num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base, + context, + &context->bw_ctx.bw.dcn.clk, + safe_to_lower); + + /* execute sequence */ + dcn401_execute_block_sequence(clk_mgr_base, num_steps); + + /* build display related clocks update sequence */ + num_steps = dcn401_build_update_display_clocks_sequence(clk_mgr_base, + context, + &context->bw_ctx.bw.dcn.clk, + safe_to_lower); + + /* execute sequence */ + dcn401_execute_block_sequence(clk_mgr_base, num_steps); + + if (dc->config.enable_auto_dpm_test_logs) + dcn401_auto_dpm_test_log(&context->bw_ctx.bw.dcn.clk, TO_CLK_MGR_INTERNAL(clk_mgr_base), context); + +} + +static uint32_t dcn401_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) +{ + struct fixed31_32 pll_req; + uint32_t pll_req_reg = 0; + + /* get FbMult value */ + pll_req_reg = REG_READ(CLK0_CLK_PLL_REQ); + + /* set up a fixed-point number + * this works because the int part is on the right edge of the register + * and the frac part is on the left edge + */ + pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int); + pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac; + + /* multiply by REFCLK period */ + pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz); + + return dc_fixpt_floor(pll_req); +} + +static void dcn401_clock_read_ss_info(struct clk_mgr_internal *clk_mgr) +{ + struct dc_bios *bp = clk_mgr->base.ctx->dc_bios; + int ss_info_num = bp->funcs->get_ss_entry_number( + bp, AS_SIGNAL_TYPE_GPU_PLL); + + if (ss_info_num) { + struct spread_spectrum_info info = { { 0 } }; + enum bp_result result = bp->funcs->get_spread_spectrum_info( + bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info); + + /* SSInfo.spreadSpectrumPercentage != 0 is a sign + * that SS is enabled + */ + if (result == BP_RESULT_OK && + info.spread_spectrum_percentage != 0) { + clk_mgr->ss_on_dprefclk = true; + clk_mgr->dprefclk_ss_divider = info.spread_percentage_divider; + + if (info.type.CENTER_MODE == 0) { + /* Currently for DP Reference clock we + * need only SS percentage for + * downspread + */ + clk_mgr->dprefclk_ss_percentage = + info.spread_spectrum_percentage; + } + } + } +} + +static void dcn401_notify_wm_ranges(struct clk_mgr *clk_mgr_base) +{ + unsigned int i; + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + WatermarksExternal_t *table = (WatermarksExternal_t *) clk_mgr->wm_range_table; + + if (!clk_mgr->smu_present) + return; + + if (!table) + return; + + memset(table, 0, sizeof(*table)); + + /* collect valid ranges, place in pmfw table */ + for (i = 0; i < WM_SET_COUNT; i++) + if (clk_mgr->base.bw_params->wm_table.nv_entries[i].valid) { + table->Watermarks.WatermarkRow[i].WmSetting = i; + table->Watermarks.WatermarkRow[i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type; + } + dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32); + dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF); + dcn401_smu_transfer_wm_table_dram_2_smu(clk_mgr); +} + +/* Set min memclk to minimum, either constrained by the current mode or DPM0 */ +static void dcn401_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_mode) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + const struct dc *dc = clk_mgr->base.ctx->dc; + struct dc_state *context = 
dc->current_state; + struct dc_clocks new_clocks; + int num_steps; + + if (!clk_mgr->smu_present || !dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK)) + return; + + /* build clock update */ + memcpy(&new_clocks, &clk_mgr_base->clks, sizeof(struct dc_clocks)); + + if (current_mode) { + new_clocks.dramclk_khz = context->bw_ctx.bw.dcn.clk.dramclk_khz; + new_clocks.idle_dramclk_khz = context->bw_ctx.bw.dcn.clk.idle_dramclk_khz; + new_clocks.p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; + } else { + new_clocks.dramclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz * 1000; + new_clocks.idle_dramclk_khz = new_clocks.dramclk_khz; + new_clocks.p_state_change_support = true; + } + + num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base, + context, + &new_clocks, + true); + + /* execute sequence */ + dcn401_execute_block_sequence(clk_mgr_base, num_steps); +} + +static int dcn401_get_hard_min_memclk(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return clk_mgr->base.ctx->dc->current_state->bw_ctx.bw.dcn.clk.dramclk_khz; +} + +static int dcn401_get_hard_min_fclk(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return clk_mgr->base.ctx->dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz; +} + +/* Get current memclk states, update bounding box */ +static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk; + unsigned int num_levels; + + if (!clk_mgr->smu_present) + return; + + /* Refresh memclk and fclk states */ + dcn401_init_single_clock(clk_mgr, PPCLK_UCLK, + &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz, + &num_entries_per_clk->num_memclk_levels); + if (num_entries_per_clk->num_memclk_levels) { + clk_mgr_base->bw_params->max_memclk_mhz = + clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz; + } + + clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); + if (num_entries_per_clk->num_memclk_levels && clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz == + clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz) + clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = 0; + clk_mgr_base->bw_params->dc_mode_softmax_memclk = clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz; + + dcn401_init_single_clock(clk_mgr, PPCLK_FCLK, + &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz, + &num_entries_per_clk->num_fclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK); + if (num_entries_per_clk->num_fclk_levels && clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz == + clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_fclk_levels - 1].fclk_mhz) + clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = 0; + + if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) { + num_levels = num_entries_per_clk->num_memclk_levels; + } else { + num_levels = num_entries_per_clk->num_fclk_levels; + } + + clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? 
num_levels : 1; + + if (clk_mgr->dpm_present && !num_levels) + clk_mgr->dpm_present = false; + + clk_mgr_base->bw_params->num_channels = dcn401_smu_get_num_of_umc_channels(clk_mgr); + if (clk_mgr_base->ctx->dc_bios) { + /* use BIOS values if none provided by PMFW */ + if (clk_mgr_base->bw_params->num_channels == 0) { + clk_mgr_base->bw_params->num_channels = clk_mgr_base->ctx->dc_bios->vram_info.num_chans; + } + clk_mgr_base->bw_params->dram_channel_width_bytes = clk_mgr_base->ctx->dc_bios->vram_info.dram_channel_width_bytes; + } + + /* Refresh bounding box */ + clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( + clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); +} + +static bool dcn401_are_clock_states_equal(struct dc_clocks *a, + struct dc_clocks *b) +{ + if (a->dispclk_khz != b->dispclk_khz) + return false; + else if (a->dppclk_khz != b->dppclk_khz) + return false; + else if (a->dcfclk_khz != b->dcfclk_khz) + return false; + else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) + return false; + else if (a->dramclk_khz != b->dramclk_khz) + return false; + else if (a->p_state_change_support != b->p_state_change_support) + return false; + else if (a->fclk_p_state_change_support != b->fclk_p_state_change_support) + return false; + + return true; +} + +static void dcn401_enable_pme_wa(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + if (!clk_mgr->smu_present) + return; + + dcn401_smu_set_pme_workaround(clk_mgr); +} + +static bool dcn401_is_smu_present(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return clk_mgr->smu_present; +} + +static int dcn401_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + int dtb_ref_clk_khz = 0; + + if (clk_mgr->smu_present && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DTBCLK)) { + /* DPM enabled, use currently set value */ + dtb_ref_clk_khz = clk_mgr_base->clks.ref_dtbclk_khz; + } else { + /* DPM disabled, so use boot snapshot */ + dtb_ref_clk_khz = clk_mgr_base->boot_snapshot.dtbclk; + } + + return dtb_ref_clk_khz; +} + +static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + uint32_t dispclk_wdivider; + int disp_divider; + + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider); + disp_divider = dentist_get_divider_from_did(dispclk_wdivider); + + /* Return DISPCLK freq in kHz */ + if (disp_divider) + return (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / disp_divider; + + return 0; +} + +static struct clk_mgr_funcs dcn401_funcs = { + .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz, + .update_clocks = dcn401_update_clocks, + .dump_clk_registers = dcn401_dump_clk_registers, + .init_clocks = dcn401_init_clocks, + .notify_wm_ranges = dcn401_notify_wm_ranges, + .set_hard_min_memclk = dcn401_set_hard_min_memclk, + .get_memclk_states_from_smu = dcn401_get_memclk_states_from_smu, + .are_clock_states_equal = dcn401_are_clock_states_equal, + .enable_pme_wa = dcn401_enable_pme_wa, + .is_smu_present = dcn401_is_smu_present, + .get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist, + .get_hard_min_memclk = dcn401_get_hard_min_memclk, + .get_hard_min_fclk = dcn401_get_hard_min_fclk, +}; + +struct clk_mgr_internal *dcn401_clk_mgr_construct( + 
struct dc_context *ctx, + struct dccg *dccg) +{ + struct clk_log_info log_info = {0}; + struct dcn401_clk_mgr *clk_mgr401 = kzalloc(sizeof(struct dcn401_clk_mgr), GFP_KERNEL); + struct clk_mgr_internal *clk_mgr; + + if (!clk_mgr401) + return NULL; + + clk_mgr = &clk_mgr401->base; + clk_mgr->base.ctx = ctx; + clk_mgr->base.funcs = &dcn401_funcs; + clk_mgr->regs = &clk_mgr_regs_dcn401; + clk_mgr->clk_mgr_shift = &clk_mgr_shift_dcn401; + clk_mgr->clk_mgr_mask = &clk_mgr_mask_dcn401; + + clk_mgr->dccg = dccg; + clk_mgr->dfs_bypass_disp_clk = 0; + + clk_mgr->dprefclk_ss_percentage = 0; + clk_mgr->dprefclk_ss_divider = 1000; + clk_mgr->ss_on_dprefclk = false; + clk_mgr->dfs_ref_freq_khz = 100000; + + /* Changed from DCN3.2_clock_frequency doc to match + * dcn401_dump_clk_registers from 4 * dentist_vco_freq_khz / + * dprefclk DID divider + */ + clk_mgr->base.dprefclk_khz = 720000; //TODO update from VBIOS + + /* integer part is now VCO frequency in kHz */ + clk_mgr->base.dentist_vco_freq_khz = dcn401_get_vco_frequency_from_reg(clk_mgr); + + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 4500000; //TODO update from VBIOS + + dcn401_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); + + if (ctx->dc->debug.disable_dtb_ref_clk_switch && + clk_mgr->base.clks.ref_dtbclk_khz != clk_mgr->base.boot_snapshot.dtbclk) { + clk_mgr->base.clks.ref_dtbclk_khz = clk_mgr->base.boot_snapshot.dtbclk; + } + + if (clk_mgr->base.boot_snapshot.dprefclk != 0) { + clk_mgr->base.dprefclk_khz = clk_mgr->base.boot_snapshot.dprefclk; + } + dcn401_clock_read_ss_info(clk_mgr); + + clk_mgr->dfs_bypass_enabled = false; + + clk_mgr->smu_present = false; + + clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); + if (!clk_mgr->base.bw_params) { + BREAK_TO_DEBUGGER(); + kfree(clk_mgr); + return NULL; + } + + /* need physical address of table to give to PMFW */ + clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx, + DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t), + &clk_mgr->wm_range_table_addr); + if (!clk_mgr->wm_range_table) { + BREAK_TO_DEBUGGER(); + kfree(clk_mgr->base.bw_params); + kfree(clk_mgr); + return NULL; + } + + return &clk_mgr401->base; +} + +void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr) +{ + kfree(clk_mgr->base.bw_params); + + if (clk_mgr->wm_range_table) + dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, + clk_mgr->wm_range_table); +} + diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h new file mode 100644 index 000000000000..6c9ae5ca2c7e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. 
+ +#ifndef __DCN401_CLK_MGR_H_ +#define __DCN401_CLK_MGR_H_ + +#define DCN401_CLK_MGR_MAX_SEQUENCE_SIZE 30 + +union dcn401_clk_mgr_block_sequence_params { + struct { + /* inputs */ + uint32_t num_displays; + } update_num_displays_params; + struct { + /* inputs */ + uint32_t ppclk; + uint16_t freq_mhz; + /* outputs */ + uint32_t *response; + } update_hardmin_params; + struct { + /* inputs */ + uint32_t ppclk; + int freq_khz; + /* outputs */ + uint32_t *response; + } update_hardmin_optimized_params; + struct { + /* inputs */ + uint16_t uclk_mhz; + uint16_t fclk_mhz; + } update_idle_hardmin_params; + struct { + /* inputs */ + uint16_t freq_mhz; + } update_deep_sleep_dcfclk_params; + struct { + /* inputs */ + bool support; + } update_pstate_support_params; + struct { + /* inputs */ + unsigned int num_ways; + } update_cab_for_uclk_params; + struct { + /* inputs */ + bool enable; + } update_wait_for_dmub_ack_params; + struct { + /* inputs */ + bool mod_drr_for_pstate; + } indicate_drr_status_params; + struct { + /* inputs */ + struct dc_state *context; + int *ref_dppclk_khz; + bool safe_to_lower; + } update_dppclk_dto_params; + struct { + /* inputs */ + struct dc_state *context; + int *ref_dtbclk_khz; + } update_dtbclk_dto_params; + struct { + /* inputs */ + struct dc_state *context; + } update_dentist_params; + struct { + /* inputs */ + struct dmcu *dmcu; + unsigned int wait; + } update_psr_wait_loop_params; +}; + +enum dcn401_clk_mgr_block_sequence_func { + CLK_MGR401_READ_CLOCKS_FROM_DENTIST, + CLK_MGR401_UPDATE_NUM_DISPLAYS, + CLK_MGR401_UPDATE_HARDMIN_PPCLK, + CLK_MGR401_UPDATE_HARDMIN_PPCLK_OPTIMIZED, + CLK_MGR401_UPDATE_ACTIVE_HARDMINS, + CLK_MGR401_UPDATE_IDLE_HARDMINS, + CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK, + CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT, + CLK_MGR401_UPDATE_UCLK_PSTATE_SUPPORT, + CLK_MGR401_UPDATE_CAB_FOR_UCLK, + CLK_MGR401_UPDATE_WAIT_FOR_DMUB_ACK, + CLK_MGR401_INDICATE_DRR_STATUS, + CLK_MGR401_UPDATE_DPPCLK_DTO, + CLK_MGR401_UPDATE_DTBCLK_DTO, + CLK_MGR401_UPDATE_DENTIST, + CLK_MGR401_UPDATE_PSR_WAIT_LOOP, + CLK_MGR401_UPDATE_SUBVP_HARDMINS, +}; + +struct dcn401_clk_mgr_block_sequence { + union dcn401_clk_mgr_block_sequence_params params; + enum dcn401_clk_mgr_block_sequence_func func; +}; + +struct dcn401_clk_mgr { + struct clk_mgr_internal base; + + struct dcn401_clk_mgr_block_sequence block_sequence[DCN401_CLK_MGR_MAX_SEQUENCE_SIZE]; +}; + +void dcn401_init_clocks(struct clk_mgr *clk_mgr_base); + +struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx, + struct dccg *dccg); + +void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr); + +#endif /* __DCN401_CLK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c new file mode 100644 index 000000000000..21c35528f61f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c @@ -0,0 +1,350 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. + +#include "dcn401_clk_mgr_smu_msg.h" + +#include "clk_mgr_internal.h" +#include "reg_helper.h" + +#include "dalsmc.h" +#include "dcn401_smu14_driver_if.h" + +#define mmDAL_MSG_REG 0x1628A +#define mmDAL_ARG_REG 0x16273 +#define mmDAL_RESP_REG 0x16274 + +#define REG(reg_name) \ + mm ## reg_name + +#include "logger_types.h" + +#define smu_print(str, ...) 
{DC_LOG_SMU(str, ##__VA_ARGS__); } + +/* temporary define */ +#ifndef DALSMC_MSG_SubvpUclkFclk +#define DALSMC_MSG_SubvpUclkFclk 0x1B +#endif +#ifndef DALSMC_MSG_GetNumUmcChannels +#define DALSMC_MSG_GetNumUmcChannels 0x1C +#endif + +/* + * Function to be used instead of REG_WAIT macro because the wait ends when + * the register is NOT EQUAL to zero, and because the translation in msg_if.h + * won't work with REG_WAIT. + */ +static uint32_t dcn401_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) +{ + uint32_t reg = 0; + + do { + reg = REG_READ(DAL_RESP_REG); + if (reg) + break; + + if (delay_us >= 1000) + msleep(delay_us/1000); + else if (delay_us > 0) + udelay(delay_us); + } while (max_retries--); + + return reg; +} + +static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint32_t msg_id, uint32_t param_in, uint32_t *param_out) +{ + /* Wait for response register to be ready */ + dcn401_smu_wait_for_response(clk_mgr, 10, 200000); + + /* Clear response register */ + REG_WRITE(DAL_RESP_REG, 0); + + /* Set the parameter register for the SMU message */ + REG_WRITE(DAL_ARG_REG, param_in); + + /* Trigger the message transaction by writing the message ID */ + REG_WRITE(DAL_MSG_REG, msg_id); + + /* Wait for response */ + if (dcn401_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) { + if (param_out) + *param_out = REG_READ(DAL_ARG_REG); + + return true; + } + + return false; +} + +/* + * Use these functions to return back delay information so we can aggregate the total + * delay when requesting hardmin clk + * + * dcn401_smu_wait_for_response_delay + * dcn401_smu_send_msg_with_param_delay + * + */ +static uint32_t dcn401_smu_wait_for_response_delay(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries, unsigned int *total_delay_us) +{ + uint32_t reg = 0; + *total_delay_us = 0; + + do { + reg = REG_READ(DAL_RESP_REG); + if (reg) + break; + + if (delay_us >= 1000) + msleep(delay_us/1000); + else if (delay_us > 0) + udelay(delay_us); + *total_delay_us += delay_us; + } while (max_retries--); + + TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx); + + return reg; +} + +static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mgr, uint32_t msg_id, uint32_t param_in, uint32_t *param_out, unsigned int *total_delay_us) +{ + unsigned int delay1_us, delay2_us; + *total_delay_us = 0; + + /* Wait for response register to be ready */ + dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay1_us); + + /* Clear response register */ + REG_WRITE(DAL_RESP_REG, 0); + + /* Set the parameter register for the SMU message */ + REG_WRITE(DAL_ARG_REG, param_in); + + /* Trigger the message transaction by writing the message ID */ + REG_WRITE(DAL_MSG_REG, msg_id); + + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + + /* Wait for response */ + if (dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) { + if (param_out) + *param_out = REG_READ(DAL_ARG_REG); + + *total_delay_us = delay1_us + delay2_us; + return true; + } + + *total_delay_us = delay1_us + 2000000; + return false; +} + +void dcn401_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support) +{ + smu_print("FCLK P-state support value is : %d\n", support); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SetFclkSwitchAllow, support, NULL); +} + +void dcn401_smu_send_uclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support) +{ + smu_print("UCLK 
P-state support value is : %d\n", support); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SetUclkPstateAllow, support, NULL); +} + +void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways) +{ + uint32_t param = (num_ways << 1) | (num_ways > 0); + + dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetCabForUclkPstate, param, NULL); + smu_print("Numways for SubVP : %d\n", num_ways); +} + +void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) +{ + smu_print("SMU Transfer WM table DRAM 2 SMU\n"); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS, NULL); +} + +void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr) +{ + smu_print("SMU Set PME workaround\n"); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_BacoAudioD3PME, 0, NULL); +} + +static unsigned int dcn401_smu_get_hard_min_status(struct clk_mgr_internal *clk_mgr, bool *no_timeout, unsigned int *total_delay_us) +{ + uint32_t response = 0; + + /* ReturnHardMinStatus takes no parameter */ + uint32_t param = 0; + + *no_timeout = dcn401_smu_send_msg_with_param_delay(clk_mgr, + DALSMC_MSG_ReturnHardMinStatus, param, &response, total_delay_us); + + smu_print("SMU Get hard min status: no_timeout %d delay %d us clk bits %x\n", + *no_timeout, *total_delay_us, response); + + return response; +} + +static bool dcn401_smu_wait_hard_min_status(struct clk_mgr_internal *clk_mgr, uint32_t ppclk) +{ + const unsigned int max_delay_us = 1000000; + + unsigned int hardmin_status_mask = (1 << ppclk); + unsigned int total_delay_us = 0; + bool hardmin_done = false; + + while (!hardmin_done && total_delay_us < max_delay_us) { + unsigned int hardmin_status; + unsigned int read_total_delay_us; + bool no_timeout; + + if (!hardmin_done && total_delay_us > 0) { + /* hardmin not yet fulfilled, wait 500us and retry */ + udelay(500); + total_delay_us += 500; + + smu_print("SMU Wait hard min status for %d us\n", total_delay_us); + } + + hardmin_status = dcn401_smu_get_hard_min_status(clk_mgr, &no_timeout, &read_total_delay_us); + total_delay_us += read_total_delay_us; + hardmin_done = hardmin_status & hardmin_status_mask; + } + + return hardmin_done; +} + +/* Returns the actual frequency that was set in MHz, 0 on failure */ +unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz) +{ + uint32_t response = 0; + bool hard_min_done = false; + + /* bits 23:16 for clock type, lower 16 bits for frequency in MHz */ + uint32_t param = (clk << 16) | freq_mhz; + + smu_print("SMU Set hard min by freq: clk = %d, freq_mhz = %d MHz\n", clk, freq_mhz); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SetHardMinByFreq, param, &response); + + /* wait until hardmin acknowledged */ + hard_min_done = dcn401_smu_wait_hard_min_status(clk_mgr, clk); + smu_print("SMU Frequency set = %d MHz, hard_min_done %d\n", response, hard_min_done); + + return response; +} + +void dcn401_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable) +{ + smu_print("SMU to wait for DMCUB ack for MCLK : %d\n", enable); + + dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetAlwaysWaitDmcubResp, enable ? 
1 : 0, NULL); +} + +void dcn401_smu_indicate_drr_status(struct clk_mgr_internal *clk_mgr, bool mod_drr_for_pstate) +{ + smu_print("SMU Set indicate drr status = %d\n", mod_drr_for_pstate); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_IndicateDrrStatus, mod_drr_for_pstate ? 1 : 0, NULL); +} + +bool dcn401_smu_set_idle_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr, + uint16_t uclk_freq_mhz, + uint16_t fclk_freq_mhz) +{ + uint32_t response = 0; + bool success; + + /* 15:0 for uclk, 31:16 for fclk */ + uint32_t param = (fclk_freq_mhz << 16) | uclk_freq_mhz; + + smu_print("SMU Set idle hardmin by freq: uclk_freq_mhz = %d MHz, fclk_freq_mhz = %d MHz\n", uclk_freq_mhz, fclk_freq_mhz); + + success = dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_IdleUclkFclk, param, &response); + + /* wait until hardmin acknowledged */ + success &= dcn401_smu_wait_hard_min_status(clk_mgr, PPCLK_UCLK); + smu_print("SMU hard_min_done %d\n", success); + + return success; +} + +bool dcn401_smu_set_active_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr, + uint16_t uclk_freq_mhz, + uint16_t fclk_freq_mhz) +{ + uint32_t response = 0; + bool success; + + /* 15:0 for uclk, 31:16 for fclk */ + uint32_t param = (fclk_freq_mhz << 16) | uclk_freq_mhz; + + smu_print("SMU Set active hardmin by freq: uclk_freq_mhz = %d MHz, fclk_freq_mhz = %d MHz\n", uclk_freq_mhz, fclk_freq_mhz); + + success = dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_ActiveUclkFclk, param, &response); + + /* wait until hardmin acknowledged */ + success &= dcn401_smu_wait_hard_min_status(clk_mgr, PPCLK_UCLK); + smu_print("SMU hard_min_done %d\n", success); + + return success; +} + +bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr, + uint16_t uclk_freq_mhz, + uint16_t fclk_freq_mhz) +{ + uint32_t response = 0; + bool success; + + /* 15:0 for uclk, 31:16 for fclk */ + uint32_t param = (fclk_freq_mhz << 16) | uclk_freq_mhz; + + smu_print("SMU Set subvp hardmin by freq: uclk_freq_mhz = %d MHz, fclk_freq_mhz = %d MHz\n", uclk_freq_mhz, fclk_freq_mhz); + + success = dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SubvpUclkFclk, param, &response); + + return success; +} + +void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz) +{ + smu_print("SMU Set min deep sleep dcef clk: freq_mhz = %d MHz\n", freq_mhz); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SetMinDeepSleepDcfclk, freq_mhz, NULL); +} + +void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays) +{ + smu_print("SMU Set num of displays: num_displays = %d\n", num_displays); + + dcn401_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_NumOfDisplays, num_displays, NULL); +} + +unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr) +{ + unsigned int response = 0; + + dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_GetNumUmcChannels, 0, &response); + + smu_print("SMU Get Num UMC Channels: num_umc_channels = %d\n", response); + + return response; +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h new file mode 100644 index 000000000000..e02eb1294b37 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. 
+ +#ifndef __DCN401_CLK_MGR_SMU_MSG_H_ +#define __DCN401_CLK_MGR_SMU_MSG_H_ + +#include "os_types.h" +#include "core_types.h" +#include "dcn32/dcn32_clk_mgr_smu_msg.h" + +void dcn401_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support); +void dcn401_smu_send_uclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support); +void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways); +void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); +void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); +unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz); +void dcn401_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable); +void dcn401_smu_indicate_drr_status(struct clk_mgr_internal *clk_mgr, bool mod_drr_for_pstate); +bool dcn401_smu_set_idle_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr, + uint16_t uclk_freq_mhz, + uint16_t fclk_freq_mhz); +bool dcn401_smu_set_active_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr, + uint16_t uclk_freq_mhz, + uint16_t fclk_freq_mhz); +bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr, + uint16_t uclk_freq_mhz, + uint16_t fclk_freq_mhz); +void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz); +void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays); +unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr); + +#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_smu14_driver_if.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_smu14_driver_if.h new file mode 100644 index 000000000000..36034b32870c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_smu14_driver_if.h @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. +// +// This is a stripped-down version of the smu13_driver_if.h file for the relevant DAL interfaces. + +#define SMU14_DRIVER_IF_VERSION 0x1 + +//Only Clks that have DPM descriptors are listed here +typedef enum { + PPCLK_GFXCLK = 0, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_DCLK_0, + PPCLK_VCLK_0, + PPCLK_DISPCLK, + PPCLK_DPPCLK, + PPCLK_DPREFCLK, + PPCLK_DCFCLK, + PPCLK_DTBCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef struct { + uint8_t WmSetting; + uint8_t Flags; + uint8_t Padding[2]; + +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WATERMARKS_CLOCK_RANGE = 0, + WATERMARKS_DUMMY_PSTATE, + WATERMARKS_MALL, + WATERMARKS_COUNT, +} WATERMARKS_FLAGS_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES]; +} Watermarks_t; + +typedef struct { + Watermarks_t Watermarks; + uint32_t Spare[16]; + + uint32_t MmHubPadding[8]; // SMU internal use +} WatermarksExternal_t; + +// Table types +#define TABLE_PMFW_PPTABLE 0 +#define TABLE_COMBO_PPTABLE 1 +#define TABLE_WATERMARKS 2 +#define TABLE_AVFS_PSM_DEBUG 3 +#define TABLE_PMSTATUSLOG 4 +#define TABLE_SMU_METRICS 5 +#define TABLE_DRIVER_SMU_CONFIG 6 +#define TABLE_ACTIVITY_MONITOR_COEFF 7 +#define TABLE_OVERDRIVE 8 +#define TABLE_I2C_COMMANDS 9 +#define TABLE_DRIVER_INFO 10 +#define TABLE_ECCINFO 11 +#define TABLE_COUNT 12
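The core mechanism this patch introduces is the two-phase block sequence: dcn401_build_update_bandwidth_clocks_sequence() and dcn401_build_update_display_clocks_sequence() append tagged steps into the fixed 30-entry block_sequence array, and dcn401_execute_block_sequence() replays them in order through one switch statement. Below is a minimal standalone sketch of that pattern; the identifiers (step, step_params, build_sequence, execute_sequence) are simplified stand-ins, not the driver's real types, and the "SMU calls" are printf stubs.

/* build/execute block-sequence pattern, illustrative only */
#include <stdio.h>

#define MAX_SEQUENCE_SIZE 30

enum step_func {
    STEP_UPDATE_NUM_DISPLAYS,
    STEP_UPDATE_HARDMIN_PPCLK,
};

union step_params {
    struct { unsigned int num_displays; } update_num_displays;
    struct { unsigned int ppclk; unsigned int freq_mhz; } update_hardmin;
};

struct step {
    union step_params params;
    enum step_func func;
};

/* builder: append only the steps this state transition actually needs */
static unsigned int build_sequence(struct step *seq, int display_count, int dcfclk_mhz)
{
    unsigned int num_steps = 0;

    if (display_count >= 0) {
        seq[num_steps].params.update_num_displays.num_displays = display_count;
        seq[num_steps].func = STEP_UPDATE_NUM_DISPLAYS;
        num_steps++;
    }
    if (dcfclk_mhz > 0) {
        seq[num_steps].params.update_hardmin.ppclk = 9; /* stand-in for PPCLK_DCFCLK */
        seq[num_steps].params.update_hardmin.freq_mhz = dcfclk_mhz;
        seq[num_steps].func = STEP_UPDATE_HARDMIN_PPCLK;
        num_steps++;
    }
    return num_steps;
}

/* executor: one switch, one (mocked) SMU message per step */
static void execute_sequence(const struct step *seq, unsigned int num_steps)
{
    unsigned int i;

    for (i = 0; i < num_steps; i++) {
        switch (seq[i].func) {
        case STEP_UPDATE_NUM_DISPLAYS:
            printf("NumOfDisplays(%u)\n", seq[i].params.update_num_displays.num_displays);
            break;
        case STEP_UPDATE_HARDMIN_PPCLK:
            printf("SetHardMinByFreq(clk %u, %u MHz)\n",
                   seq[i].params.update_hardmin.ppclk,
                   seq[i].params.update_hardmin.freq_mhz);
            break;
        }
    }
}

int main(void)
{
    struct step seq[MAX_SEQUENCE_SIZE];
    unsigned int num_steps = build_sequence(seq, 1, 600);

    execute_sequence(seq, num_steps);
    return 0;
}

Separating building from execution keeps the policy decisions (which clocks changed, whether lowering is safe, ordering of DTO vs. refclk updates) in the builders, while the executor is a flat, easily traced list of SMU and DCCG operations.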
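dcn401_smu_send_msg_with_param() drives a three-register mailbox: poll the response register until the previous transaction has drained, clear it, write the argument register, then write the message ID, which triggers the SMU; on success the SMU's output is read back from the argument register. The sketch below models that handshake so it runs standalone; the registers and the SMU are mocked with a plain array (the mock answers immediately, whereas the real code polls with 10 us delays for up to 200000 retries), and the mock's "+1 result" behavior is invented purely for illustration.

/* mailbox handshake sketch, registers and SMU mocked */
#include <stdbool.h>
#include <stdio.h>

#define DALSMC_Result_OK 0x1

enum { DAL_MSG_REG, DAL_ARG_REG, DAL_RESP_REG, NUM_REGS };

static unsigned int regs[NUM_REGS];

static unsigned int reg_read(int reg) { return regs[reg]; }

/* mock: writing a message ID makes the "SMU" respond immediately */
static void reg_write(int reg, unsigned int val)
{
    regs[reg] = val;
    if (reg == DAL_MSG_REG) {
        regs[DAL_ARG_REG] += 1; /* pretend the SMU computed a result */
        regs[DAL_RESP_REG] = DALSMC_Result_OK;
    }
}

/* wait ends when the response register is NOT EQUAL to zero */
static unsigned int wait_for_response(unsigned int max_retries)
{
    unsigned int reg = 0;

    do {
        reg = reg_read(DAL_RESP_REG);
        if (reg)
            break;
        /* the real code sleeps between polls (udelay/msleep) */
    } while (max_retries--);

    return reg;
}

static bool send_msg_with_param(unsigned int msg_id, unsigned int param_in, unsigned int *param_out)
{
    wait_for_response(200000);        /* 1. wait for the mailbox to be free */
    reg_write(DAL_RESP_REG, 0);       /* 2. clear the previous response */
    reg_write(DAL_ARG_REG, param_in); /* 3. argument goes in first */
    reg_write(DAL_MSG_REG, msg_id);   /* 4. message ID triggers the SMU */

    /* 5. poll for completion, then read the output from the ARG register */
    if (wait_for_response(200000) == DALSMC_Result_OK) {
        if (param_out)
            *param_out = reg_read(DAL_ARG_REG);
        return true;
    }
    return false;
}

int main(void)
{
    unsigned int out = 0;

    regs[DAL_RESP_REG] = DALSMC_Result_OK; /* mailbox starts out free */
    if (send_msg_with_param(0x1C /* GetNumUmcChannels */, 0, &out))
        printf("response arg = %u\n", out);
    return 0;
}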
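Two parameter encodings recur in the hard-min messages: DALSMC_MSG_SetHardMinByFreq packs the PPCLK index into bits 23:16 and the frequency in MHz into bits 15:0, while the combined Idle/Active/SubVP UclkFclk messages pack the FCLK frequency in MHz into bits 31:16 and the UCLK frequency into bits 15:0. A small sketch of the packing follows; the helper names and example frequencies are arbitrary, only the bit layouts come from the code above.

/* hard-min message parameter packing, illustrative only */
#include <stdio.h>

static unsigned int pack_hardmin_param(unsigned int ppclk, unsigned int freq_mhz)
{
    /* bits 23:16 = clock type, bits 15:0 = frequency in MHz */
    return (ppclk << 16) | (freq_mhz & 0xFFFF);
}

static unsigned int pack_uclk_fclk_param(unsigned int uclk_mhz, unsigned int fclk_mhz)
{
    /* bits 31:16 = FCLK in MHz, bits 15:0 = UCLK in MHz */
    return (fclk_mhz << 16) | (uclk_mhz & 0xFFFF);
}

int main(void)
{
    /* PPCLK_UCLK is index 2 in the PPCLK_e enum from the driver_if header */
    printf("SetHardMinByFreq param = 0x%08x\n", pack_hardmin_param(2, 1000));
    printf("IdleUclkFclk param     = 0x%08x\n", pack_uclk_fclk_param(500, 1200));
    return 0;
}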