Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c')
| -rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 460 |
1 file changed, 247 insertions, 213 deletions

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 0bcd4fe0ef17..7c9f77124ab2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -136,7 +136,7 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(PowerDownJpeg,		PPSMC_MSG_PowerDownJpeg,		0),
 	MSG_MAP(BacoAudioD3PME,		PPSMC_MSG_BacoAudioD3PME,		0),
 	MSG_MAP(ArmD3,			PPSMC_MSG_ArmD3,			0),
-	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange,	0),
+	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALDisableDummyPstateChange,	0),
 	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE,	PPSMC_MSG_DALEnableDummyPstateChange,	0),
 	MSG_MAP(GetVoltageByDpm,		PPSMC_MSG_GetVoltageByDpm,		0),
 	MSG_MAP(GetVoltageByDpmOverdrive,	PPSMC_MSG_GetVoltageByDpmOverdrive,	0),
@@ -304,7 +304,8 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
 				| FEATURE_MASK(FEATURE_GFX_SS_BIT)
 				| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
 				| FEATURE_MASK(FEATURE_FW_CTF_BIT)
-				| FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+				| FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT)
+				| FEATURE_MASK(FEATURE_TEMP_DEPENDENT_VMIN_BIT);
 
 	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
@@ -344,8 +345,8 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
 
 	/* DPM UCLK enablement should be skipped for navi10 A0 secure board */
 	if (!(is_asic_secure(smu) &&
-	     (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
-	     (adev->rev_id == 0)) &&
+	      (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&
+	      (adev->rev_id == 0)) &&
 	    (adev->pm.pp_feature & PP_MCLK_DPM_MASK))
 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
 				| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
@@ -353,7 +354,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
 
 	/* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
 	if (is_asic_secure(smu) &&
-	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
+	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&
 	    (adev->rev_id == 0))
 		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
 
@@ -494,6 +495,8 @@ static int navi10_tables_init(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_table *tables = smu_table->tables;
+	struct smu_table *dummy_read_1_table =
+		&smu_table->dummy_read_1_table;
 
 	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), PAGE_SIZE,
 		       AMDGPU_GEM_DOMAIN_VRAM);
@@ -513,6 +516,10 @@ static int navi10_tables_init(struct smu_context *smu)
 	SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfig_t),
 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 
+	dummy_read_1_table->size = 0x40000;
+	dummy_read_1_table->align = PAGE_SIZE;
+	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
 	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t), GFP_KERNEL);
 	if (!smu_table->metrics_table)
@@ -549,7 +556,7 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
 					      MetricsMember_t member,
 					      uint32_t *value)
 {
-	struct smu_table_context *smu_table= &smu->smu_table;
+	struct smu_table_context *smu_table = &smu->smu_table;
 	SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
 	int ret = 0;
@@ -635,7 +642,7 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
 				       MetricsMember_t member,
 				       uint32_t *value)
 {
-	struct smu_table_context *smu_table= &smu->smu_table;
+	struct smu_table_context *smu_table = &smu->smu_table;
 	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
 	int ret = 0;
@@ -724,7 +731,7 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
 					      MetricsMember_t member,
 					      uint32_t *value)
 {
-	struct smu_table_context *smu_table= &smu->smu_table;
+	struct smu_table_context *smu_table = &smu->smu_table;
 	SmuMetrics_NV12_legacy_t *metrics = (SmuMetrics_NV12_legacy_t *)smu_table->metrics_table;
 	int ret = 0;
@@ -810,7 +817,7 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
 				       MetricsMember_t member,
 				       uint32_t *value)
 {
-	struct smu_table_context *smu_table= &smu->smu_table;
+	struct smu_table_context *smu_table = &smu->smu_table;
 	SmuMetrics_NV12_t *metrics = (SmuMetrics_NV12_t *)smu_table->metrics_table;
 	int ret = 0;
@@ -900,18 +907,11 @@ static int navi1x_get_smu_metrics_data(struct smu_context *smu,
 				       uint32_t *value)
 {
 	struct amdgpu_device *adev = smu->adev;
-	uint32_t smu_version;
 	int ret = 0;
 
-	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
-	if (ret) {
-		dev_err(adev->dev, "Failed to get smu version!\n");
-		return ret;
-	}
-
-	switch (adev->ip_versions[MP1_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
 	case IP_VERSION(11, 0, 9):
-		if (smu_version > 0x00341C00)
+		if (smu->smc_fw_version > 0x00341C00)
 			ret = navi12_get_smu_metrics_data(smu, member, value);
 		else
 			ret = navi12_get_legacy_smu_metrics_data(smu, member, value);
@@ -919,8 +919,12 @@
 	case IP_VERSION(11, 0, 0):
 	case IP_VERSION(11, 0, 5):
 	default:
-		if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
-		    ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
+		if (((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+		      IP_VERSION(11, 0, 5)) &&
+		     smu->smc_fw_version > 0x00351F00) ||
+		    ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+		      IP_VERSION(11, 0, 0)) &&
+		     smu->smc_fw_version > 0x002A3B00))
 			ret = navi10_get_smu_metrics_data(smu, member, value);
 		else
 			ret = navi10_get_legacy_smu_metrics_data(smu, member, value);
@@ -1131,7 +1135,9 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 	return 0;
 }
 
-static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int navi10_dpm_set_vcn_enable(struct smu_context *smu,
+				     bool enable,
+				     int inst)
 {
 	int ret = 0;
@@ -1215,19 +1221,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
 						   value);
 }
 
-static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
+static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
 {
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	DpmDescriptor_t *dpm_desc = NULL;
-	uint32_t clk_index = 0;
+	int clk_index = 0;
 
 	clk_index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK, clk_type);
+	if (clk_index < 0)
+		return clk_index;
+
 	dpm_desc = &pptable->DpmDescriptor[clk_index];
 
 	/* 0 - Fine grained DPM, 1 - Discrete DPM */
-	return dpm_desc->SnapToDiscrete == 0;
+	return dpm_desc->SnapToDiscrete == 0 ? 1 : 0;
 }
 
 static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
@@ -1283,7 +1292,11 @@ static int navi10_emit_clk_levels(struct smu_context *smu,
 		if (ret)
 			return ret;
 
-		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
+		if (ret < 0)
+			return ret;
+
+		if (!ret) {
 			for (i = 0; i < count; i++) {
 				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
@@ -1378,8 +1391,6 @@ static int navi10_emit_clk_levels(struct smu_context *smu,
 			case 2:
 				curve_settings = &od_table->GfxclkFreq3;
 				break;
-			default:
-				break;
 			}
 			*offset += sysfs_emit_at(buf, *offset, "%d: %uMHz %umV\n",
 						 i, curve_settings[0],
@@ -1458,7 +1469,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 			enum smu_clk_type clk_type, char *buf)
 {
 	uint16_t *curve_settings;
-	int i, levels, size = 0, ret = 0;
+	int i, levels, size = 0, ret = 0, start_offset = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t freq_values[3] = {0};
 	uint32_t mark_index = 0;
@@ -1473,6 +1484,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 	uint32_t min_value, max_value;
 
 	smu_cmn_get_sysfs_buf(&buf, &size);
+	start_offset = size;
 
 	switch (clk_type) {
 	case SMU_GFXCLK:
@@ -1486,17 +1498,21 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 	case SMU_DCEFCLK:
 		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
 		if (ret)
-			return size;
+			return size - start_offset;
 
 		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
 		if (ret)
-			return size;
+			return size - start_offset;
+
+		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
+		if (ret < 0)
+			return ret;
 
-		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+		if (!ret) {
 			for (i = 0; i < count; i++) {
 				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
 				if (ret)
-					return size;
+					return size - start_offset;
 
 				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
 						cur_value == value ? "*" : "");
@@ -1504,10 +1520,10 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 		} else {
 			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
 			if (ret)
-				return size;
+				return size - start_offset;
 			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
 			if (ret)
-				return size;
+				return size - start_offset;
 
 			freq_values[1] = cur_value;
 			mark_index = cur_value == freq_values[0] ? 0 :
@@ -1579,8 +1595,6 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 			case 2:
 				curve_settings = &od_table->GfxclkFreq3;
 				break;
-			default:
-				break;
 			}
 			size += sysfs_emit_at(buf, size, "%d: %uMHz %umV\n",
 					      i, curve_settings[0],
@@ -1640,14 +1654,14 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 		break;
 	}
 
-	return size;
+	return size - start_offset;
 }
 
 static int navi10_force_clk_levels(struct smu_context *smu,
 				   enum smu_clk_type clk_type, uint32_t mask)
 {
-	int ret = 0, size = 0;
+	int ret = 0;
 	uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
 
 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
@@ -1661,32 +1675,36 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 	case SMU_UCLK:
 	case SMU_FCLK:
 		/* There is only 2 levels for fine grained DPM */
-		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
+		if (ret < 0)
+			return ret;
+
+		if (ret) {
 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
 			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
 		}
 
 		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
 		if (ret)
-			return size;
+			return 0;
 
 		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
 		if (ret)
-			return size;
+			return 0;
 
-		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
 		if (ret)
-			return size;
+			return 0;
 		break;
 	case SMU_DCEFCLK:
-		dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
+		dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
 		break;
 	default:
 		break;
 	}
 
-	return size;
+	return 0;
 }
 
 static int navi10_populate_umd_state_clk(struct smu_context *smu)
@@ -1705,7 +1723,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
 	uint32_t sclk_freq;
 
 	pstate_table->gfxclk_pstate.min = gfx_table->min;
-	switch (adev->ip_versions[MP1_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
 	case IP_VERSION(11, 0, 0):
 		switch (adev->pdev->revision) {
 		case 0xf0: /* XTX */
@@ -1974,7 +1992,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
 	size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
 			" ",
 			2,
-			"MEMLK",
+			"MEMCLK",
 			activity_monitor.Mem_FPS,
 			activity_monitor.Mem_MinFreqStep,
 			activity_monitor.Mem_MinActiveFreqType,
@@ -1989,81 +2007,122 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
 	return size;
 }
 
-static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+#define NAVI10_CUSTOM_PARAMS_COUNT 10
+#define NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT 3
+#define NAVI10_CUSTOM_PARAMS_SIZE (NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT * NAVI10_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int navi10_set_power_profile_mode_coeff(struct smu_context *smu,
+					       long *input)
 {
 	DpmActivityMonitorCoeffInt_t activity_monitor;
-	int workload_type, ret = 0;
+	int ret, idx;
 
-	smu->power_profile_mode = input[size];
+	ret = smu_cmn_update_table(smu,
+				   SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+				   (void *)(&activity_monitor), false);
+	if (ret) {
+		dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+		return ret;
+	}
 
-	if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
-		return -EINVAL;
+	idx = 0 * NAVI10_CUSTOM_PARAMS_COUNT;
+	if (input[idx]) {
+		/* Gfxclk */
+		activity_monitor.Gfx_FPS = input[idx + 1];
+		activity_monitor.Gfx_MinFreqStep = input[idx + 2];
+		activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
+		activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
+		activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
+		activity_monitor.Gfx_BoosterFreq = input[idx + 6];
+		activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
+		activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
+		activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
+	}
+	idx = 1 * NAVI10_CUSTOM_PARAMS_COUNT;
+	if (input[idx]) {
+		/* Socclk */
+		activity_monitor.Soc_FPS = input[idx + 1];
+		activity_monitor.Soc_MinFreqStep = input[idx + 2];
+		activity_monitor.Soc_MinActiveFreqType = input[idx + 3];
+		activity_monitor.Soc_MinActiveFreq = input[idx + 4];
+		activity_monitor.Soc_BoosterFreqType = input[idx + 5];
+		activity_monitor.Soc_BoosterFreq = input[idx + 6];
+		activity_monitor.Soc_PD_Data_limit_c = input[idx + 7];
+		activity_monitor.Soc_PD_Data_error_coeff = input[idx + 8];
+		activity_monitor.Soc_PD_Data_error_rate_coeff = input[idx + 9];
+	}
+	idx = 2 * NAVI10_CUSTOM_PARAMS_COUNT;
+	if (input[idx]) {
+		/* Memclk */
+		activity_monitor.Mem_FPS = input[idx + 1];
+		activity_monitor.Mem_MinFreqStep = input[idx + 2];
+		activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
+		activity_monitor.Mem_MinActiveFreq = input[idx + 4];
+		activity_monitor.Mem_BoosterFreqType = input[idx + 5];
+		activity_monitor.Mem_BoosterFreq = input[idx + 6];
+		activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
+		activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
+		activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
+	}
+
+	ret = smu_cmn_update_table(smu,
+				   SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+				   (void *)(&activity_monitor), true);
+	if (ret) {
+		dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+		return ret;
 	}
 
-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+	return ret;
+}
 
-		ret = smu_cmn_update_table(smu,
-				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
-				       (void *)(&activity_monitor), false);
-		if (ret) {
-			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
-			return ret;
-		}
+static int navi10_set_power_profile_mode(struct smu_context *smu,
+					 u32 workload_mask,
+					 long *custom_params,
+					 u32 custom_params_max_idx)
+{
+	u32 backend_workload_mask = 0;
+	int ret, idx = -1, i;
 
-		switch (input[0]) {
-		case 0: /* Gfxclk */
-			activity_monitor.Gfx_FPS = input[1];
-			activity_monitor.Gfx_MinFreqStep = input[2];
-			activity_monitor.Gfx_MinActiveFreqType = input[3];
-			activity_monitor.Gfx_MinActiveFreq = input[4];
-			activity_monitor.Gfx_BoosterFreqType = input[5];
-			activity_monitor.Gfx_BoosterFreq = input[6];
-			activity_monitor.Gfx_PD_Data_limit_c = input[7];
-			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
-			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
-			break;
-		case 1: /* Socclk */
-			activity_monitor.Soc_FPS = input[1];
-			activity_monitor.Soc_MinFreqStep = input[2];
-			activity_monitor.Soc_MinActiveFreqType = input[3];
-			activity_monitor.Soc_MinActiveFreq = input[4];
-			activity_monitor.Soc_BoosterFreqType = input[5];
-			activity_monitor.Soc_BoosterFreq = input[6];
-			activity_monitor.Soc_PD_Data_limit_c = input[7];
-			activity_monitor.Soc_PD_Data_error_coeff = input[8];
-			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
-			break;
-		case 2: /* Memlk */
-			activity_monitor.Mem_FPS = input[1];
-			activity_monitor.Mem_MinFreqStep = input[2];
-			activity_monitor.Mem_MinActiveFreqType = input[3];
-			activity_monitor.Mem_MinActiveFreq = input[4];
-			activity_monitor.Mem_BoosterFreqType = input[5];
-			activity_monitor.Mem_BoosterFreq = input[6];
-			activity_monitor.Mem_PD_Data_limit_c = input[7];
-			activity_monitor.Mem_PD_Data_error_coeff = input[8];
-			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
-			break;
-		}
+	smu_cmn_get_backend_workload_mask(smu, workload_mask,
+					  &backend_workload_mask);
 
-		ret = smu_cmn_update_table(smu,
-				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
-				       (void *)(&activity_monitor), true);
+	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+		if (!smu->custom_profile_params) {
+			smu->custom_profile_params = kzalloc(NAVI10_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+			if (!smu->custom_profile_params)
+				return -ENOMEM;
+		}
+		if (custom_params && custom_params_max_idx) {
+			if (custom_params_max_idx != NAVI10_CUSTOM_PARAMS_COUNT)
+				return -EINVAL;
+			if (custom_params[0] >= NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT)
+				return -EINVAL;
+			idx = custom_params[0] * NAVI10_CUSTOM_PARAMS_COUNT;
+			smu->custom_profile_params[idx] = 1;
+			for (i = 1; i < custom_params_max_idx; i++)
+				smu->custom_profile_params[idx + i] = custom_params[i];
+		}
+		ret = navi10_set_power_profile_mode_coeff(smu,
+							  smu->custom_profile_params);
 		if (ret) {
-			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+			if (idx != -1)
+				smu->custom_profile_params[idx] = 0;
 			return ret;
 		}
+	} else if (smu->custom_profile_params) {
+		memset(smu->custom_profile_params, 0, NAVI10_CUSTOM_PARAMS_SIZE);
 	}
 
-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-	workload_type = smu_cmn_to_asic_specific_index(smu,
-						       CMN2ASIC_MAPPING_WORKLOAD,
-						       smu->power_profile_mode);
-	if (workload_type < 0)
-		return -EINVAL;
-	smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-				    1 << workload_type, NULL);
+	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+					      backend_workload_mask, NULL);
+	if (ret) {
+		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+			workload_mask);
+		if (idx != -1)
+			smu->custom_profile_params[idx] = 0;
+		return ret;
+	}
 
 	return ret;
 }
@@ -2175,7 +2234,7 @@ static int navi10_read_sensor(struct smu_context *smu,
 	struct smu_table_context *table_context = &smu->smu_table;
 	PPTable_t *pptable = table_context->driver_pptable;
 
-	if(!data || !size)
+	if (!data || !size)
 		return -EINVAL;
 
 	switch (sensor) {
@@ -2195,7 +2254,7 @@ static int navi10_read_sensor(struct smu_context *smu,
 						  (uint32_t *)data);
 		*size = 4;
 		break;
-	case AMDGPU_PP_SENSOR_GPU_POWER:
+	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
 		ret = navi1x_get_smu_metrics_data(smu,
 						  METRICS_AVERAGE_SOCKETPOWER,
 						  (uint32_t *)data);
@@ -2233,6 +2292,7 @@ static int navi10_read_sensor(struct smu_context *smu,
 		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
 		*size = 4;
 		break;
+	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
 	default:
 		ret = -EOPNOTSUPP;
 		break;
@@ -2310,30 +2370,31 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
 	uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
 	uint32_t max_memory_clock = max_sustainable_clocks->uclock;
 
-	if(smu->disable_uclk_switch == disable_memory_clock_switch)
+	if (smu->disable_uclk_switch == disable_memory_clock_switch)
 		return 0;
 
-	if(disable_memory_clock_switch)
+	if (disable_memory_clock_switch)
 		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0);
 	else
 		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0);
 
-	if(!ret)
+	if (!ret)
 		smu->disable_uclk_switch = disable_memory_clock_switch;
 
 	return ret;
 }
 
 static int navi10_get_power_limit(struct smu_context *smu,
-					uint32_t *current_power_limit,
-					uint32_t *default_power_limit,
-					uint32_t *max_power_limit)
+				  uint32_t *current_power_limit,
+				  uint32_t *default_power_limit,
+				  uint32_t *max_power_limit,
+				  uint32_t *min_power_limit)
 {
 	struct smu_11_0_powerplay_table *powerplay_table =
 		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
 	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
-	uint32_t power_limit, od_percent;
+	uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
 
 	if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
 		/* the last hope to figure out the ppt limit */
@@ -2350,31 +2411,42 @@ static int navi10_get_power_limit(struct smu_context *smu,
 	if (default_power_limit)
 		*default_power_limit = power_limit;
 
-	if (max_power_limit) {
+	if (powerplay_table) {
 		if (smu->od_enabled &&
-		navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
-			od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+		    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+			od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+		} else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+			od_percent_upper = 0;
+			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+		}
+	}
 
-			dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+		od_percent_upper, od_percent_lower, power_limit);
 
-			power_limit *= (100 + od_percent);
-			power_limit /= 100;
-		}
+	if (max_power_limit) {
+		*max_power_limit = power_limit * (100 + od_percent_upper);
+		*max_power_limit /= 100;
+	}
 
-		*max_power_limit = power_limit;
+	if (min_power_limit) {
+		*min_power_limit = power_limit * (100 - od_percent_lower);
+		*min_power_limit /= 100;
 	}
 
 	return 0;
 }
 
 static int navi10_update_pcie_parameters(struct smu_context *smu,
-					 uint32_t pcie_gen_cap,
-					 uint32_t pcie_width_cap)
+					 uint8_t pcie_gen_cap,
+					 uint8_t pcie_width_cap)
 {
 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	uint32_t smu_pcie_arg;
-	int ret, i;
+	int ret = 0;
+	int i;
 
 	/* lclk dpm table setup */
 	for (i = 0; i < MAX_PCIE_CONF; i++) {
@@ -2383,25 +2455,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
 	}
 
 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
-		smu_pcie_arg = (i << 16) |
-			((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
-				(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
-					pptable->PcieLaneCount[i] : pcie_width_cap);
-		ret = smu_cmn_send_smc_msg_with_param(smu,
-					  SMU_MSG_OverridePcieParameters,
-					  smu_pcie_arg,
-					  NULL);
-
-		if (ret)
-			return ret;
-
-		if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
-			dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
-		if (pptable->PcieLaneCount[i] > pcie_width_cap)
-			dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+		if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
+		    pptable->PcieLaneCount[i] > pcie_width_cap) {
+			dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
+				pptable->PcieGenSpeed[i] > pcie_gen_cap ?
+				pcie_gen_cap : pptable->PcieGenSpeed[i];
+			dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
+				pptable->PcieLaneCount[i] > pcie_width_cap ?
+				pcie_width_cap : pptable->PcieLaneCount[i];
+			smu_pcie_arg = i << 16;
+			smu_pcie_arg |= pcie_gen_cap << 8;
+			smu_pcie_arg |= pcie_width_cap;
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+					SMU_MSG_OverridePcieParameters,
+					smu_pcie_arg,
+					NULL);
+			if (ret)
+				break;
+		}
 	}
 
-	return 0;
+	return ret;
 }
 
 static inline void navi10_dump_od_table(struct smu_context *smu,
@@ -2552,7 +2626,8 @@ static int navi10_set_default_od_settings(struct smu_context *smu)
 	return 0;
 }
 
-static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
+static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size)
+{
 	int i;
 	int ret = 0;
 	struct smu_table_context *table_context = &smu->smu_table;
@@ -2745,8 +2820,8 @@ static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)
 	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
 		return false;
 
-	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0) ||
-	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5))
+	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0) ||
+	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 5))
 		return true;
 
 	return false;
@@ -2814,7 +2889,7 @@ static int navi10_set_dummy_pstates_table_location(struct smu_context *smu)
 		dummy_table += 0x1000;
 	}
 
-	amdgpu_asic_flush_hdp(smu->adev, NULL);
+	amdgpu_hdp_flush(smu->adev, NULL);
 
 	ret = smu_cmn_send_smc_msg_with_param(smu,
 					      SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH,
@@ -2834,19 +2909,12 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
 	struct amdgpu_device *adev = smu->adev;
 	uint8_t umc_fw_greater_than_v136 = false;
 	uint8_t umc_fw_disable_cdr = false;
-	uint32_t pmfw_version;
 	uint32_t param;
 	int ret = 0;
 
 	if (!navi10_need_umc_cdr_workaround(smu))
 		return 0;
 
-	ret = smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
-	if (ret) {
-		dev_err(adev->dev, "Failed to get smu version!\n");
-		return ret;
-	}
-
 	/*
 	 * The messages below are only supported by Navi10 42.53.0 and later
 	 * PMFWs and Navi14 53.29.0 and later PMFWs.
@@ -2854,8 +2922,10 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
 	 *	- PPSMC_MSG_SetDriverDummyTableDramAddrLow
 	 *	- PPSMC_MSG_GetUMCFWWA
 	 */
-	if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (pmfw_version >= 0x2a3500)) ||
-	    ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && (pmfw_version >= 0x351D00))) {
+	if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&
+	     (smu->smc_fw_version >= 0x2a3500)) ||
+	    ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 5)) &&
+	     (smu->smc_fw_version >= 0x351D00))) {
 		ret = smu_cmn_send_smc_msg_with_param(smu,
 						      SMU_MSG_GET_UMC_FW_WA,
 						      0,
@@ -2874,13 +2944,15 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
 			return 0;
 
 		if (umc_fw_disable_cdr) {
-			if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
+			if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+			    IP_VERSION(11, 0, 0))
 				return navi10_umc_hybrid_cdr_workaround(smu);
 		} else {
 			return navi10_set_dummy_pstates_table_location(smu);
 		}
 	} else {
-		if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
+		if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+		    IP_VERSION(11, 0, 0))
 			return navi10_umc_hybrid_cdr_workaround(smu);
 	}
 
@@ -3014,7 +3086,6 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	mutex_lock(&adev->pm.mutex);
 	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-	mutex_unlock(&adev->pm.mutex);
 	if (r)
 		goto fail;
@@ -3031,6 +3102,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	r = num_msgs;
 fail:
+	mutex_unlock(&adev->pm.mutex);
 	kfree(req);
 	return r;
 }
@@ -3074,10 +3146,10 @@ static int navi10_i2c_control_init(struct smu_context *smu)
 		control->quirks = &navi10_i2c_control_quirks;
 
 		i2c_set_adapdata(control, smu_i2c);
-		res = i2c_add_adapter(control);
+		res = devm_i2c_add_adapter(adev->dev, control);
 		if (res) {
 			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
-			goto Out_err;
+			return res;
 		}
 	}
@@ -3085,27 +3157,12 @@ static int navi10_i2c_control_init(struct smu_context *smu)
 	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
 
 	return 0;
-Out_err:
-	for ( ; i >= 0; i--) {
-		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
-		struct i2c_adapter *control = &smu_i2c->adapter;
-
-		i2c_del_adapter(control);
-	}
-	return res;
 }
 
 static void navi10_i2c_control_fini(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
-	int i;
-
-	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
-		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
-		struct i2c_adapter *control = &smu_i2c->adapter;
-		i2c_del_adapter(control);
-	}
 	adev->pm.ras_eeprom_i2c_bus = NULL;
 	adev->pm.fru_eeprom_i2c_bus = NULL;
 }
@@ -3338,18 +3395,11 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
 				      void **table)
 {
 	struct amdgpu_device *adev = smu->adev;
-	uint32_t smu_version;
 	int ret = 0;
 
-	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
-	if (ret) {
-		dev_err(adev->dev, "Failed to get smu version!\n");
-		return ret;
-	}
-
-	switch (adev->ip_versions[MP1_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
 	case IP_VERSION(11, 0, 9):
-		if (smu_version > 0x00341C00)
+		if (smu->smc_fw_version > 0x00341C00)
 			ret = navi12_get_gpu_metrics(smu, table);
 		else
 			ret = navi12_get_legacy_gpu_metrics(smu, table);
@@ -3357,11 +3407,15 @@
 	case IP_VERSION(11, 0, 0):
 	case IP_VERSION(11, 0, 5):
 	default:
-		if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
-		    ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
+		if (((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+		      IP_VERSION(11, 0, 5)) &&
+		     smu->smc_fw_version > 0x00351F00) ||
+		    ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+		      IP_VERSION(11, 0, 0)) &&
+		     smu->smc_fw_version > 0x002A3B00))
 			ret = navi10_get_gpu_metrics(smu, table);
 		else
-			ret =navi10_get_legacy_gpu_metrics(smu, table);
+			ret = navi10_get_legacy_gpu_metrics(smu, table);
 		break;
 	}
@@ -3376,7 +3430,7 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
 	uint32_t param = 0;
 
 	/* Navi12 does not support this */
-	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9))
+	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 9))
 		return 0;
 
 	/*
@@ -3406,26 +3460,8 @@ static int navi10_post_smu_init(struct smu_context *smu)
 		return 0;
 
 	ret = navi10_run_umc_cdr_workaround(smu);
-	if (ret) {
+	if (ret)
 		dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
-		return ret;
-	}
-
-	if (!smu->dc_controlled_by_gpio) {
-		/*
-		 * For Navi1X, manually switch it to AC mode as PMFW
-		 * may boot it with DC mode.
-		 */
-		ret = smu_v11_0_set_power_source(smu,
-						 adev->pm.ac_power ?
-						 SMU_POWER_SOURCE_AC :
-						 SMU_POWER_SOURCE_DC);
-		if (ret) {
-			dev_err(adev->dev, "Failed to switch to %s mode!\n",
-				adev->pm.ac_power ? "AC" : "DC");
-			return ret;
-		}
-	}
 
 	return ret;
 }
@@ -3545,9 +3581,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.register_irq_handler = smu_v11_0_register_irq_handler,
 	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
 	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
-	.baco_is_support = smu_v11_0_baco_is_support,
-	.baco_get_state = smu_v11_0_baco_get_state,
-	.baco_set_state = smu_v11_0_baco_set_state,
+	.get_bamaco_support = smu_v11_0_get_bamaco_support,
 	.baco_enter = navi10_baco_enter,
 	.baco_exit = navi10_baco_exit,
 	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
