Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c')
-rw-r--r--	drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 502
1 file changed, 254 insertions(+), 248 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index ce50ef46e73f..18d5d0704509 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -94,8 +94,7 @@
  */
 #define SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION 0x00443300
 
-static const struct smu_temperature_range smu13_thermal_policy[] =
-{
+static const struct smu_temperature_range smu13_thermal_policy[] = {
 	{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
 	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
 };
@@ -196,7 +195,7 @@ static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUN
 	ALDEBARAN_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF_BIT),
 	ALDEBARAN_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL_BIT),
 	ALDEBARAN_FEA_MAP(SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, FEATURE_OUT_OF_BAND_MONITOR_BIT),
-	ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,FEATURE_XGMI_PER_LINK_PWR_DWN),
+	ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DWN),
 	ALDEBARAN_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
 };
@@ -258,15 +257,40 @@ static int aldebaran_tables_init(struct smu_context *smu)
 	}
 
 	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
-	if (!smu_table->ecc_table)
+	if (!smu_table->ecc_table) {
+		kfree(smu_table->metrics_table);
+		kfree(smu_table->gpu_metrics_table);
 		return -ENOMEM;
+	}
 
 	return 0;
 }
 
+static int aldebaran_select_plpd_policy(struct smu_context *smu, int level)
+{
+	struct amdgpu_device *adev = smu->adev;
+
+	/* The message only works on master die and NACK will be sent
+	 * back for other dies, only send it on master die.
+	 */
+	if (adev->smuio.funcs->get_socket_id(adev) ||
+	    adev->smuio.funcs->get_die_id(adev))
+		return 0;
+
+	if (level == XGMI_PLPD_DEFAULT)
+		return smu_cmn_send_smc_msg_with_param(
+			smu, SMU_MSG_GmiPwrDnControl, 0, NULL);
+	else if (level == XGMI_PLPD_DISALLOW)
+		return smu_cmn_send_smc_msg_with_param(
+			smu, SMU_MSG_GmiPwrDnControl, 1, NULL);
+	else
+		return -EINVAL;
+}
+
 static int aldebaran_allocate_dpm_context(struct smu_context *smu)
 {
 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+	struct smu_dpm_policy *policy;
 
 	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
 				       GFP_KERNEL);
@@ -274,6 +298,20 @@ static int aldebaran_allocate_dpm_context(struct smu_context *smu)
 		return -ENOMEM;
 	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
 
+	smu_dpm->dpm_policies =
+		kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL);
+
+	if (!smu_dpm->dpm_policies)
+		return -ENOMEM;
+
+	policy = &(smu_dpm->dpm_policies->policies[0]);
+	policy->policy_type = PP_PM_POLICY_XGMI_PLPD;
+	policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT);
+	policy->current_level = XGMI_PLPD_DEFAULT;
+	policy->set_policy = aldebaran_select_plpd_policy;
+	smu_cmn_generic_plpd_policy_desc(policy);
+	smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD);
+
 	return 0;
 }
@@ -304,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
 	return 0;
 }
 
+static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
+					   enum smu_clk_type clk_type,
+					   uint32_t *min, uint32_t *max)
+{
+	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	struct smu_13_0_dpm_table *dpm_table;
+	uint32_t min_clk, max_clk;
+
+	if (amdgpu_sriov_vf(smu->adev)) {
+		switch (clk_type) {
+		case SMU_MCLK:
+		case SMU_UCLK:
+			dpm_table = &dpm_context->dpm_tables.uclk_table;
+			break;
+		case SMU_GFXCLK:
+		case SMU_SCLK:
+			dpm_table = &dpm_context->dpm_tables.gfx_table;
+			break;
+		case SMU_SOCCLK:
+			dpm_table = &dpm_context->dpm_tables.soc_table;
+			break;
+		case SMU_FCLK:
+			dpm_table = &dpm_context->dpm_tables.fclk_table;
+			break;
+		case SMU_VCLK:
+			dpm_table = &dpm_context->dpm_tables.vclk_table;
+			break;
+		case SMU_DCLK:
+			dpm_table = &dpm_context->dpm_tables.dclk_table;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		min_clk = dpm_table->min;
+		max_clk = dpm_table->max;
+
+		if (min) {
+			if (!min_clk)
+				return -ENODATA;
+			*min = min_clk;
+		}
+		if (max) {
+			if (!max_clk)
+				return -ENODATA;
+			*max = max_clk;
+		}
+
+	} else {
+		return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+	}
+
+	return 0;
+}
+
 static int aldebaran_set_default_dpm_table(struct smu_context *smu)
 {
 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -468,18 +561,12 @@ static bool aldebaran_is_primary(struct smu_context *smu)
 
 static int aldebaran_run_board_btc(struct smu_context *smu)
 {
-	u32 smu_version;
 	int ret;
 
 	if (!aldebaran_is_primary(smu))
 		return 0;
 
-	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
-	if (ret) {
-		dev_err(smu->adev->dev, "Failed to get smu version!\n");
-		return ret;
-	}
-	if (smu_version <= 0x00441d00)
+	if (smu->smc_fw_version <= 0x00441d00)
 		return 0;
 
 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
@@ -551,9 +638,9 @@ static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
 	return 0;
 }
 
-static int aldebaran_get_clk_table(struct smu_context *smu,
-				   struct pp_clock_levels_with_latency *clocks,
-				   struct smu_13_0_dpm_table *dpm_table)
+static void aldebaran_get_clk_table(struct smu_context *smu,
+				    struct pp_clock_levels_with_latency *clocks,
+				    struct smu_13_0_dpm_table *dpm_table)
 {
 	uint32_t i;
@@ -567,7 +654,6 @@ static int aldebaran_get_clk_table(struct smu_context *smu,
 		clocks->data[i].latency_in_us = 0;
 	}
 
-	return 0;
 }
 
 static int aldebaran_freqs_in_same_level(int32_t frequency1,
@@ -580,7 +666,7 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
 					  MetricsMember_t member,
 					  uint32_t *value)
 {
-	struct smu_table_context *smu_table= &smu->smu_table;
+	struct smu_table_context *smu_table = &smu->smu_table;
 	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
 	int ret = 0;
@@ -626,9 +712,10 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
 		break;
 	case METRICS_AVERAGE_SOCKETPOWER:
 		/* Valid power data is available only from primary die */
-		*value = aldebaran_is_primary(smu) ?
-			 metrics->AverageSocketPower << 8 :
-			 0;
+		if (aldebaran_is_primary(smu))
+			*value = metrics->AverageSocketPower << 8;
+		else
+			ret = -EOPNOTSUPP;
 		break;
 	case METRICS_TEMPERATURE_EDGE:
 		*value = metrics->TemperatureEdge *
@@ -736,25 +823,26 @@ static int aldebaran_get_current_clk_freq_by_table(struct smu_context *smu,
 					value);
 }
 
-static int aldebaran_print_clk_levels(struct smu_context *smu,
-				      enum smu_clk_type type, char *buf)
+static int aldebaran_emit_clk_levels(struct smu_context *smu,
+				     enum smu_clk_type type, char *buf, int *offset)
 {
-	int i, now, size = 0;
 	int ret = 0;
 	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
 	struct pp_clock_levels_with_latency clocks;
 	struct smu_13_0_dpm_table *single_dpm_table;
 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 	struct smu_13_0_dpm_context *dpm_context = NULL;
+	uint32_t i;
 	int display_levels;
 	uint32_t freq_values[3] = {0};
-	uint32_t min_clk, max_clk;
-
-	smu_cmn_get_sysfs_buf(&buf, &size);
+	uint32_t min_clk, max_clk, cur_value = 0;
+	bool freq_match;
+	unsigned int clock_mhz;
+	static const char attempt_string[] = "Attempt to get current";
 
 	if (amdgpu_ras_intr_triggered()) {
-		size += sysfs_emit_at(buf, size, "unavailable\n");
-		return size;
+		*offset += sysfs_emit_at(buf, *offset, "unavailable\n");
+		return -EBUSY;
 	}
 
 	dpm_context = smu_dpm->dpm_context;
@@ -762,21 +850,20 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
 
 	switch (type) {
 
 	case SMU_OD_SCLK:
-		size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
-		fallthrough;
+		*offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_SCLK");
+		*offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n",
+					 pstate_table->gfxclk_pstate.curr.min,
+					 pstate_table->gfxclk_pstate.curr.max);
+		return 0;
 	case SMU_SCLK:
-		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value);
 		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get current gfx clk Failed!");
+			dev_err(smu->adev->dev, "%s gfx clk Failed!", attempt_string);
 			return ret;
 		}
 
 		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
-		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
-		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get gfx clk levels Failed!");
-			return ret;
-		}
+		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
 
 		display_levels = (clocks.num_levels == 1) ? 1 : 2;
@@ -787,147 +874,111 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
 		freq_values[1] = max_clk;
 
 		/* fine-grained dpm has only 2 levels */
-		if (now > min_clk && now < max_clk) {
+		if (cur_value > min_clk && cur_value < max_clk) {
 			display_levels++;
 			freq_values[2] = max_clk;
-			freq_values[1] = now;
+			freq_values[1] = cur_value;
 		}
-
-		for (i = 0; i < display_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
-					      freq_values[i],
-					      (display_levels == 1) ?
-					      "*" :
-					      (aldebaran_freqs_in_same_level(
-						      freq_values[i], now) ?
-					      "*" :
-					      ""));
-
 		break;
 	case SMU_OD_MCLK:
-		size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
-		fallthrough;
+		*offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_MCLK");
+		*offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n",
+					 pstate_table->uclk_pstate.curr.min,
+					 pstate_table->uclk_pstate.curr.max);
+		return 0;
 	case SMU_MCLK:
-		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value);
 		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get current mclk Failed!");
+			dev_err(smu->adev->dev, "%s mclk Failed!", attempt_string);
 			return ret;
 		}
 
 		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
-		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
-		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get memory clk levels Failed!");
-			return ret;
-		}
-
-		for (i = 0; i < clocks.num_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
-					i, clocks.data[i].clocks_in_khz / 1000,
-					(clocks.num_levels == 1) ? "*" :
-					(aldebaran_freqs_in_same_level(
-						clocks.data[i].clocks_in_khz / 1000,
-						now) ? "*" : ""));
+		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
 		break;
 	case SMU_SOCCLK:
-		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now);
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &cur_value);
 		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get current socclk Failed!");
+			dev_err(smu->adev->dev, "%s socclk Failed!", attempt_string);
 			return ret;
 		}
 
 		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
-		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
-		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get socclk levels Failed!");
-			return ret;
-		}
-
-		for (i = 0; i < clocks.num_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
-					i, clocks.data[i].clocks_in_khz / 1000,
-					(clocks.num_levels == 1) ? "*" :
-					(aldebaran_freqs_in_same_level(
-						clocks.data[i].clocks_in_khz / 1000,
-						now) ? "*" : ""));
+		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
 		break;
 	case SMU_FCLK:
-		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &now);
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &cur_value);
 		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get current fclk Failed!");
+			dev_err(smu->adev->dev, "%s fclk Failed!", attempt_string);
 			return ret;
 		}
 
 		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
-		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
-		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get fclk levels Failed!");
-			return ret;
-		}
-
-		for (i = 0; i < single_dpm_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
-					i, single_dpm_table->dpm_levels[i].value,
-					(clocks.num_levels == 1) ? "*" :
-					(aldebaran_freqs_in_same_level(
-						clocks.data[i].clocks_in_khz / 1000,
-						now) ? "*" : ""));
+		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
 		break;
 	case SMU_VCLK:
-		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &cur_value);
 		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+			dev_err(smu->adev->dev, "%s vclk Failed!", attempt_string);
 			return ret;
 		}
 
 		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
-		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
-		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
-			return ret;
-		}
-
-		for (i = 0; i < single_dpm_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
-					i, single_dpm_table->dpm_levels[i].value,
-					(clocks.num_levels == 1) ? "*" :
-					(aldebaran_freqs_in_same_level(
-						clocks.data[i].clocks_in_khz / 1000,
-						now) ? "*" : ""));
+		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
 		break;
 	case SMU_DCLK:
-		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &cur_value);
 		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+			dev_err(smu->adev->dev, "%s dclk Failed!", attempt_string);
 			return ret;
 		}
 
 		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
-		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
-		if (ret) {
-			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
-			return ret;
-		}
-
-		for (i = 0; i < single_dpm_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
-					i, single_dpm_table->dpm_levels[i].value,
-					(clocks.num_levels == 1) ? "*" :
-					(aldebaran_freqs_in_same_level(
-						clocks.data[i].clocks_in_khz / 1000,
-						now) ? "*" : ""));
+		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
 		break;
 	default:
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case SMU_SCLK:
+		for (i = 0; i < display_levels; i++) {
+			clock_mhz = freq_values[i];
+			freq_match = aldebaran_freqs_in_same_level(clock_mhz, cur_value);
+			freq_match |= (display_levels == 1);
+
+			*offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n", i,
+						 clock_mhz,
+						 (freq_match) ? "*" : "");
+		}
+		break;
+
+	case SMU_MCLK:
+	case SMU_SOCCLK:
+	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_DCLK:
+		for (i = 0; i < clocks.num_levels; i++) {
+			clock_mhz = clocks.data[i].clocks_in_khz / 1000;
+			freq_match = aldebaran_freqs_in_same_level(clock_mhz, cur_value);
+			freq_match |= (clocks.num_levels == 1);
+
+			*offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
+						 i, clock_mhz,
+						 (freq_match) ? "*" : "");
+		}
 		break;
+	default:
+		return -EINVAL;
 	}
 
-	return size;
+	return 0;
 }
@@ -1095,16 +1146,6 @@ static int aldebaran_get_current_activity_percent(struct smu_context *smu,
 	return ret;
 }
 
-static int aldebaran_get_gpu_power(struct smu_context *smu, uint32_t *value)
-{
-	if (!value)
-		return -EINVAL;
-
-	return aldebaran_get_smu_metrics_data(smu,
-					      METRICS_AVERAGE_SOCKETPOWER,
-					      value);
-}
-
 static int aldebaran_thermal_get_temperature(struct smu_context *smu,
 					     enum amd_pp_sensors sensor,
 					     uint32_t *value)
@@ -1158,8 +1199,10 @@ static int aldebaran_read_sensor(struct smu_context *smu,
 						  (uint32_t *)data);
 		*size = 4;
 		break;
-	case AMDGPU_PP_SENSOR_GPU_POWER:
-		ret = aldebaran_get_gpu_power(smu, (uint32_t *)data);
+	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+		ret = aldebaran_get_smu_metrics_data(smu,
+						     METRICS_AVERAGE_SOCKETPOWER,
+						     (uint32_t *)data);
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
@@ -1184,6 +1227,7 @@ static int aldebaran_read_sensor(struct smu_context *smu,
 		ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
 		*size = 4;
 		break;
+	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
 	default:
 		ret = -EOPNOTSUPP;
 		break;
@@ -1193,9 +1237,10 @@ static int aldebaran_read_sensor(struct smu_context *smu,
 }
 
 static int aldebaran_get_power_limit(struct smu_context *smu,
-				     uint32_t *current_power_limit,
-				     uint32_t *default_power_limit,
-				     uint32_t *max_power_limit)
+					uint32_t *current_power_limit,
+					uint32_t *default_power_limit,
+					uint32_t *max_power_limit,
+					uint32_t *min_power_limit)
 {
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	uint32_t power_limit = 0;
@@ -1208,7 +1253,8 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
 			*default_power_limit = 0;
 		if (max_power_limit)
 			*max_power_limit = 0;
-
+		if (min_power_limit)
+			*min_power_limit = 0;
 		dev_warn(smu->adev->dev,
 			"PPT feature is not enabled, power values can't be fetched.");
 
@@ -1243,6 +1289,9 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
 		*max_power_limit = pptable->PptLimit;
 	}
 
+	if (min_power_limit)
+		*min_power_limit = 0;
+
 	return 0;
 }
@@ -1276,6 +1325,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
 	struct smu_13_0_dpm_table *gfx_table =
 		&dpm_context->dpm_tables.gfx_table;
 	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+	int r;
 
 	/* Disable determinism if switching to another mode */
 	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
@@ -1288,7 +1338,11 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
 	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
 		return 0;
-
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		r = smu_v13_0_set_performance_level(smu, level);
+		if (!r)
+			smu_v13_0_reset_custom_level(smu);
+		return r;
 	case AMD_DPM_FORCED_LEVEL_HIGH:
 	case AMD_DPM_FORCED_LEVEL_LOW:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1303,9 +1357,10 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
 }
 
 static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
-						 enum smu_clk_type clk_type,
-						 uint32_t min,
-						 uint32_t max)
+							enum smu_clk_type clk_type,
+							uint32_t min,
+							uint32_t max,
+							bool automatic)
 {
 	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
 	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
@@ -1334,7 +1389,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
 		return 0;
 
 	ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
-						    min, max);
+						    min, max, false);
 	if (!ret) {
 		pstate_table->gfxclk_pstate.curr.min = min;
 		pstate_table->gfxclk_pstate.curr.max = max;
@@ -1354,7 +1409,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
 		/* Restore default min/max clocks and enable determinism */
 		min_clk = dpm_context->dpm_tables.gfx_table.min;
 		max_clk = dpm_context->dpm_tables.gfx_table.max;
-		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
 		if (!ret) {
 			usleep_range(500, 1000);
 			ret = smu_cmn_send_smc_msg_with_param(smu,
@@ -1428,7 +1483,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
 			min_clk = dpm_context->dpm_tables.gfx_table.min;
 			max_clk = dpm_context->dpm_tables.gfx_table.max;
 
-			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+			ret = aldebaran_set_soft_freq_limited_range(
+				smu, SMU_GFXCLK, min_clk, max_clk, false);
+			if (ret)
+				return ret;
+			smu_v13_0_reset_custom_level(smu);
 		}
 		break;
 	case PP_OD_COMMIT_DPM_TABLE:
@@ -1447,7 +1506,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
 			min_clk = pstate_table->gfxclk_pstate.custom.min;
 			max_clk = pstate_table->gfxclk_pstate.custom.max;
 
-			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
 		}
 		break;
 	default:
@@ -1576,40 +1635,28 @@ static int aldebaran_i2c_control_init(struct smu_context *smu)
 	smu_i2c->port = 0;
 	mutex_init(&smu_i2c->mutex);
 	control->owner = THIS_MODULE;
-	control->class = I2C_CLASS_SPD;
 	control->dev.parent = &adev->pdev->dev;
 	control->algo = &aldebaran_i2c_algo;
 	snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
 	control->quirks = &aldebaran_i2c_control_quirks;
 	i2c_set_adapdata(control, smu_i2c);
 
-	res = i2c_add_adapter(control);
+	res = devm_i2c_add_adapter(adev->dev, control);
	if (res) {
 		DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
-		goto Out_err;
+		return res;
 	}
 
 	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
 	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
 
 	return 0;
-Out_err:
-	i2c_del_adapter(control);
-
-	return res;
 }
 
 static void aldebaran_i2c_control_fini(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
-	int i;
-
-	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
-		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
-		struct i2c_adapter *control = &smu_i2c->adapter;
-		i2c_del_adapter(control);
-	}
 	adev->pm.ras_eeprom_i2c_bus = NULL;
 	adev->pm.fru_eeprom_i2c_bus = NULL;
 }
@@ -1626,15 +1673,13 @@ static void aldebaran_get_unique_id(struct smu_context *smu)
 
 out:
 	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
-	if (adev->serial[0] == '\0')
-		sprintf(adev->serial, "%016llx", adev->unique_id);
 }
 
-static bool aldebaran_is_baco_supported(struct smu_context *smu)
+static int aldebaran_get_bamaco_support(struct smu_context *smu)
 {
 	/* aldebaran is not support baco */
-	return false;
+	return 0;
 }
 
 static int aldebaran_set_df_cstate(struct smu_context *smu,
@@ -1652,22 +1697,6 @@ static int aldebaran_set_df_cstate(struct smu_context *smu,
 	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 
-static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
-{
-	struct amdgpu_device *adev = smu->adev;
-
-	/* The message only works on master die and NACK will be sent
-	   back for other dies, only send it on master die */
-	if (!adev->smuio.funcs->get_socket_id(adev) &&
-	    !adev->smuio.funcs->get_die_id(adev))
-		return smu_cmn_send_smc_msg_with_param(smu,
-						       SMU_MSG_GmiPwrDnControl,
-						       en ? 0 : 1,
-						       NULL);
-	else
-		return 0;
-}
-
 static const struct throttling_logging_label {
 	uint32_t feature_mask;
 	const char *label;
 } logging_label[] = {
@@ -1681,7 +1710,7 @@ static const struct throttling_logging_label {
 static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
 {
 	int ret;
-	int throttler_idx, throtting_events = 0, buf_idx = 0;
+	int throttler_idx, throttling_events = 0, buf_idx = 0;
 	struct amdgpu_device *adev = smu->adev;
 	uint32_t throttler_status;
 	char log_buf[256];
@@ -1696,11 +1725,11 @@ static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
 	for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
 	     throttler_idx++) {
 		if (throttler_status & logging_label[throttler_idx].feature_mask) {
-			throtting_events++;
+			throttling_events++;
 			buf_idx += snprintf(log_buf + buf_idx,
 					    sizeof(log_buf) - buf_idx,
 					    "%s%s",
-					    throtting_events > 1 ? " and " : "",
+					    throttling_events > 1 ? " and " : "",
 					    logging_label[throttler_idx].label);
 			if (buf_idx >= sizeof(log_buf)) {
 				dev_err(adev->dev, "buffer overflow!\n");
@@ -1724,8 +1753,8 @@ static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu)
 
 	/* TODO: confirm this on real target */
 	esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
-	if ((esm_ctrl >> 15) & 0x1FFFF)
-		return (((esm_ctrl >> 8) & 0x3F) + 128);
+	if ((esm_ctrl >> 15) & 0x1)
+		return (((esm_ctrl >> 8) & 0x7F) + 128);
 
 	return smu_v13_0_get_current_pcie_link_speed(smu);
 }
@@ -1741,7 +1770,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
 
 	ret = smu_cmn_get_metrics_table(smu,
 					&metrics,
-					true);
+					false);
 	if (ret)
 		return ret;
 
@@ -1756,7 +1785,6 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
 	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
-	gpu_metrics->average_mm_activity = 0;
 
 	/* Valid power data is available only from primary die */
 	if (aldebaran_is_primary(smu)) {
@@ -1788,10 +1816,12 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->current_fan_speed = 0;
 
-	gpu_metrics->pcie_link_width =
-		smu_v13_0_get_current_pcie_link_width(smu);
-	gpu_metrics->pcie_link_speed =
-		aldebaran_get_current_pcie_link_speed(smu);
+	if (!amdgpu_sriov_vf(smu->adev)) {
+		gpu_metrics->pcie_link_width =
+			smu_v13_0_get_current_pcie_link_width(smu);
+		gpu_metrics->pcie_link_speed =
+			aldebaran_get_current_pcie_link_speed(smu);
+	}
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
@@ -1812,24 +1842,15 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
 static int aldebaran_check_ecc_table_support(struct smu_context *smu,
 		int *ecctable_version)
 {
-	uint32_t if_version = 0xff, smu_version = 0xff;
-	int ret = 0;
-
-	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
-	if (ret) {
-		/* return not support if failed get smu_version */
-		ret = -EOPNOTSUPP;
-	}
-
-	if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
-		ret = -EOPNOTSUPP;
-	else if (smu_version >= SUPPORT_ECCTABLE_SMU_VERSION &&
-			smu_version < SUPPORT_ECCTABLE_V2_SMU_VERSION)
+	if (smu->smc_fw_version < SUPPORT_ECCTABLE_SMU_VERSION)
+		return -EOPNOTSUPP;
+	else if (smu->smc_fw_version >= SUPPORT_ECCTABLE_SMU_VERSION &&
+		 smu->smc_fw_version < SUPPORT_ECCTABLE_V2_SMU_VERSION)
 		*ecctable_version = 1;
 	else
 		*ecctable_version = 2;
 
-	return ret;
+	return 0;
 }
 
 static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
@@ -1892,10 +1913,9 @@ static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
 
 static int aldebaran_mode1_reset(struct smu_context *smu)
 {
-	u32 smu_version, fatal_err, param;
+	u32 fatal_err, param;
 	int ret = 0;
 	struct amdgpu_device *adev = smu->adev;
-	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
 	fatal_err = 0;
 	param = SMU_RESET_MODE_1;
@@ -1903,15 +1923,13 @@ static int aldebaran_mode1_reset(struct smu_context *smu)
 	/*
 	* PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07
 	*/
-	smu_cmn_get_smc_version(smu, NULL, &smu_version);
-	if (smu_version < 0x00440700) {
+	if (smu->smc_fw_version < 0x00440700) {
 		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
-	}
-	else {
+	} else {
 		/* fatal error triggered by ras, PMFW supports the flag
 		   from 68.44.0 */
-		if ((smu_version >= 0x00442c00) && ras &&
-		    atomic_read(&ras->in_recovery))
+		if ((smu->smc_fw_version >= 0x00442c00) &&
+		    amdgpu_ras_get_fed_status(adev))
 			fatal_err = 1;
 
 		param |= (fatal_err << 16);
@@ -1927,18 +1945,16 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
 {
-	u32 smu_version;
 	int ret = 0, index;
 	struct amdgpu_device *adev = smu->adev;
 	int timeout = 10;
 
-	smu_cmn_get_smc_version(smu, NULL, &smu_version);
-
 	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
 						SMU_MSG_GfxDeviceDriverReset);
-
+	if (index < 0)
+		return -EINVAL;
 	mutex_lock(&smu->message_lock);
-	if (smu_version >= 0x00441400) {
+	if (smu->smc_fw_version >= 0x00441400) {
 		ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
 						       SMU_RESET_MODE_2);
 		/* This is similar to FLR, wait till max FLR timeout */
 		msleep(100);
@@ -1965,7 +1981,7 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
 	} else {
 		dev_err(adev->dev,
 			"smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
-			smu_version);
+			smu->smc_fw_version);
 	}
 
 	if (ret == 1)
@@ -1988,14 +2004,20 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
 {
 #if 0
 	struct amdgpu_device *adev = smu->adev;
-	u32 smu_version;
 	uint32_t val;
+	uint32_t smu_version;
+	int ret;
+
 	/**
 	 * PM FW version support mode1 reset from 68.07
 	 */
-	smu_cmn_get_smc_version(smu, NULL, &smu_version);
+	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+	if (ret)
+		return false;
+
 	if ((smu_version < 0x00440700))
 		return false;
+
 	/**
 	 * mode1 reset relies on PSP, so we should check if
 	 * PSP is alive.
@@ -2007,11 +2029,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
 	return true;
 }
 
-static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
-{
-	return true;
-}
-
 static int aldebaran_set_mp1_state(struct smu_context *smu,
 				   enum pp_mp1_state mp1_state)
 {
@@ -2039,19 +2056,10 @@ static int aldebaran_smu_send_hbm_bad_page_num(struct smu_context *smu,
 
 static int aldebaran_check_bad_channel_info_support(struct smu_context *smu)
 {
-	uint32_t if_version = 0xff, smu_version = 0xff;
-	int ret = 0;
-
-	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
-	if (ret) {
-		/* return not support if failed get smu_version */
-		ret = -EOPNOTSUPP;
-	}
+	if (smu->smc_fw_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION)
+		return -EOPNOTSUPP;
 
-	if (smu_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION)
-		ret = -EOPNOTSUPP;
-
-	return ret;
+	return 0;
 }
 
 static int aldebaran_send_hbm_bad_channel_flag(struct smu_context *smu,
@@ -2079,7 +2087,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
 	.set_default_dpm_table = aldebaran_set_default_dpm_table,
 	.populate_umd_state_clk = aldebaran_populate_umd_state_clk,
 	.get_thermal_temperature_range = aldebaran_get_thermal_temperature_range,
-	.print_clk_levels = aldebaran_print_clk_levels,
+	.emit_clk_levels = aldebaran_emit_clk_levels,
 	.force_clk_levels = aldebaran_force_clk_levels,
 	.read_sensor = aldebaran_read_sensor,
 	.set_performance_level = aldebaran_set_performance_level,
@@ -2116,18 +2124,16 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
 	.register_irq_handler = smu_v13_0_register_irq_handler,
 	.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
 	.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
-	.baco_is_support= aldebaran_is_baco_supported,
-	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+	.get_bamaco_support = aldebaran_get_bamaco_support,
+	.get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
 	.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
 	.set_df_cstate = aldebaran_set_df_cstate,
-	.allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
 	.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
 	.get_gpu_metrics = aldebaran_get_gpu_metrics,
 	.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
-	.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
 	.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
 	.mode1_reset = aldebaran_mode1_reset,
 	.set_mp1_state = aldebaran_set_mp1_state,
