Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c')
 drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 90 ++++++++++++-----------
 1 file changed, 50 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index da1f43999d09..a55ea76d7399 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -242,7 +242,9 @@ static int vangogh_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2));
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3));
+ smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4));
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
@@ -301,7 +303,7 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
*value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->UvdActivity;
+ *value = metrics->UvdActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) /
@@ -459,7 +461,9 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
return smu_v11_0_init_smc_tables(smu);
}
-static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int vangogh_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable,
+ int inst)
{
int ret = 0;
@@ -976,6 +980,18 @@ static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
}
}
if (min) {
+ ret = vangogh_get_profiling_clk_mask(smu,
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK,
+ NULL,
+ NULL,
+ &mclk_mask,
+ &fclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
+ vclk_mask = dclk_mask = 0;
+
switch (clk_type) {
case SMU_UCLK:
case SMU_MCLK:
@@ -1040,48 +1056,34 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
return size;
}
-static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+static int vangogh_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
{
- int workload_type, ret;
- uint32_t profile_mode = input[size];
-
- if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
- return -EINVAL;
- }
-
- if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
- profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
- return 0;
+ u32 backend_workload_mask = 0;
+ int ret;
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- profile_mode);
- if (workload_type < 0) {
- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
- profile_mode);
- return -EINVAL;
- }
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- 1 << workload_type,
- NULL);
+ backend_workload_mask,
+ NULL);
if (ret) {
- dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
- workload_type);
+ dev_err_once(smu->adev->dev, "Fail to set workload mask 0x%08x\n",
+ workload_mask);
return ret;
}
- smu->power_profile_mode = profile_mode;
-
- return 0;
+ return ret;
}
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max,
+ bool automatic)
{
int ret = 0;
@@ -1287,7 +1289,7 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -1323,7 +1325,7 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
@@ -1342,7 +1344,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq, false);
if (ret)
return ret;
@@ -1350,7 +1352,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq, false);
if (ret)
return ret;
@@ -1358,7 +1360,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq, false);
if (ret)
return ret;
@@ -1366,7 +1368,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq, false);
if (ret)
return ret;
@@ -1507,6 +1509,12 @@ static int vangogh_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = vangogh_common_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
@@ -2444,6 +2452,8 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
start, &residency);
+ if (ret)
+ return ret;
if (!start)
adev->gfx.gfx_off_residency = residency;