Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c')
-rw-r--r--	drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c	881
1 file changed, 874 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 8cb4fcee9a2c..a79dd04f81a2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -31,6 +31,9 @@
#include "smu_v11_5_ppsmc.h"
#include "smu_v11_5_pmfw.h"
#include "smu_cmn.h"
+#include "soc15_common.h"
+#include "asic_reg/gc/gc_10_3_0_offset.h"
+#include "asic_reg/gc/gc_10_3_0_sh_mask.h"
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -118,6 +121,7 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0),
MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0),
MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0),
+ MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0),
};
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -162,6 +166,9 @@ static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(A55_DPM),
FEA_MAP(CVIP_DSP_DPM),
FEA_MAP(MSMU_LOW_POWER),
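+ /* the *_REVERSE mappers presumably bridge the reversed word order between
+ * common (DPM_SOCCLK) and ASIC (SOCCLK_DPM) feature bit names */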
+ FEA_MAP_REVERSE(SOCCLK),
+ FEA_MAP_REVERSE(FCLK),
+ FEA_MAP_HALF_REVERSE(GFX),
};
static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
@@ -242,6 +249,12 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_SOCCLK:
*value = metrics->SocclkFrequency;
break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->VclkFrequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = metrics->DclkFrequency;
+ break;
case METRICS_AVERAGE_UCLK:
*value = metrics->MemclkFrequency;
break;
@@ -252,7 +265,8 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = metrics->CurrentSocketPower;
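+ /* CurrentSocketPower is in mW; <<8 then /1000 converts to
+ * 8.8 fixed-point watts (same convention as other APUs) */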
+ *value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->GfxTemperature / 100 *
@@ -366,6 +380,10 @@ static int vangogh_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DPM_BIT)
| FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)
+ | FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT)
+ | FEATURE_MASK(FEATURE_VCN_DPM_BIT)
+ | FEATURE_MASK(FEATURE_FCLK_DPM_BIT)
+ | FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)
| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_PPT_BIT)
| FEATURE_MASK(FEATURE_TDC_BIT)
@@ -379,6 +397,12 @@ static int vangogh_get_allowed_feature_mask(struct smu_context *smu,
if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT);
+ if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FCLK_DPM_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DPM_BIT);
+
if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
@@ -402,10 +426,63 @@ static bool vangogh_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->SocClocks[dpm_level];
+ break;
+ case SMU_VCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VcnClocks[dpm_level].vclk;
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VcnClocks[dpm_level].dclk;
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ if (dpm_level >= clk_table->NumDfPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->DfPstateTable[dpm_level].memclk;
+ break;
+ case SMU_FCLK:
+ if (dpm_level >= clk_table->NumDfPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->DfPstateTable[dpm_level].fclk;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int vangogh_print_fine_grain_clk(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int size = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ SmuMetrics_t metrics;
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0;
+ bool cur_value_match_level = false;
+
+ memset(&metrics, 0, sizeof(metrics));
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+ if (ret)
+ return ret;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -424,13 +501,733 @@ static int vangogh_print_fine_grain_clk(struct smu_context *smu,
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
}
break;
+ case SMU_SOCCLK:
+ /* socclk levels 3 ~ 6 share the same frequency on vangogh */
+ count = clk_table->NumSocClkLevelsEnabled;
+ cur_value = metrics.SocclkFrequency;
+ break;
+ case SMU_VCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.VclkFrequency;
+ break;
+ case SMU_DCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.DclkFrequency;
+ break;
+ case SMU_MCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ cur_value = metrics.MemclkFrequency;
+ break;
+ case SMU_FCLK:
+ count = clk_table->NumDfPstatesEnabled;
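+ /* the metrics table does not expose fclk, so read the current value back from the PMFW */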
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
+ if (ret)
+ return ret;
+ break;
default:
break;
}
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_MCLK:
+ case SMU_FCLK:
+ for (i = 0; i < count; i++) {
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ if (ret)
+ return ret;
+ if (!value)
+ continue;
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
+ }
+
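+ /* with fine-grained DPM the current clock can sit between table levels; print it on its own line */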
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *vclk_mask,
+ uint32_t *dclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *fclk_mask,
+ uint32_t *soc_mask)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
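+ /* the DF p-state table is ordered highest to lowest, so index 0 is the
+ * peak memclk/fclk and the last enabled index is the minimum */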
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ if (mclk_mask)
+ *mclk_mask = clk_table->NumDfPstatesEnabled - 1;
+
+ if (fclk_mask)
+ *fclk_mask = clk_table->NumDfPstatesEnabled - 1;
+
+ if (soc_mask)
+ *soc_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+
+ if (fclk_mask)
+ *fclk_mask = 0;
+
+ if (soc_mask)
+ *soc_mask = 1;
+
+ if (vclk_mask)
+ *vclk_mask = 1;
+
+ if (dclk_mask)
+ *dclk_mask = 1;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+
+ if (fclk_mask)
+ *fclk_mask = 0;
+
+ if (soc_mask)
+ *soc_mask = 1;
+
+ if (vclk_mask)
+ *vclk_mask = 1;
+
+ if (dclk_mask)
+ *dclk_mask = 1;
+ }
+
+ return 0;
+}
+
+bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
+ enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
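+ /* mclk/uclk scale with the data fabric on this APU, so they share the fclk DPM feature */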
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ feature_id = SMU_FEATURE_DPM_FCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ feature_id = SMU_FEATURE_VCN_DPM_BIT;
+ break;
+ default:
+ return true;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu, feature_id))
+ return false;
+
+ return true;
+}
+
+static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ int ret = 0;
+ uint32_t soc_mask;
+ uint32_t vclk_mask;
+ uint32_t dclk_mask;
+ uint32_t mclk_mask;
+ uint32_t fclk_mask;
+ uint32_t clock_limit;
+
+ if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_FCLK:
+ clock_limit = smu->smu_table.boot_values.fclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ case SMU_VCLK:
+ clock_limit = smu->smu_table.boot_values.vclk;
+ break;
+ case SMU_DCLK:
+ clock_limit = smu->smu_table.boot_values.dclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* boot values are in 10 kHz units; convert to MHz */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+ if (max) {
+ ret = vangogh_get_profiling_clk_mask(smu,
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ &vclk_mask,
+ &dclk_mask,
+ &mclk_mask,
+ &fclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
+ switch (clk_type) {
+ case SMU_UCLK:
+ case SMU_MCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+ if (min) {
+ switch (clk_type) {
+ case SMU_UCLK:
+ case SMU_MCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+failed:
+ return ret;
+}
+
+static int vangogh_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+ * Not all profile modes are supported on vangogh.
+ */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
+
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
return size;
}
+static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ int workload_type, ret;
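+ /* the sysfs handler is expected to place the requested mode at input[size], after any custom parameters */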
+ uint32_t profile_mode = input[size];
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
+ return -EINVAL;
+ }
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ profile_mode);
+ if (workload_type < 0) {
+ dev_err_once(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
+ profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
+ 1 << workload_type,
+ NULL);
+ if (ret) {
+ dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
+ workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0;
+
+ if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinFclkByFreq,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxFclkByFreq,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinSocclkByFreq,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxSocclkByFreq,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
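+ /* vcn clock messages pack vclk in bits 31:16 and dclk in bits 15:0, hence the << 16 below */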
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min << 16, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max << 16, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_DCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vangogh_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+ uint32_t soft_min_level = 0, soft_max_level = 0;
+ uint32_t min_freq = 0, max_freq = 0;
+ int ret = 0;
+
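+ /* mask is a bitmask of allowed DPM levels; clamp to the range spanned by its lowest and highest set bits */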
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type,
+ soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type,
+ soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxSocclkByFreq,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinSocclkByFreq,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_MCLK:
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxFclkByFreq,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinFclkByFreq,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min_freq << 16, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max_freq << 16, NULL);
+ if (ret)
+ return ret;
+
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq, force_freq;
+ enum smu_clk_type clk_type;
+
+ enum smu_clk_type clks[] = {
+ SMU_SOCCLK,
+ SMU_VCLK,
+ SMU_DCLK,
+ SMU_MCLK,
+ SMU_FCLK,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
+ ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
+ if (ret)
+ return ret;
+
+ force_freq = highest ? max_freq : min_freq;
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vangogh_unforce_dpm_levels(struct smu_context *smu)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq;
+ enum smu_clk_type clk_type;
+
+ struct clk_feature_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature;
+ } clk_feature_map[] = {
+ {SMU_MCLK, SMU_FEATURE_DPM_FCLK_BIT},
+ {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
+ {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
+ {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+ if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
+ continue;
+
+ clk_type = clk_feature_map[i].clk_type;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t socclk_freq = 0, fclk_freq = 0;
+ uint32_t vclk_freq = 0, dclk_freq = 0;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int vangogh_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t soc_mask, mclk_mask, fclk_mask;
+ uint32_t vclk_mask = 0, dclk_mask = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = vangogh_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = vangogh_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = vangogh_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
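+ /* pin gfxclk to the standard UMD p-state, then force the remaining clocks through their profiling masks */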
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_profiling_clk_mask(smu, level,
+ &vclk_mask,
+ &dclk_mask,
+ &mclk_mask,
+ &fclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+
+ vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
+ vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
+ vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
+
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn,
+ VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn,
+ VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ ret = vangogh_get_profiling_clk_mask(smu, level,
+ NULL,
+ NULL,
+ &mclk_mask,
+ &fclk_mask,
+ NULL);
+ if (ret)
+ return ret;
+
+ vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_peak_clock_by_device(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return ret;
+}
+
static int vangogh_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
@@ -513,7 +1310,7 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
if (clock_ranges) {
if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
- clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
return -EINVAL;
for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
@@ -631,14 +1428,16 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
if (input[0] == 0) {
if (input[1] < smu->gfx_default_hard_min_freq) {
- dev_warn(smu->adev->dev, "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ dev_warn(smu->adev->dev,
+ "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
input[1], smu->gfx_default_hard_min_freq);
return -EINVAL;
}
smu->gfx_actual_hard_min_freq = input[1];
} else if (input[0] == 1) {
if (input[1] > smu->gfx_default_soft_max_freq) {
- dev_warn(smu->adev->dev, "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ dev_warn(smu->adev->dev,
+ "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
input[1], smu->gfx_default_soft_max_freq);
return -EINVAL;
}
@@ -676,8 +1475,10 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
return -EINVAL;
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
- dev_err(smu->adev->dev, "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
- smu->gfx_actual_hard_min_freq, smu->gfx_actual_soft_max_freq);
+ dev_err(smu->adev->dev,
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
return -EINVAL;
}
@@ -722,6 +1523,33 @@ static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
return 0;
}
+static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+ int i;
+
+ if (!clock_table || !table)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ clock_table->SocClocks[i].Freq = table->SocClocks[i];
+ clock_table->SocClocks[i].Vol = table->SocVoltage[i];
+ }
+
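+ /* fclk and memclk both come from the DF p-state table and share its voltage column */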
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
+ clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
+ clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
+ }
+
+ return 0;
+}
+
static int vangogh_system_features_control(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
@@ -733,6 +1561,38 @@ static int vangogh_system_features_control(struct smu_context *smu, bool en)
return 0;
}
+static int vangogh_post_smu_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t tmp;
+ uint8_t aon_bits = 0;
+ /* Two CUs in one WGP */
+ uint32_t req_active_wgps = adev->gfx.cu_info.number / 2;
+ uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+
+ /* if all CUs are active, no need to power off any WGPs */
+ if (total_cu == adev->gfx.cu_info.number)
+ return 0;
+
+ /*
+ * Calculate the total bits number of always on WGPs for all SA/SEs in
+ * RLC_PG_ALWAYS_ON_WGP_MASK.
+ */
+ tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
+ tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;
+
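+ /* the AON mask appears to be per shader array, hence the scaling by SA count across all SEs */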
+ aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+
+ /* Do not request any WGPs less than set in the AON_WGP_MASK */
+ if (aon_bits > req_active_wgps) {
+ dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
+ return 0;
+ } else {
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
+ }
+}
+
static const struct pptable_funcs vangogh_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
@@ -761,6 +1621,13 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
.system_features_control = vangogh_system_features_control,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .set_power_profile_mode = vangogh_set_power_profile_mode,
+ .get_power_profile_mode = vangogh_get_power_profile_mode,
+ .get_dpm_clock_table = vangogh_get_dpm_clock_table,
+ .force_clk_levels = vangogh_force_clk_levels,
+ .set_performance_level = vangogh_set_performance_level,
+ .post_init = vangogh_post_smu_init,
};
void vangogh_set_ppt_funcs(struct smu_context *smu)