Diffstat (limited to 'drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c')
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c  236
1 file changed, 151 insertions, 85 deletions
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 7ef7e81525a3..9b28c0728269 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -22,7 +22,6 @@
*/
#include "pp_debug.h"
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -84,15 +83,15 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-static struct profile_mode_setting smu7_profiling[7] =
- {{0, 0, 0, 0, 0, 0, 0, 0},
+static struct profile_mode_setting smu7_profiling[7] = {
+ {0, 0, 0, 0, 0, 0, 0, 0},
{1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 10, 16, 31},
{1, 0, 11, 50, 1, 0, 100, 10},
{1, 0, 5, 30, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0},
- };
+};
#define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310)
@@ -905,7 +904,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
dep_sclk_table->entries[i].clk;
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
- (i == 0) ? true : false;
+ i == 0;
data->dpm_table.sclk_table.count++;
}
}
@@ -920,7 +919,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
dep_mclk_table->entries[i].clk;
data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
- (i == 0) ? true : false;
+ i == 0;
data->dpm_table.mclk_table.count++;
}
}
@@ -951,7 +950,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
odn_table->odn_core_clock_dpm_levels.num_of_pl =
data->golden_dpm_table.sclk_table.count;
entries = odn_table->odn_core_clock_dpm_levels.entries;
- for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
+ for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
entries[i].enabled = true;
entries[i].vddc = dep_sclk_table->entries[i].vddc;
@@ -963,7 +962,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
odn_table->odn_memory_clock_dpm_levels.num_of_pl =
data->golden_dpm_table.mclk_table.count;
entries = odn_table->odn_memory_clock_dpm_levels.entries;
- for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
+ for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
entries[i].enabled = true;
entries[i].vddc = dep_mclk_table->entries[i].vddc;
@@ -1501,6 +1500,67 @@ static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
return ret;
}
+static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+ int32_t tmp_sclk, count, percentage;
+
+ if (golden_dpm_table->mclk_table.count == 1) {
+ percentage = 70;
+ hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value;
+ } else {
+ percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+ hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
+ }
+
+ tmp_sclk = hwmgr->pstate_mclk * percentage / 100;
+
+ if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+ for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) {
+ if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) {
+ hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+ if (count < 0)
+ hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk;
+
+ hwmgr->pstate_sclk_peak =
+ vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk;
+ } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk =
+ table_info->vdd_dep_on_sclk;
+
+ for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) {
+ if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) {
+ hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+ if (count < 0)
+ hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk;
+
+ hwmgr->pstate_sclk_peak =
+ vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk;
+ }
+
+ hwmgr->pstate_mclk_peak =
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+
+ /* make sure the output is in Mhz */
+ hwmgr->pstate_sclk /= 100;
+ hwmgr->pstate_mclk /= 100;
+ hwmgr->pstate_sclk_peak /= 100;
+ hwmgr->pstate_mclk_peak /= 100;
+}
+
static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result = 0;
@@ -1625,6 +1685,8 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"pcie performance request failed!", result = tmp_result);
+ smu7_populate_umdpstate_clocks(hwmgr);
+
return 0;
}
@@ -1736,17 +1798,6 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result;
}
-static bool intel_core_rkl_chk(void)
-{
-#if IS_ENABLED(CONFIG_X86_64)
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
-#else
- return false;
-#endif
-}
-
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1762,18 +1813,17 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
+ data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
+ data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
+ data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
+ data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
+ data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
+ data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
- data->pcie_dpm_key_disabled =
- intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+ data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
/* need to set voltage control types before EVV patching */
data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
@@ -1950,7 +2000,7 @@ static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr)
} else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
min = 900;
- max= 2100;
+ max = 2100;
} else if (hwmgr->chip_id == CHIP_POLARIS10) {
if (adev->pdev->subsystem_vendor == 0x106B) {
min = 1000;
@@ -2907,6 +2957,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct smu7_hwmgr *data;
int result = 0;
@@ -2924,6 +2975,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = smu7_get_evv_voltages(hwmgr);
if (result) {
pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
return -EINVAL;
}
} else {
@@ -2941,38 +2994,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* Initalize Dynamic State Adjustment Rule Settings */
result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
- if (0 == result) {
- struct amdgpu_device *adev = hwmgr->adev;
+ if (result)
+ goto fail;
- data->is_tlu_enabled = false;
+ data->is_tlu_enabled = false;
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
SMU7_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+ hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- data->pcie_gen_cap = adev->pm.pcie_gen_mask;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- else
- data->pcie_spc_cap = 16;
- data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
-
- hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
-/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
- hwmgr->platform_descriptor.clockStep.engineClock = 500;
- hwmgr->platform_descriptor.clockStep.memoryClock = 500;
- smu7_thermal_parameter_init(hwmgr);
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- smu7_hwmgr_backend_fini(hwmgr);
- }
+ data->pcie_gen_cap = adev->pm.pcie_gen_mask;
+ if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ data->pcie_spc_cap = 20;
+ else
+ data->pcie_spc_cap = 16;
+ data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
+
+ hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+ /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
+ hwmgr->platform_descriptor.clockStep.engineClock = 500;
+ hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+ smu7_thermal_parameter_init(hwmgr);
result = smu7_update_edc_leakage_table(hwmgr);
if (result)
- return result;
+ goto fail;
return 0;
+fail:
+ smu7_hwmgr_backend_fini(hwmgr);
+ return result;
}
static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
@@ -3143,15 +3195,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
count >= 0; count--) {
if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
- tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
*sclk_mask = count;
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
*sclk_mask = 0;
- tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
- }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
@@ -3161,15 +3210,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
- tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
*sclk_mask = count;
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
*sclk_mask = 0;
- tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
- }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
@@ -3181,8 +3227,6 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
*mclk_mask = golden_dpm_table->mclk_table.count - 1;
*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
- hwmgr->pstate_sclk = tmp_sclk;
- hwmgr->pstate_mclk = tmp_mclk;
return 0;
}
@@ -3195,9 +3239,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
- if (hwmgr->pstate_sclk == 0)
- smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
-
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
@@ -3273,8 +3314,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
const struct pp_power_state *current_ps)
{
struct amdgpu_device *adev = hwmgr->adev;
- struct smu7_power_state *smu7_ps =
- cast_phw_smu7_power_state(&request_ps->hardware);
+ struct smu7_power_state *smu7_ps;
uint32_t sclk;
uint32_t mclk;
struct PP_Clocks minimum_clocks = {0};
@@ -3291,6 +3331,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
uint32_t latency;
bool latency_allowed = false;
+ smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware);
+ if (!smu7_ps)
+ return -EINVAL;
+
data->battery_state = (PP_StateUILabel_Battery ==
request_ps->classification.ui_label);
data->mclk_ignore_signal = false;
@@ -3956,6 +4000,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
uint32_t sclk, mclk, activity_percent;
uint32_t offset, val_vid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+ int ret = 0;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@@ -3963,12 +4009,16 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
+ if (ret)
+ return ret;
*((uint32_t *)value) = sclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
+ if (ret)
+ return ret;
*((uint32_t *)value) = mclk;
*size = 4;
return 0;
@@ -3977,7 +4027,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
(idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
- AverageGraphicsActivity:
+ AverageGraphicsActivity :
AverageMemoryActivity);
activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
@@ -3998,8 +4048,22 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
*size = 4;
return 0;
- case AMDGPU_PP_SENSOR_GPU_POWER:
- return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA))
+ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+ else
+ return -EOPNOTSUPP;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA))
+ return -EOPNOTSUPP;
+ else
+ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
case AMDGPU_PP_SENSOR_VDDGFX:
if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
(VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
@@ -4153,7 +4217,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to freeze SCLK DPM when DPM is disabled",
);
@@ -4210,7 +4274,7 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
result = smum_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
@@ -4218,7 +4282,7 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+ (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
/*populate MCLK dpm table to SMU7 */
result = smum_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
@@ -4309,7 +4373,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to Unfreeze SCLK DPM when DPM is disabled",
@@ -4907,13 +4971,14 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
- int size = 0;
+ int size = 0, ret = 0;
uint32_t i, now, clock, pcie_speed;
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
-
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
+ if (ret)
+ return ret;
for (i = 0; i < sclk_table->count; i++) {
if (clock > sclk_table->dpm_levels[i].value)
continue;
@@ -4927,8 +4992,9 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
-
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
+ if (ret)
+ return ret;
for (i = 0; i < mclk_table->count; i++) {
if (clock > mclk_table->dpm_levels[i].value)
continue;
@@ -5378,8 +5444,9 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
else if (hwmgr->pp_table_version == PP_TABLE_V0)
- thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->max = data->thermal_temp_setting.temperature_shutdown;
+
+ thermal_data->sw_ctf_threshold = thermal_data->max;
return 0;
}
@@ -5580,7 +5647,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
mode = input[size];
switch (mode) {
case PP_SMC_POWER_PROFILE_CUSTOM:
- if (size < 8 && size != 0)
+ if (size != 8 && size != 0)
return -EINVAL;
/* If only CUSTOM is passed in, use the saved values. Check
* that we actually have a CUSTOM profile by ensuring that
@@ -5686,7 +5753,6 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.patch_boot_state = smu7_dpm_patch_boot_state,
.get_pp_table_entry = smu7_get_pp_table_entry,
.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
- .powerdown_uvd = smu7_powerdown_uvd,
.powergate_uvd = smu7_powergate_uvd,
.powergate_vce = smu7_powergate_vce,
.disable_clock_power_gating = smu7_disable_clock_power_gating,
@@ -5731,7 +5797,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_power_profile_mode = smu7_get_power_profile_mode,
.set_power_profile_mode = smu7_set_power_profile_mode,
.get_performance_level = smu7_get_performance_level,
- .get_asic_baco_capability = smu7_baco_get_capability,
+ .get_bamaco_support = smu7_get_bamaco_support,
.get_asic_baco_state = smu7_baco_get_state,
.set_asic_baco_state = smu7_baco_set_state,
.power_off_asic = smu7_power_off_asic,