Diffstat (limited to 'drivers/gpu/drm/amd/pm')
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c  35
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c  40
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h  13
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c  4
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c  52
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c  5
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c  11
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c  7
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h  2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c  6
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c  9
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h  2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c  9
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h  2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h  2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c  1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c  1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c  235
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h  52
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h  3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h  3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h  5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h  5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h  3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h  11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c  129
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c  83
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c  95
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c  51
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c  20
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_internal.h  3
37 files changed, 670 insertions, 247 deletions
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 8ec11da0319f..6627ee07d52d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -203,8 +203,7 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
- bool baco_cap;
- int ret = 0;
+ bool ret;
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
return false;
@@ -222,12 +221,11 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
mutex_lock(&adev->pm.mutex);
- ret = pp_funcs->get_asic_baco_capability(pp_handle,
- &baco_cap);
+ ret = pp_funcs->get_asic_baco_capability(pp_handle);
mutex_unlock(&adev->pm.mutex);
- return ret ? false : baco_cap;
+ return ret;
}
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
@@ -618,6 +616,16 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
enable ? "enable" : "disable", ret);
}
+void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
+}
+
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1319,6 +1327,23 @@ int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
return ret;
}
+ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
+ size_t size)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_pm_metrics)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
uint32_t *fan_mode)
{
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 20c53eefd680..f3cb490fe79b 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1799,6 +1799,44 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
return count;
}
+static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
+ struct amdgpu_device_attr *attr,
+ uint32_t mask,
+ enum amdgpu_device_attr_states *states)
+{
+ if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
+ *states = ATTR_STATE_UNSUPPORTED;
+
+ return 0;
+}
+
+static ssize_t amdgpu_get_pm_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size = 0;
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
/**
* DOC: gpu_metrics
*
@@ -2096,6 +2134,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
.attr_update = ss_bias_attr_update),
AMDGPU_DEVICE_ATTR_RW(xgmi_plpd_policy, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC,
+ .attr_update = amdgpu_pm_metrics_attr_update),
};
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 482ea30147ab..3047ffe7f244 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -445,6 +445,7 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable);
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable);
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size);
@@ -513,6 +514,18 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
long *input, uint32_t size);
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
+
+/**
+ * @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The
+ * sample is copied to the pm_metrics buffer, which is expected to be allocated
+ * by the caller; the size of the allocated buffer is passed in. The maximum
+ * size expected for a metrics sample is 4096 bytes.
+ *
+ * Return: Actual size of the metrics sample
+ */
+ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
+ size_t size);
+
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
uint32_t *fan_mode);
int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
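For reference, here is a minimal userspace sketch of consuming the interface documented above through the pm_metrics sysfs attribute added in amdgpu_pm.c. It is not part of the patch; the sysfs path is an assumption based on where amdgpu device attributes are normally exposed, and the buffer size follows the 4096-byte maximum noted in the kerneldoc.

/* read_pm_metrics.c - illustrative only, not part of this patch */
#include <stdio.h>

int main(void)
{
	/* Assumed location of the attribute; adjust the card index as needed. */
	const char *path = "/sys/class/drm/card0/device/pm_metrics";
	unsigned char buf[4096];	/* max metrics sample size per the kerneldoc */
	FILE *f = fopen(path, "rb");
	size_t n;

	if (!f) {
		perror("fopen");
		return 1;
	}
	n = fread(buf, 1, sizeof(buf), f);	/* one binary metrics snapshot */
	fclose(f);
	printf("read %zu bytes of pm metrics\n", n);
	return 0;
}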
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 5d28c951a319..5cb4725c773f 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -2735,10 +2735,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
adev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
idx = (u8 *)&power_state->v2.clockInfoIndex[0];
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 81fb4e5dd804..60377747bab4 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -272,10 +272,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -283,10 +281,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -294,10 +290,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -305,10 +299,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
@@ -339,10 +331,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
kcalloc(psl->ucNumEntries,
sizeof(struct amdgpu_phase_shedding_limits_entry),
GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
return -ENOMEM;
- }
entry = &psl->entries[0];
for (i = 0; i < psl->ucNumEntries; i++) {
@@ -383,10 +373,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ATOM_PPLIB_CAC_Leakage_Record *entry;
u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
return -ENOMEM;
- }
entry = &cac_table->entries[0];
for (i = 0; i < cac_table->ucNumEntries; i++) {
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
@@ -438,10 +426,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -493,10 +479,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -525,10 +509,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -548,10 +530,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(ext_hdr->usPPMTableOffset));
adev->pm.dpm.dyn_state.ppm_table =
kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.ppm_table) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.ppm_table)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
le16_to_cpu(ppm->usCpuCoreNumber);
@@ -583,10 +563,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -606,10 +584,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ATOM_PowerTune_Table *pt;
adev->pm.dpm.dyn_state.cac_tdp_table =
kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.cac_tdp_table)
return -ENOMEM;
- }
if (rev > 0) {
ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
(mode_info->atom_context->bios + data_offset +
@@ -645,10 +621,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ret = amdgpu_parse_clk_voltage_dep_table(
&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
dep_table);
- if (ret) {
- kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
+ if (ret)
return ret;
- }
}
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index fc8e4ac6c8e7..df4f20293c16 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7379,10 +7379,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
kcalloc(4,
sizeof(struct amdgpu_clock_voltage_dependency_entry),
GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
return -ENOMEM;
- }
+
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 914c15387157..aed0e2cefbf9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1371,21 +1371,18 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
return phm_set_active_display_count(hwmgr, count);
}
-static int pp_get_asic_baco_capability(void *handle, bool *cap)
+static bool pp_get_asic_baco_capability(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- *cap = false;
if (!hwmgr)
- return -EINVAL;
+ return false;
if (!(hwmgr->not_vf && amdgpu_dpm) ||
!hwmgr->hwmgr_func->get_asic_baco_capability)
- return 0;
+ return false;
- hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
-
- return 0;
+ return hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr);
}
static int pp_get_asic_baco_state(void *handle, int *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
index 044cda005aed..e8a9471c1898 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
@@ -33,21 +33,20 @@
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
-int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
- *cap = false;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
return 0;
reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
- *cap = true;
+ return true;
- return 0;
+ return false;
}
int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
index be0d98abb536..73a773f4ce2e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr);
extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 11372fcc59c8..b1a8799e2dee 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -2974,6 +2974,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = smu7_get_evv_voltages(hwmgr);
if (result) {
pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
return -EINVAL;
}
} else {
@@ -3019,8 +3021,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
}
result = smu7_update_edc_leakage_table(hwmgr);
- if (result)
+ if (result) {
+ smu7_hwmgr_backend_fini(hwmgr);
return result;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
index de0a37f7c632..c66ef9741535 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
@@ -28,14 +28,13 @@
#include "vega10_inc.h"
#include "smu9_baco.h"
-int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg, data;
- *cap = false;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return 0;
+ return false;
WREG32(0x12074, 0xFFF0003B);
data = RREG32(0x12075);
@@ -44,10 +43,10 @@ int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- *cap = true;
+ return true;
}
- return 0;
+ return false;
}
int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
index 84e90f801ac3..9ff7c2ea1b58 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr);
extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
index 994c0d374bfa..dad4c80aee58 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
@@ -36,23 +36,22 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] = {
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
};
-int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
- *cap = false;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return 0;
+ return false;
if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) {
reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- *cap = true;
+ return true;
}
- return 0;
+ return false;
}
int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
index f06471e712dc..bdad9c915631 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr);
extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 81650727a5de..6f536159df4d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -351,7 +351,7 @@ struct pp_hwmgr_func {
int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
- int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap);
+ bool (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr);
int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 9e4228232f02..ad1fd3150d03 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -2298,6 +2298,7 @@ static uint32_t ci_get_mac_definition(uint32_t value)
case SMU_MAX_ENTRIES_SMIO:
return SMU7_MAX_ENTRIES_SMIO;
case SMU_MAX_LEVELS_VDDC:
+ case SMU_MAX_LEVELS_VDDGFX:
return SMU7_MAX_LEVELS_VDDC;
case SMU_MAX_LEVELS_VDDCI:
return SMU7_MAX_LEVELS_VDDCI;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 97d9802fe673..17d2f5bff4a7 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2263,6 +2263,7 @@ static uint32_t iceland_get_mac_definition(uint32_t value)
case SMU_MAX_ENTRIES_SMIO:
return SMU71_MAX_ENTRIES_SMIO;
case SMU_MAX_LEVELS_VDDC:
+ case SMU_MAX_LEVELS_VDDGFX:
return SMU71_MAX_LEVELS_VDDC;
case SMU_MAX_LEVELS_VDDCI:
return SMU71_MAX_LEVELS_VDDCI;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index e1a5ee911dbb..c16703868e5c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1322,6 +1322,187 @@ static int smu_get_thermal_temperature_range(struct smu_context *smu)
return ret;
}
+/**
+ * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
+ *
+ * @smu: smu_context pointer
+ *
+ * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
+ * Returns 0 on success, error on failure.
+ */
+static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
+{
+ struct wbrf_ranges_in_out wbrf_exclusion = {0};
+ struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
+ uint64_t start, end;
+ int ret, i, j;
+
+ ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
+ return ret;
+ }
+
+ /*
+ * The exclusion ranges array we got might be filled with holes and duplicate
+ * entries. For example:
+ * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
+ * We need to do some sorting to eliminate those holes and duplicate entries.
+ * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
+ */
+ for (i = 0; i < num_of_wbrf_ranges; i++) {
+ start = wifi_bands[i].start;
+ end = wifi_bands[i].end;
+
+ /* get the last valid entry to fill the intermediate hole */
+ if (!start && !end) {
+ for (j = num_of_wbrf_ranges - 1; j > i; j--)
+ if (wifi_bands[j].start && wifi_bands[j].end)
+ break;
+
+ /* no valid entry left */
+ if (j <= i)
+ break;
+
+ start = wifi_bands[i].start = wifi_bands[j].start;
+ end = wifi_bands[i].end = wifi_bands[j].end;
+ wifi_bands[j].start = 0;
+ wifi_bands[j].end = 0;
+ num_of_wbrf_ranges = j;
+ }
+
+ /* eliminate duplicate entries */
+ for (j = i + 1; j < num_of_wbrf_ranges; j++) {
+ if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
+ wifi_bands[j].start = 0;
+ wifi_bands[j].end = 0;
+ }
+ }
+ }
+
+ /* Send the sorted wifi_bands to PMFW */
+ ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
+ /* Try to set the wifi_bands again */
+ if (unlikely(ret == -EBUSY)) {
+ mdelay(5);
+ ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
+ }
+
+ return ret;
+}
+
+/**
+ * smu_wbrf_event_handler - handle notify events
+ *
+ * @nb: notifier block
+ * @action: event type
+ * @_arg: event data
+ *
+ * Calls relevant amdgpu function in response to wbrf event
+ * notification from kernel.
+ */
+static int smu_wbrf_event_handler(struct notifier_block *nb,
+ unsigned long action, void *_arg)
+{
+ struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
+
+ switch (action) {
+ case WBRF_CHANGED:
+ schedule_delayed_work(&smu->wbrf_delayed_work,
+ msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
+ *
+ * @work: struct work_struct pointer
+ *
+ * The event flood is over; the driver now consumes the latest exclusion ranges.
+ */
+static void smu_wbrf_delayed_work_handler(struct work_struct *work)
+{
+ struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
+
+ smu_wbrf_handle_exclusion_ranges(smu);
+}
+
+/**
+ * smu_wbrf_support_check - check wbrf support
+ *
+ * @smu: smu_context pointer
+ *
+ * Checks via the ACPI interface whether wbrf is supported.
+ */
+static void smu_wbrf_support_check(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
+ acpi_amd_wbrf_supported_consumer(adev->dev);
+
+ if (smu->wbrf_supported)
+ dev_info(adev->dev, "RF interference mitigation is supported\n");
+}
+
+/**
+ * smu_wbrf_init - init driver wbrf support
+ *
+ * @smu: smu_context pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the wbrf
+ * notifier chain if wbrf feature is supported.
+ * Returns 0 on success, error on failure.
+ */
+static int smu_wbrf_init(struct smu_context *smu)
+{
+ int ret;
+
+ if (!smu->wbrf_supported)
+ return 0;
+
+ INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
+
+ smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
+ ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
+ if (ret)
+ return ret;
+
+ /*
+ * Some wifiband exclusion ranges may already be present
+ * before our driver is loaded. Make sure our driver is
+ * aware of those exclusion ranges.
+ */
+ schedule_delayed_work(&smu->wbrf_delayed_work,
+ msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
+
+ return 0;
+}
+
+/**
+ * smu_wbrf_fini - tear down driver wbrf support
+ *
+ * @smu: smu_context pointer
+ *
+ * Unregisters with the wbrf notifier chain.
+ */
+static void smu_wbrf_fini(struct smu_context *smu)
+{
+ if (!smu->wbrf_supported)
+ return;
+
+ amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
+
+ cancel_delayed_work_sync(&smu->wbrf_delayed_work);
+}
+
static int smu_smc_hw_setup(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -1414,6 +1595,15 @@ static int smu_smc_hw_setup(struct smu_context *smu)
if (ret)
return ret;
+ /* Enable UclkShadow when wbrf is supported */
+ if (smu->wbrf_supported) {
+ ret = smu_enable_uclk_shadow(smu, true);
+ if (ret) {
+ dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
+ return ret;
+ }
+ }
+
/*
* With SCPM enabled, these actions(and relevant messages) are
* not needed and permitted.
@@ -1512,6 +1702,15 @@ static int smu_smc_hw_setup(struct smu_context *smu)
*/
ret = smu_set_min_dcef_deep_sleep(smu,
smu->smu_table.boot_values.dcefclk / 100);
+ if (ret) {
+ dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
+ return ret;
+ }
+
+ /* Init wbrf support. Properly setup the notifier */
+ ret = smu_wbrf_init(smu);
+ if (ret)
+ dev_err(adev->dev, "Error during wbrf init call\n");
return ret;
}
@@ -1567,6 +1766,13 @@ static int smu_hw_init(void *handle)
return ret;
}
+ /*
+ * Check whether wbrf is supported. This needs to be done
+ * before SMU setup starts since part of SMU configuration
+ * relies on this.
+ */
+ smu_wbrf_support_check(smu);
+
if (smu->is_apu) {
ret = smu_set_gfx_imu_enable(smu);
if (ret)
@@ -1733,6 +1939,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ smu_wbrf_fini(smu);
+
cancel_work_sync(&smu->throttling_logging_work);
cancel_work_sync(&smu->interrupt_work);
@@ -3015,19 +3223,17 @@ static int smu_set_xgmi_pstate(void *handle,
return ret;
}
-static int smu_get_baco_capability(void *handle, bool *cap)
+static bool smu_get_baco_capability(void *handle)
{
struct smu_context *smu = handle;
- *cap = false;
-
if (!smu->pm_enabled)
- return 0;
+ return false;
- if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
- *cap = smu->ppt_funcs->baco_is_support(smu);
+ if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support)
+ return false;
- return 0;
+ return smu->ppt_funcs->baco_is_support(smu);
}
static int smu_baco_set_state(void *handle, int state)
@@ -3201,6 +3407,20 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
return smu->ppt_funcs->get_gpu_metrics(smu, table);
}
+static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
+ size_t size)
+{
+ struct smu_context *smu = handle;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->ppt_funcs->get_pm_metrics)
+ return -EOPNOTSUPP;
+
+ return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
+}
+
static int smu_enable_mgpu_fan_boost(void *handle)
{
struct smu_context *smu = handle;
@@ -3342,6 +3562,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.set_df_cstate = smu_set_df_cstate,
.set_xgmi_pstate = smu_set_xgmi_pstate,
.get_gpu_metrics = smu_sys_get_gpu_metrics,
+ .get_pm_metrics = smu_sys_get_pm_metrics,
.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
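The hole-filling and de-duplication pass performed by smu_wbrf_handle_exclusion_ranges() above can be exercised on its own. Below is a standalone userspace sketch (not part of the patch): the struct and the 11-entry limit are simplified stand-ins for freq_band_range and MAX_NUM_OF_WBRF_RANGES, and the sample bands mirror the example in the code comment.

/* wbrf_compact.c - illustrative re-implementation of the compaction step */
#include <stdio.h>
#include <stdint.h>

#define MAX_RANGES 11			/* stand-in for MAX_NUM_OF_WBRF_RANGES */

struct band { uint64_t start, end; };	/* stand-in for struct freq_band_range */

static void compact_bands(struct band *b, int n)
{
	int i, j;

	for (i = 0; i < n; i++) {
		uint64_t start = b[i].start, end = b[i].end;

		/* hole: pull the last valid entry forward */
		if (!start && !end) {
			for (j = n - 1; j > i; j--)
				if (b[j].start && b[j].end)
					break;
			if (j <= i)
				break;	/* no valid entry left */
			start = b[i].start = b[j].start;
			end = b[i].end = b[j].end;
			b[j].start = b[j].end = 0;
			n = j;
		}

		/* zero out duplicates of the current entry */
		for (j = i + 1; j < n; j++)
			if (b[j].start == start && b[j].end == end)
				b[j].start = b[j].end = 0;
	}
}

int main(void)
{
	struct band b[MAX_RANGES] = {
		{2400, 2500}, {0, 0}, {6882, 6962}, {2400, 2500}, {0, 0}, {6117, 6189},
	};
	int i;

	compact_bands(b, MAX_RANGES);
	/* Expected output: (2400, 2500) (6117, 6189) (6882, 6962) */
	for (i = 0; i < MAX_RANGES && (b[i].start || b[i].end); i++)
		printf("(%llu, %llu)\n",
		       (unsigned long long)b[i].start,
		       (unsigned long long)b[i].end);
	return 0;
}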
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index f8b2e6cc2568..2aa4fea87314 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -22,6 +22,9 @@
#ifndef __AMDGPU_SMU_H__
#define __AMDGPU_SMU_H__
+#include <linux/acpi_amd_wbrf.h>
+#include <linux/units.h>
+
#include "amdgpu.h"
#include "kgd_pp_interface.h"
#include "dm_pp_interface.h"
@@ -253,6 +256,7 @@ struct smu_table {
uint64_t mc_address;
void *cpu_addr;
struct amdgpu_bo *bo;
+ uint32_t version;
};
enum smu_perf_level_designation {
@@ -317,6 +321,7 @@ enum smu_table_id {
SMU_TABLE_PACE,
SMU_TABLE_ECCINFO,
SMU_TABLE_COMBO_PPTABLE,
+ SMU_TABLE_WIFIBAND,
SMU_TABLE_COUNT,
};
@@ -470,6 +475,12 @@ struct stb_context {
#define WORKLOAD_POLICY_MAX 7
+/*
+ * Configure the wbrf event handling pace: at most one event is
+ * processed every SMU_WBRF_EVENT_HANDLING_PACE ms.
+ */
+#define SMU_WBRF_EVENT_HANDLING_PACE 10
+
struct smu_context {
struct amdgpu_device *adev;
struct amdgpu_irq_src irq_source;
@@ -569,6 +580,11 @@ struct smu_context {
struct delayed_work swctf_delayed_work;
enum pp_xgmi_plpd_mode plpd_mode;
+
+ /* data structures for wbrf feature support */
+ bool wbrf_supported;
+ struct notifier_block wbrf_notifier;
+ struct delayed_work wbrf_delayed_work;
};
struct i2c_adapter;
@@ -1253,6 +1269,15 @@ struct pptable_funcs {
ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
/**
+ * @get_pm_metrics: Get one snapshot of power management metrics from
+ * PMFW.
+ *
+ * Return: Size of the metrics sample
+ */
+ ssize_t (*get_pm_metrics)(struct smu_context *smu, void *pm_metrics,
+ size_t size);
+
+ /**
* @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
*/
int (*enable_mgpu_fan_boost)(struct smu_context *smu);
@@ -1365,6 +1390,22 @@ struct pptable_funcs {
* @notify_rlc_state: Notify RLC power state to SMU.
*/
int (*notify_rlc_state)(struct smu_context *smu, bool en);
+
+ /**
+ * @is_asic_wbrf_supported: check whether PMFW supports the wbrf feature
+ */
+ bool (*is_asic_wbrf_supported)(struct smu_context *smu);
+
+ /**
+ * @enable_uclk_shadow: Enable the uclk shadow feature when wbrf is supported
+ */
+ int (*enable_uclk_shadow)(struct smu_context *smu, bool enable);
+
+ /**
+ * @set_wbrf_exclusion_ranges: notify SMU of the occupied wifi bands
+ */
+ int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
+ struct freq_band_range *exclusion_ranges);
};
typedef enum {
@@ -1491,6 +1532,17 @@ enum smu_baco_seq {
__dst_size); \
})
+typedef struct {
+ uint16_t LowFreq;
+ uint16_t HighFreq;
+} WifiOneBand_t;
+
+typedef struct {
+ uint32_t WifiBandEntryNum;
+ WifiOneBand_t WifiBandEntry[11];
+ uint32_t MmHubPadding[8];
+} WifiBandEntryTable_t;
+
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index 9dd1ed5b8940..b114d14fc053 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -1615,7 +1615,8 @@ typedef struct {
#define TABLE_I2C_COMMANDS 9
#define TABLE_DRIVER_INFO 10
#define TABLE_ECCINFO 11
-#define TABLE_COUNT 12
+#define TABLE_WIFIBAND 12
+#define TABLE_COUNT 13
//IH Interupt ID
#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index 62b7c0daff68..8b1496f8ce58 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -1605,7 +1605,8 @@ typedef struct {
#define TABLE_I2C_COMMANDS 9
#define TABLE_DRIVER_INFO 10
#define TABLE_ECCINFO 11
-#define TABLE_COUNT 12
+#define TABLE_WIFIBAND 12
+#define TABLE_COUNT 13
//IH Interupt ID
#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 8f42771e1f0a..5bb7a63c0602 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -24,11 +24,6 @@
#ifndef SMU14_DRIVER_IF_V14_0_0_H
#define SMU14_DRIVER_IF_V14_0_0_H
-// *** IMPORTANT ***
-// SMU TEAM: Always increment the interface version if
-// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 7
-
typedef struct {
int32_t value;
uint32_t numFractionalBits;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
index e2ee855c7748..e862d323caab 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
@@ -138,10 +138,9 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
-
#define PPSMC_MSG_DALNotPresent 0x4E
-
-#define PPSMC_Message_Count 0x4F
+#define PPSMC_MSG_EnableUCLKShadow 0x51
+#define PPSMC_Message_Count 0x52
//Debug Dump Message
#define DEBUGSMC_MSG_TestMessage 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
index 6aaefca9b595..a6bf9cdd130e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
@@ -134,6 +134,7 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
-#define PPSMC_Message_Count 0x4D
+#define PPSMC_MSG_EnableUCLKShadow 0x51
+#define PPSMC_Message_Count 0x52
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 9dd47d91093e..953a767613b1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -259,7 +259,9 @@
__SMU_DUMMY_MAP(PowerUpUmsch), \
__SMU_DUMMY_MAP(PowerDownUmsch), \
__SMU_DUMMY_MAP(SetSoftMaxVpe), \
- __SMU_DUMMY_MAP(SetSoftMinVpe),
+ __SMU_DUMMY_MAP(SetSoftMinVpe), \
+ __SMU_DUMMY_MAP(GetMetricsVersion), \
+ __SMU_DUMMY_MAP(EnableUCLKShadow),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 95cb919718ae..fbd57fa1a004 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -210,15 +210,8 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu);
int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
-int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
- enum smu_baco_seq baco_seq);
-
bool smu_v13_0_baco_is_support(struct smu_context *smu);
-enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu);
-
-int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
-
int smu_v13_0_baco_enter(struct smu_context *smu);
int smu_v13_0_baco_exit(struct smu_context *smu);
@@ -301,5 +294,9 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
int smu_v13_0_disable_pmfw_state(struct smu_context *smu);
+int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable);
+
+int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
+ struct freq_band_range *exclusion_ranges);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index a5b569976f19..3f7463c1c1a9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -26,8 +26,8 @@
#include "amdgpu_smu.h"
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x6
#define FEATURE_MASK(feature) (1ULL << feature)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 2cb6b68222ba..4cd43bbec910 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2407,8 +2407,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
.baco_exit = smu_v11_0_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index a38233cc5b7f..8d1d29ffb0f1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3537,8 +3537,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = navi10_baco_enter,
.baco_exit = navi10_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 1de9f8b5cc5f..21fc033528fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -4428,8 +4428,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = sienna_cichlid_baco_enter,
.baco_exit = sienna_cichlid_baco_exit,
.mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index cf1b84060bc3..771a3d457c33 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2199,7 +2199,7 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
return ret;
}
-int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
+static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
enum smu_baco_seq baco_seq)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -2221,33 +2221,14 @@ int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
return 0;
}
-bool smu_v13_0_baco_is_support(struct smu_context *smu)
-{
- struct smu_baco_context *smu_baco = &smu->smu_baco;
-
- if (amdgpu_sriov_vf(smu->adev) ||
- !smu_baco->platform_support)
- return false;
-
- /* return true if ASIC is in BACO state already */
- if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
- return true;
-
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
- !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
- return false;
-
- return true;
-}
-
-enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
+static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
return smu_baco->state;
}
-int smu_v13_0_baco_set_state(struct smu_context *smu,
+static int smu_v13_0_baco_set_state(struct smu_context *smu,
enum smu_baco_state state)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -2281,24 +2262,60 @@ int smu_v13_0_baco_set_state(struct smu_context *smu,
return ret;
}
-int smu_v13_0_baco_enter(struct smu_context *smu)
+bool smu_v13_0_baco_is_support(struct smu_context *smu)
{
- int ret = 0;
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
- ret = smu_v13_0_baco_set_state(smu,
- SMU_BACO_STATE_ENTER);
- if (ret)
- return ret;
+ if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
+ return false;
+
+ /* return true if ASIC is in BACO state already */
+ if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+ return true;
- msleep(10);
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ return false;
- return ret;
+ return true;
+}
+
+int smu_v13_0_baco_enter(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ return smu_v13_0_baco_set_armd3_sequence(smu,
+ (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ } else {
+ ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
+ if (!ret)
+ usleep_range(10000, 11000);
+
+ return ret;
+ }
}
int smu_v13_0_baco_exit(struct smu_context *smu)
{
- return smu_v13_0_baco_set_state(smu,
- SMU_BACO_STATE_EXIT);
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ /* Wait for PMFW handling for the Dstate change */
+ usleep_range(10000, 11000);
+ ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
+ }
+
+ if (!ret)
+ adev->gfx.is_poweron = false;
+
+ return ret;
}
int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
@@ -2490,3 +2507,51 @@ int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
return ret == 0 ? 0 : -EINVAL;
}
+
+int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL);
+}
+
+int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
+ struct freq_band_range *exclusion_ranges)
+{
+ WifiBandEntryTable_t wifi_bands;
+ int valid_entries = 0;
+ int ret, i;
+
+ memset(&wifi_bands, 0, sizeof(wifi_bands));
+ for (i = 0; i < ARRAY_SIZE(wifi_bands.WifiBandEntry); i++) {
+ if (!exclusion_ranges[i].start && !exclusion_ranges[i].end)
+ break;
+
+ /* PMFW expects the inputs to be in MHz units */
+ wifi_bands.WifiBandEntry[valid_entries].LowFreq =
+ DIV_ROUND_DOWN_ULL(exclusion_ranges[i].start, HZ_PER_MHZ);
+ wifi_bands.WifiBandEntry[valid_entries++].HighFreq =
+ DIV_ROUND_UP_ULL(exclusion_ranges[i].end, HZ_PER_MHZ);
+ }
+ wifi_bands.WifiBandEntryNum = valid_entries;
+
+ /*
+ * Per confirmation from the PMFW team, WifiBandEntryNum = 0
+ * is a valid setting.
+ *
+ * Considering the scenarios below:
+ * - At first the wifi device adds an exclusion range e.g. (2400,2500) to
+ * BIOS and our driver gets notified. We will set WifiBandEntryNum = 1
+ * and pass the WifiBandEntry (2400, 2500) to PMFW.
+ *
+ * - Later the wifi device removes the wifiband list added above and
+ * our driver gets notified again. At this time, driver will set
+ * WifiBandEntryNum = 0 and pass an empty WifiBandEntry list to PMFW.
+ *
+ * - PMFW may still need to do some uclk shadow update (e.g. switching
+ * from shadow clock back to primary clock) on receiving this.
+ */
+ ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true);
+ if (ret)
+ dev_warn(smu->adev->dev, "Failed to set wifiband!");
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 82c4e1f1c6f0..231122622a9c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -169,6 +169,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0),
+ MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -253,6 +254,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(I2C_COMMANDS),
TAB_MAP(ECCINFO),
TAB_MAP(OVERDRIVE),
+ TAB_MAP(WIFIBAND),
};
static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -498,6 +500,9 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND,
+ sizeof(WifiBandEntryTable_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -2540,16 +2545,19 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
workload_mask = 1 << workload_type;
- /* Add optimizations for SMU13.0.0. Reuse the power saving profile */
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7400))) {
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- PP_SMC_POWER_PROFILE_POWERSAVING);
- if (workload_type >= 0)
- workload_mask |= 1 << workload_type;
+ /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
+ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500)) {
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_POWERSAVING);
+ if (workload_type >= 0)
+ workload_mask |= 1 << workload_type;
+ }
}
return smu_cmn_send_smc_msg_with_param(smu,
@@ -2558,38 +2566,6 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
NULL);
}
-static int smu_v13_0_0_baco_enter(struct smu_context *smu)
-{
- struct smu_baco_context *smu_baco = &smu->smu_baco;
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
- return smu_v13_0_baco_set_armd3_sequence(smu,
- (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
- BACO_SEQ_BAMACO : BACO_SEQ_BACO);
- else
- return smu_v13_0_baco_enter(smu);
-}
-
-static int smu_v13_0_0_baco_exit(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret;
-
- if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
- /* Wait for PMFW handling for the Dstate change */
- usleep_range(10000, 11000);
- ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
- } else {
- ret = smu_v13_0_baco_exit(smu);
- }
-
- if (!ret)
- adev->gfx.is_poweron = false;
-
- return ret;
-}
-
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2970,6 +2946,20 @@ static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
return ret;
}
+static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ return smu->smc_fw_version >= 0x004e6300;
+ case IP_VERSION(13, 0, 10):
+ return smu->smc_fw_version >= 0x00503300;
+ default:
+ return false;
+ }
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3035,10 +3025,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.deep_sleep_control = smu_v13_0_deep_sleep_control,
.gfx_ulv_control = smu_v13_0_gfx_ulv_control,
.baco_is_support = smu_v13_0_baco_is_support,
- .baco_get_state = smu_v13_0_baco_get_state,
- .baco_set_state = smu_v13_0_baco_set_state,
- .baco_enter = smu_v13_0_0_baco_enter,
- .baco_exit = smu_v13_0_0_baco_exit,
+ .baco_enter = smu_v13_0_baco_enter,
+ .baco_exit = smu_v13_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_0_mode1_reset,
.mode2_reset = smu_v13_0_0_mode2_reset,
@@ -3050,6 +3038,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.gpo_control = smu_v13_0_gpo_control,
.get_ecc_info = smu_v13_0_0_get_ecc_info,
.notify_display_change = smu_v13_0_notify_display_change,
+ .is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check,
+ .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
+ .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index b64e07b75937..4ebc6b421c2c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -120,6 +120,7 @@ struct mca_ras_info {
#define P2S_TABLE_ID_A 0x50325341
#define P2S_TABLE_ID_X 0x50325358
+// clang-format off
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -128,6 +129,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
+ MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
@@ -171,6 +173,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
};
+// clang-format on
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
CLK_MAP(FCLK, PPCLK_FCLK),
@@ -432,6 +435,41 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
return 0;
}
+static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
+ void *metrics, size_t max_size)
+{
+ struct smu_table_context *smu_tbl_ctxt = &smu->smu_table;
+ uint32_t table_version = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].version;
+ uint32_t table_size = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].size;
+ struct amdgpu_pm_metrics *pm_metrics = metrics;
+ uint32_t pmfw_version;
+ int ret;
+
+ if (!pm_metrics || !max_size)
+ return -EINVAL;
+
+ if (max_size < (table_size + sizeof(pm_metrics->common_header)))
+ return -EOVERFLOW;
+
+ /* Don't use cached metrics data */
+ ret = smu_v13_0_6_get_metrics_table(smu, pm_metrics->data, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
+
+ memset(&pm_metrics->common_header, 0,
+ sizeof(pm_metrics->common_header));
+ pm_metrics->common_header.mp1_ip_discovery_version =
+ IP_VERSION(13, 0, 6);
+ pm_metrics->common_header.pmfw_version = pmfw_version;
+ pm_metrics->common_header.pmmetrics_version = table_version;
+ pm_metrics->common_header.structure_size =
+ sizeof(pm_metrics->common_header) + table_size;
+
+ return pm_metrics->common_header.structure_size;
+}
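A minimal caller-side sketch of the buffer contract enforced above, for illustration only (not part of the patch): the caller must supply at least sizeof(common_header) plus the metrics table size, and the positive return value is the number of valid bytes. The helper name and the PAGE_SIZE bound are assumptions:

static ssize_t example_read_pm_metrics(struct smu_context *smu)
{
	struct amdgpu_pm_metrics *buf;
	ssize_t n;

	/* PAGE_SIZE assumed to cover header + metrics table */
	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	n = smu_v13_0_6_get_pm_metrics(smu, buf, PAGE_SIZE);
	if (n > 0)
		pr_debug("pm metrics v%u, %u bytes\n",
			 buf->common_header.pmmetrics_version,
			 buf->common_header.structure_size);

	kfree(buf);
	return n;
}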
+
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -441,6 +479,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
(struct PPTable_t *)smu_table->driver_pptable;
struct amdgpu_device *adev = smu->adev;
int ret, i, retry = 100;
+ uint32_t table_version;
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
@@ -459,6 +498,13 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
if (!retry)
return -ETIME;
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
+ &table_version);
+ if (ret)
+ return ret;
+ smu_table->tables[SMU_TABLE_SMU_METRICS].version =
+ table_version;
+
pptable->MaxSocketPowerLimit =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
pptable->MaxGfxclkFrequency =
@@ -1477,7 +1523,6 @@ static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
if (smu->smc_fw_version < 0x554800)
return 0;
- amdgpu_ras_set_mca_debug_mode(smu->adev, enable);
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK,
NULL);
@@ -2329,16 +2374,6 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
return ret;
}
-static int smu_v13_0_6_post_init(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
-
- if (!amdgpu_sriov_vf(adev) && adev->ras_enabled)
- return smu_v13_0_6_mca_set_debug_mode(smu, false);
-
- return 0;
-}
-
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -2421,8 +2456,8 @@ static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT
static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info)
{
- uint64_t ipid = entry->regs[MCA_REG_IDX_IPID];
- uint32_t insthi;
+ u64 ipid = entry->regs[MCA_REG_IDX_IPID];
+ u32 instidhi, instid;
/* NOTE: All MCA IPID registers share the same format,
* so the driver can share the MCMP1 register header file.
@@ -2431,9 +2466,15 @@ static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_
info->hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
info->mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
- insthi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi);
- info->aid = ((insthi >> 2) & 0x03);
- info->socket_id = insthi & 0x03;
+ /*
+ * Unified DieID Format: SAASS. A:AID, S:Socket.
+ * Unified DieID[4] = InstanceId[0]
+ * Unified DieID[0:3] = InstanceIdHi[0:3]
+ */
+ instidhi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi);
+ instid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo);
+ info->aid = ((instidhi >> 2) & 0x03);
+ info->socket_id = ((instid & 0x1) << 2) | (instidhi & 0x03);
}
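A worked example of the decode above, for illustration only; the input values are arbitrary and follow the SAASS layout described in the comment:

/*
 * InstanceIdHi = 0xE (0b1110), InstanceIdLo bit 0 = 1:
 *   aid       = (0b1110 >> 2) & 0x03       = 3
 *   socket_id = (1 << 2) | (0b1110 & 0x03) = 6
 * i.e. unified DieID "SAASS" = 1 11 10, meaning AID 3 on socket 6.
 */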
static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
@@ -2512,9 +2553,9 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
return 0;
}
- if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0))
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0))
*count = 1;
- else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0))
+ else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0))
*count = 1;
return 0;
@@ -2525,13 +2566,15 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st
uint32_t *count)
{
u32 ext_error_code;
+ u32 err_cnt;
ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]);
+ err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0)
- *count = 1;
+ *count = err_cnt;
else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6)
- *count = 1;
+ *count = err_cnt;
return 0;
}
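The change above stops hard-coding a count of 1 for PCS XGMI banks and instead reports the hardware error counter taken from the bank's MISC0 register (MCA_REG__MISC0__ERRCNT), so a single logged bank entry can account for several accumulated errors; the UE/CE split by extended error code is unchanged.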
@@ -2607,6 +2650,7 @@ static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct
uint32_t instlo;
instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
+ instlo &= GENMASK(31, 1);
switch (instlo) {
case 0x36430400: /* SMNAID XCD 0 */
case 0x38430400: /* SMNAID XCD 1 */
@@ -2626,6 +2670,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd
uint32_t errcode, instlo;
instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
+ instlo &= GENMASK(31, 1);
if (instlo != 0x03b30400)
return false;
@@ -2848,6 +2893,13 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
return ret;
}
+static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ /* ECC info is supported by default */
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -2892,6 +2944,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
+ .get_pm_metrics = smu_v13_0_6_get_pm_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
@@ -2901,7 +2954,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
- .post_init = smu_v13_0_6_post_init,
+ .get_ecc_info = smu_v13_0_6_get_ecc_info,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 81eafed76045..59606a19e3d2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -140,6 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
+ MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -222,6 +223,7 @@ static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(ACTIVITY_MONITOR_COEFF),
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
TAB_MAP(OVERDRIVE),
+ TAB_MAP(WIFIBAND),
};
static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -512,6 +514,9 @@ static int smu_v13_0_7_tables_init(struct smu_context *smu)
AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND,
+ sizeof(WifiBandEntryTable_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -2515,38 +2520,6 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
-static int smu_v13_0_7_baco_enter(struct smu_context *smu)
-{
- struct smu_baco_context *smu_baco = &smu->smu_baco;
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
- return smu_v13_0_baco_set_armd3_sequence(smu,
- (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
- BACO_SEQ_BAMACO : BACO_SEQ_BACO);
- else
- return smu_v13_0_baco_enter(smu);
-}
-
-static int smu_v13_0_7_baco_exit(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret;
-
- if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
- /* Wait for PMFW handling for the Dstate change */
- usleep_range(10000, 11000);
- ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
- } else {
- ret = smu_v13_0_baco_exit(smu);
- }
-
- if (!ret)
- adev->gfx.is_poweron = false;
-
- return ret;
-}
-
static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2567,6 +2540,11 @@ static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
NULL);
}
+static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu)
+{
+ return smu->smc_fw_version > 0x00524600;
+}
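Assuming the usual SMU13 PMFW version packing of 0x00MMmmpp (major/minor/patch; an assumption, not stated in this patch), the 0x00524600 cutoff above decodes as firmware 82.70.0, so WBRF is only reported on firmware newer than that.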
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2626,15 +2604,16 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.baco_is_support = smu_v13_0_baco_is_support,
- .baco_get_state = smu_v13_0_baco_get_state,
- .baco_set_state = smu_v13_0_baco_set_state,
- .baco_enter = smu_v13_0_7_baco_enter,
- .baco_exit = smu_v13_0_7_baco_exit,
+ .baco_enter = smu_v13_0_baco_enter,
+ .baco_exit = smu_v13_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
.set_df_cstate = smu_v13_0_7_set_df_cstate,
.gpo_control = smu_v13_0_gpo_control,
+ .is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
+ .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
+ .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index d8f8ad0e7137..4894f7ee737b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -224,7 +224,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
if (smu->is_apu)
adev->pm.fw_version = smu_version;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 2):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
@@ -235,7 +235,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
- adev->ip_versions[MP1_HWIP][0]);
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
break;
}
@@ -733,7 +733,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 0):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 94ccdbfd7090..47fdbae4adfc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1085,6 +1085,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ uint8_t idx;
+
+ /* Only the SOC and VPE clock information is copied, to provide the VPE DPM settings. */
+ for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
+ clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx] : 0;
+ clock_table->SocClocks[idx].Vol = 0;
+ }
+
+ for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
+ clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx] : 0;
+ clock_table->VPEClocks[idx].Vol = 0;
+ }
+
+ return 0;
+}
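A minimal consumer sketch of the table filled in above, for illustration only (not part of the patch): entries at or beyond the enabled level count are zeroed, so a caller can skip zero frequencies. The helper name is an assumption:

static void example_dump_vpe_dpm(struct smu_context *smu)
{
	struct dpm_clocks clocks = {};
	uint8_t i;

	if (smu_14_0_0_get_dpm_table(smu, &clocks))
		return;

	for (i = 0; i < NUM_VPE_DPM_LEVELS; i++)
		if (clocks.VPEClocks[i].Freq)
			pr_debug("VPE DPM level %u: freq %u\n",
				 i, clocks.VPEClocks[i].Freq);
}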
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1115,6 +1134,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
+ .get_dpm_clock_table = smu_14_0_0_get_dpm_table,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 64766ac69c53..6f4d212607d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -98,6 +98,9 @@
#define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table)
#define smu_init_pptable_microcode(smu) smu_ppt_funcs(init_pptable_microcode, 0, smu)
#define smu_notify_rlc_state(smu, en) smu_ppt_funcs(notify_rlc_state, 0, smu, en)
+#define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu)
+#define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable)
+#define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range)
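A short sketch of how the new wrappers are meant to be used, for illustration only: per the existing smu_ppt_funcs() dispatcher defined earlier in this header, the second macro argument is returned when an ASIC does not implement the callback (false, 0 and -EOPNOTSUPP respectively for the three wrappers above). The helper name and the freq_band_range parameter type are assumptions:

static int example_apply_wbrf(struct smu_context *smu,
			      struct freq_band_range *ranges)
{
	/* evaluates to false on ASICs without the hook */
	if (!smu_is_asic_wbrf_supported(smu))
		return 0;

	/* returns -EOPNOTSUPP on ASICs without the hook */
	return smu_set_wbrf_exclusion_ranges(smu, ranges);
}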
#endif
#endif