Diffstat (limited to 'drivers/gpu/drm/amd/pm/amdgpu_dpm.c')
-rw-r--r--	drivers/gpu/drm/amd/pm/amdgpu_dpm.c	173
1 file changed, 113 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 6627ee07d52d..7a22aef6e59c 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -70,13 +70,18 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
return ret;
}
-int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
+int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
+ uint32_t block_type,
+ bool gate,
+ int inst)
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
+ bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;
- if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
+ if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
+ (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
dev_dbg(adev->dev, "IP block%d already in the target %s state!",
block_type, gate ? "gate" : "ungate");
return 0;
@@ -88,7 +93,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE:
case AMD_IP_BLOCK_TYPE_GFX:
- case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
case AMD_IP_BLOCK_TYPE_JPEG:
case AMD_IP_BLOCK_TYPE_GMC:
@@ -96,7 +100,12 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_VPE:
if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
+ (adev)->powerplay.pp_handle, block_type, gate, 0));
+ break;
+ case AMD_IP_BLOCK_TYPE_VCN:
+ if (pp_funcs && pp_funcs->set_powergating_by_smu)
+ ret = (pp_funcs->set_powergating_by_smu(
+ (adev)->powerplay.pp_handle, block_type, gate, inst));
break;
default:
break;
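
The new inst argument only matters for VCN; every other block type passes 0. A hedged sketch of a caller driving each VCN instance separately; the helper below is illustrative, only amdgpu_dpm_set_powergating_by_smu() and adev->vcn.num_vcn_inst appear in this diff:

/* Illustrative only: gate or ungate every VCN instance in turn. */
static int example_gate_all_vcn(struct amdgpu_device *adev, bool gate)
{
	int i, ret;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		ret = amdgpu_dpm_set_powergating_by_smu(adev,
				AMD_IP_BLOCK_TYPE_VCN, gate, i);
		if (ret)
			return ret;
	}
	return 0;
}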
@@ -168,7 +177,11 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (pp_funcs && pp_funcs->set_mp1_state) {
+ if (mp1_state == PP_MP1_STATE_FLR) {
+ /* VF lost access to SMU */
+ if (amdgpu_sriov_vf(adev))
+ adev->pm.dpm_enabled = false;
+ } else if (pp_funcs && pp_funcs->set_mp1_state) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_mp1_state(
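
With PP_MP1_STATE_FLR the wrapper no longer touches the SMU at all: a VF that has just gone through function-level reset has lost its SMU mailbox, so DPM is simply marked disabled. A minimal caller sketch; the handler itself is an assumption, the state constant is from this diff:

/* Hedged sketch: under FLR the wrapper skips the SMU entirely and,
 * for an SR-IOV VF, just clears adev->pm.dpm_enabled. */
static void example_flr_notify(struct amdgpu_device *adev)
{
	amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_FLR);
}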
@@ -199,14 +212,14 @@ int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
return ret;
}
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
- bool ret;
+ int ret;
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
- return false;
+ return 0;
/* Don't use baco for reset in S3.
* This is a workaround for some platforms
* where entering BACO during suspend
@@ -217,7 +230,7 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
* devices. Needs more investigation.
*/
if (adev->in_s3)
- return false;
+ return 0;
mutex_lock(&adev->pm.mutex);
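
Callers now receive an int rather than a bool, so "unsupported" and an error can be told apart. A hedged caller sketch, assuming a positive return means BACO is usable; only the wrapper name and signature come from this diff:

/* Illustrative: prefer BACO for reset only when the query says so. */
static bool example_can_use_baco(struct amdgpu_device *adev)
{
	/* Treat zero (unsupported) and negative (error) alike here. */
	return amdgpu_dpm_is_baco_supported(adev) > 0;
}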
@@ -368,43 +381,30 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
return ret;
}
-int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
+ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
+ enum pp_pm_policy p_type, char *buf)
{
struct smu_context *smu = adev->powerplay.pp_handle;
- int mode = XGMI_PLPD_NONE;
+ int ret = -EOPNOTSUPP;
if (is_support_sw_smu(adev)) {
- mode = smu->plpd_mode;
- if (mode_desc == NULL)
- return mode;
- switch (smu->plpd_mode) {
- case XGMI_PLPD_DISALLOW:
- *mode_desc = "disallow";
- break;
- case XGMI_PLPD_DEFAULT:
- *mode_desc = "default";
- break;
- case XGMI_PLPD_OPTIMIZED:
- *mode_desc = "optimized";
- break;
- case XGMI_PLPD_NONE:
- default:
- *mode_desc = "none";
- break;
- }
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_pm_policy_info(smu, p_type, buf);
+ mutex_unlock(&adev->pm.mutex);
}
- return mode;
+ return ret;
}
-int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
+int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
+ int policy_level)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = -EOPNOTSUPP;
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
- ret = smu_set_xgmi_plpd_mode(smu, mode);
+ ret = smu_set_pm_policy(smu, policy_type, policy_level);
mutex_unlock(&adev->pm.mutex);
}
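
The XGMI per-link power-down interface is folded into the generic pm_policy pair above. A hedged sketch of equivalent usage through the new API; PP_PM_POLICY_XGMI_PLPD and the level value are assumptions, only the two wrappers are defined in this diff:

/* Illustrative replacement for the old get/set_xgmi_plpd_mode calls;
 * buf is a sysfs-sized buffer supplied by the caller. */
static int example_switch_plpd(struct amdgpu_device *adev, int level, char *buf)
{
	ssize_t len = amdgpu_dpm_get_pm_policy_info(adev,
						    PP_PM_POLICY_XGMI_PLPD, buf);
	if (len < 0)
		return len;

	return amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, level);
}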
@@ -575,7 +575,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
return;
}
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+}
+
+void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
+{
+ int ret = 0;
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
if (ret)
DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
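
amdgpu_dpm_enable_vcn() is the per-instance counterpart of the UVD/VCE helpers; note it closes over the old context lines, so its error string still reads "uvd". A hedged sketch of ring hooks using it; the ring fields, and ring->me as the instance index, are assumptions:

/* Illustrative begin_use/end_use pairing for one VCN instance. */
static void example_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	amdgpu_dpm_enable_vcn(ring->adev, true, ring->me);
}

static void example_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	amdgpu_dpm_enable_vcn(ring->adev, false, ring->me);
}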
@@ -600,7 +610,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
return;
}
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -610,7 +620,7 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -620,7 +630,7 @@ void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
enable ? "enable" : "disable", ret);
@@ -631,7 +641,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int r = 0;
- if (!pp_funcs || !pp_funcs->load_firmware)
+ if (!pp_funcs || !pp_funcs->load_firmware ||
+ (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
return 0;
mutex_lock(&adev->pm.mutex);
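
The extra guard makes amdgpu_pm_load_smu_firmware() a no-op on sw-SMU APUs, where (by assumption here) the SMU firmware is brought up outside this path. Callers need no change, since a skipped load already returned 0:

/* Illustrative call site: a zero return covers both "loaded" and
 * "nothing to load" (sw-SMU APU), so no special-casing is needed. */
uint32_t smu_version = 0;

if (amdgpu_pm_load_smu_firmware(adev, &smu_version))
	dev_err(adev->dev, "failed to load SMU firmware\n");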
@@ -693,6 +704,36 @@ int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t si
return ret;
}
+int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_send_rma_reason(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_sdma(smu, inst_mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
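
Both new wrappers follow the same shape: sw-SMU only, taken under adev->pm.mutex, -EOPNOTSUPP otherwise. amdgpu_dpm_send_rma_reason() flags the part for RMA; amdgpu_dpm_reset_sdma() asks firmware to reset the SDMA instances in inst_mask. A hedged caller sketch; the handler and the BIT(inst) mask encoding are assumptions:

/* Illustrative: try a firmware-backed reset of one SDMA instance. */
static int example_reset_sdma_inst(struct amdgpu_device *adev, int inst)
{
	int ret = amdgpu_dpm_reset_sdma(adev, BIT(inst));

	if (ret == -EOPNOTSUPP)
		dev_dbg(adev->dev, "SMU cannot reset SDMA, using legacy path\n");
	return ret;
}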
@@ -946,6 +987,24 @@ enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device
return level;
}
+static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
+{
+ /* enter UMD Pstate */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+}
+
+static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
+{
+ /* exit UMD Pstate */
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+}
+
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
enum amd_dpm_forced_level level)
{
@@ -966,6 +1025,10 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
if (current_level == level)
return 0;
+ if (!(current_level & profile_mode_mask) &&
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
+ return -EINVAL;
+
if (adev->asic_type == CHIP_RAVEN) {
if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
@@ -977,35 +1040,25 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
}
}
- if (!(current_level & profile_mode_mask) &&
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
- return -EINVAL;
-
- if (!(current_level & profile_mode_mask) &&
- (level & profile_mode_mask)) {
- /* enter UMD Pstate */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_UNGATE);
- amdgpu_device_ip_set_clockgating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- } else if ((current_level & profile_mode_mask) &&
- !(level & profile_mode_mask)) {
- /* exit UMD Pstate */
- amdgpu_device_ip_set_clockgating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
- }
+ if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
+ amdgpu_dpm_enter_umd_state(adev);
+ else if ((current_level & profile_mode_mask) &&
+ !(level & profile_mode_mask))
+ amdgpu_dpm_exit_umd_state(adev);
mutex_lock(&adev->pm.mutex);
if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
level)) {
mutex_unlock(&adev->pm.mutex);
+ /* If new level failed, retain the umd state as before */
+ if (!(current_level & profile_mode_mask) &&
+ (level & profile_mode_mask))
+ amdgpu_dpm_exit_umd_state(adev);
+ else if ((current_level & profile_mode_mask) &&
+ !(level & profile_mode_mask))
+ amdgpu_dpm_enter_umd_state(adev);
+
return -EINVAL;
}
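
The net effect: the UMD-pstate enter/exit logic now lives in the two helpers, runs only on real profile-mode transitions, and is rolled back if the SMU rejects the new level, so GFX clock- and powergating state always matches the level actually in force. A hedged end-to-end sketch; the level constant is a standard amd_dpm_forced_level value, the call itself is from this diff:

/* Illustrative: request a profiling level; on failure the helper has
 * already undone the UMD-pstate transition, so no cleanup is needed. */
if (amdgpu_dpm_force_performance_level(adev,
				       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
	dev_warn(adev->dev, "profile peak level rejected by SMU\n");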