Diffstat (limited to 'drivers/gpu/drm/amd/pm')
87 files changed, 11619 insertions, 6169 deletions
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index f84bfed50681..5c1cbdc122d2 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -70,13 +70,18 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 	return ret;
 }
 
-int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
+int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
+				      uint32_t block_type,
+				      bool gate,
+				      int inst)
 {
 	int ret = 0;
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
+	bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;
 
-	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
+	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
+	    (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
 		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
 			block_type, gate ? "gate" : "ungate");
 		return 0;
@@ -88,7 +93,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 	case AMD_IP_BLOCK_TYPE_UVD:
 	case AMD_IP_BLOCK_TYPE_VCE:
 	case AMD_IP_BLOCK_TYPE_GFX:
-	case AMD_IP_BLOCK_TYPE_VCN:
 	case AMD_IP_BLOCK_TYPE_SDMA:
 	case AMD_IP_BLOCK_TYPE_JPEG:
 	case AMD_IP_BLOCK_TYPE_GMC:
@@ -96,7 +100,12 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 	case AMD_IP_BLOCK_TYPE_VPE:
 		if (pp_funcs && pp_funcs->set_powergating_by_smu)
 			ret = (pp_funcs->set_powergating_by_smu(
-				(adev)->powerplay.pp_handle, block_type, gate));
+				(adev)->powerplay.pp_handle, block_type, gate, 0));
+		break;
+	case AMD_IP_BLOCK_TYPE_VCN:
+		if (pp_funcs && pp_funcs->set_powergating_by_smu)
+			ret = (pp_funcs->set_powergating_by_smu(
+				(adev)->powerplay.pp_handle, block_type, gate, inst));
 		break;
 	default:
 		break;
@@ -168,7 +177,11 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 	int ret = 0;
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-	if (pp_funcs && pp_funcs->set_mp1_state) {
+	if (mp1_state == PP_MP1_STATE_FLR) {
+		/* VF lost access to SMU */
+		if (amdgpu_sriov_vf(adev))
+			adev->pm.dpm_enabled = false;
+	} else if (pp_funcs && pp_funcs->set_mp1_state) {
 		mutex_lock(&adev->pm.mutex);
 
 		ret = pp_funcs->set_mp1_state(
@@ -199,14 +212,14 @@ int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
 	return ret;
 }
 
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	void *pp_handle = adev->powerplay.pp_handle;
-	bool ret;
+	int ret;
 
 	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
-		return false;
+		return 0;
 	/* Don't use baco for reset in S3.
 	 * This is a workaround for some platforms
 	 * where entering BACO during suspend
@@ -217,7 +230,7 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
 	 * devices. Needs more investigation.
 	 */
 	if (adev->in_s3)
-		return false;
+		return 0;
 
 	mutex_lock(&adev->pm.mutex);
 
@@ -316,6 +329,34 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
 	return ret;
 }
 
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
+{
+	struct smu_context *smu = adev->powerplay.pp_handle;
+	bool support_link_reset = false;
+
+	if (is_support_sw_smu(adev)) {
+		mutex_lock(&adev->pm.mutex);
+		support_link_reset = smu_link_reset_is_support(smu);
+		mutex_unlock(&adev->pm.mutex);
+	}
+
+	return support_link_reset;
+}
+
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
+{
+	struct smu_context *smu = adev->powerplay.pp_handle;
+	int ret = -EOPNOTSUPP;
+
+	if (is_support_sw_smu(adev)) {
+		mutex_lock(&adev->pm.mutex);
+		ret = smu_link_reset(smu);
+		mutex_unlock(&adev->pm.mutex);
+	}
+
+	return ret;
+}
+
 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 				    enum PP_SMC_POWER_PROFILE type,
 				    bool en)
@@ -336,6 +377,25 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 	return ret;
 }
 
+int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
+				   bool pause)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	int ret = 0;
+
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	if (pp_funcs && pp_funcs->pause_power_profile) {
+		mutex_lock(&adev->pm.mutex);
+		ret = pp_funcs->pause_power_profile(
+			adev->powerplay.pp_handle, pause);
+		mutex_unlock(&adev->pm.mutex);
+	}
+
+	return ret;
+}
+
 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
 			       uint32_t pstate)
 {
@@ -368,43 +428,30 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
 	return ret;
 }
 
-int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
+ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
+				      enum pp_pm_policy p_type, char *buf)
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
-	int mode = XGMI_PLPD_NONE;
+	int ret = -EOPNOTSUPP;
 
 	if (is_support_sw_smu(adev)) {
-		mode = smu->plpd_mode;
-		if (mode_desc == NULL)
-			return mode;
-		switch (smu->plpd_mode) {
-		case XGMI_PLPD_DISALLOW:
-			*mode_desc = "disallow";
-			break;
-		case XGMI_PLPD_DEFAULT:
-			*mode_desc = "default";
-			break;
-		case XGMI_PLPD_OPTIMIZED:
-			*mode_desc = "optimized";
-			break;
-		case XGMI_PLPD_NONE:
-		default:
-			*mode_desc = "none";
-			break;
-		}
+		mutex_lock(&adev->pm.mutex);
+		ret = smu_get_pm_policy_info(smu, p_type, buf);
+		mutex_unlock(&adev->pm.mutex);
 	}
 
-	return mode;
+	return ret;
 }
 
-int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
+int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
+			     int policy_level)
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int ret = -EOPNOTSUPP;
 
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
-		ret = smu_set_xgmi_plpd_mode(smu, mode);
+		ret = smu_set_pm_policy(smu, policy_type, policy_level);
 		mutex_unlock(&adev->pm.mutex);
 	}
 
@@ -575,7 +622,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 		return;
 	}
 
-	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
+	if (ret)
+		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+			  enable ? "enable" : "disable", ret);
+}
+
+void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
+{
+	int ret = 0;
+
+	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
 	if (ret)
 		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
 			  enable ? "enable" : "disable", ret);
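
amdgpu_dpm_enable_vcn() above is the per-instance entry point that replaces the old
VCN path through the generic powergating switch. A minimal sketch of how a caller
might drive it on a hypothetical two-instance part (the instance numbers here are
illustrative only, not taken from this patch):

	/* Ungate VCN instance 0 for an IB test while gating instance 1. */
	amdgpu_dpm_enable_vcn(adev, true, 0);
	amdgpu_dpm_enable_vcn(adev, false, 1);
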
"enable" : "disable", ret); @@ -600,7 +657,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) return; } - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0); if (ret) DRM_ERROR("Dpm %s vce failed, ret = %d. \n", enable ? "enable" : "disable", ret); @@ -610,7 +667,7 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) { int ret = 0; - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0); if (ret) DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", enable ? "enable" : "disable", ret); @@ -620,7 +677,7 @@ void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) { int ret = 0; - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable); + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0); if (ret) DRM_ERROR("Dpm %s vpe failed, ret = %d.\n", enable ? "enable" : "disable", ret); @@ -631,7 +688,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int r = 0; - if (!pp_funcs || !pp_funcs->load_firmware) + if (!pp_funcs || !pp_funcs->load_firmware || + (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU))) return 0; mutex_lock(&adev->pm.mutex); @@ -705,6 +763,63 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev) ret = smu_send_rma_reason(smu); mutex_unlock(&adev->pm.mutex); + if (adev->cper.enabled) + if (amdgpu_cper_generate_bp_threshold_record(adev)) + dev_warn(adev->dev, "fail to generate bad page threshold cper records\n"); + + return ret; +} + +/** + * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported + * @adev: amdgpu_device pointer + * + * This function checks if the SMU supports resetting the SDMA engine. + * It returns false if the hardware does not support software SMU or + * if the feature is not supported. 
+ */
+bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
+{
+	struct smu_context *smu = adev->powerplay.pp_handle;
+	bool ret;
+
+	if (!is_support_sw_smu(adev))
+		return false;
+
+	mutex_lock(&adev->pm.mutex);
+	ret = smu_reset_sdma_is_supported(smu);
+	mutex_unlock(&adev->pm.mutex);
+
+	return ret;
+}
+
+int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+	struct smu_context *smu = adev->powerplay.pp_handle;
+	int ret;
+
+	if (!is_support_sw_smu(adev))
+		return -EOPNOTSUPP;
+
+	mutex_lock(&adev->pm.mutex);
+	ret = smu_reset_sdma(smu, inst_mask);
+	mutex_unlock(&adev->pm.mutex);
+
+	return ret;
+}
+
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+	struct smu_context *smu = adev->powerplay.pp_handle;
+	int ret;
+
+	if (!is_support_sw_smu(adev))
+		return -EOPNOTSUPP;
+
+	mutex_lock(&adev->pm.mutex);
+	ret = smu_reset_vcn(smu, inst_mask);
+	mutex_unlock(&adev->pm.mutex);
+
 	return ret;
 }
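
Both reset entry points take a bitmask of engine instances rather than a single
index, so several engines can be reset in one SMU round trip. A sketch, under the
assumption that BIT() positions map to SDMA/VCN instance numbers (the instances
chosen here are hypothetical):

	/* Reset SDMA instances 0 and 2 if supported, then VCN instance 0. */
	if (amdgpu_dpm_reset_sdma_is_supported(adev))
		amdgpu_dpm_reset_sdma(adev, BIT(0) | BIT(2));
	amdgpu_dpm_reset_vcn(adev, BIT(0));
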
@@ -961,6 +1076,24 @@ enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device
 	return level;
 }
 
+static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
+{
+	/* enter UMD Pstate */
+	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+					       AMD_PG_STATE_UNGATE);
+	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+					       AMD_CG_STATE_UNGATE);
+}
+
+static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
+{
+	/* exit UMD Pstate */
+	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+					       AMD_CG_STATE_GATE);
+	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+					       AMD_PG_STATE_GATE);
+}
+
 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
 				       enum amd_dpm_forced_level level)
 {
@@ -981,6 +1114,10 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
 	if (current_level == level)
 		return 0;
 
+	if (!(current_level & profile_mode_mask) &&
+	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
+		return -EINVAL;
+
 	if (adev->asic_type == CHIP_RAVEN) {
 		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
 			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
@@ -992,35 +1129,25 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
 		}
 	}
 
-	if (!(current_level & profile_mode_mask) &&
-	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
-		return -EINVAL;
-
-	if (!(current_level & profile_mode_mask) &&
-	    (level & profile_mode_mask)) {
-		/* enter UMD Pstate */
-		amdgpu_device_ip_set_powergating_state(adev,
-						       AMD_IP_BLOCK_TYPE_GFX,
-						       AMD_PG_STATE_UNGATE);
-		amdgpu_device_ip_set_clockgating_state(adev,
-						       AMD_IP_BLOCK_TYPE_GFX,
-						       AMD_CG_STATE_UNGATE);
-	} else if ((current_level & profile_mode_mask) &&
-		   !(level & profile_mode_mask)) {
-		/* exit UMD Pstate */
-		amdgpu_device_ip_set_clockgating_state(adev,
-						       AMD_IP_BLOCK_TYPE_GFX,
-						       AMD_CG_STATE_GATE);
-		amdgpu_device_ip_set_powergating_state(adev,
-						       AMD_IP_BLOCK_TYPE_GFX,
-						       AMD_PG_STATE_GATE);
-	}
+	if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
+		amdgpu_dpm_enter_umd_state(adev);
+	else if ((current_level & profile_mode_mask) &&
+		 !(level & profile_mode_mask))
+		amdgpu_dpm_exit_umd_state(adev);
 
 	mutex_lock(&adev->pm.mutex);
 
 	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
 					      level)) {
 		mutex_unlock(&adev->pm.mutex);
+		/* If new level failed, retain the umd state as before */
+		if (!(current_level & profile_mode_mask) &&
+		    (level & profile_mode_mask))
+			amdgpu_dpm_exit_umd_state(adev);
+		else if ((current_level & profile_mode_mask) &&
+			 !(level & profile_mode_mask))
+			amdgpu_dpm_enter_umd_state(adev);
+
 		return -EINVAL;
 	}
 
@@ -1570,6 +1697,28 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
 	}
 }
 
+int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
+{
+	if (is_support_sw_smu(adev)) {
+		struct smu_context *smu = adev->powerplay.pp_handle;
+
+		return smu->od_enabled;
+	} else {
+		struct pp_hwmgr *hwmgr;
+
+		/*
+		 * dpm on some legacy asics don't carry od_enabled member
+		 * as its pp_handle is casted directly from adev.
+		 */
+		if (amdgpu_dpm_is_legacy_dpm(adev))
+			return false;
+
+		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
+
+		return hwmgr->od_enabled;
+	}
+}
+
 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
 			    const char *buf,
 			    size_t size)
@@ -1892,3 +2041,35 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
 
 	return ret;
 }
+
+/**
+ * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
+ * partition
+ * @adev: Pointer to the device.
+ * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
+ * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
+ * function returns the size of the metrics structure.
+ *
+ * This function retrieves metrics for a specific XCP, including details such as
+ * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
+ * table parameter is NULL, the function returns the size of the metrics
+ * structure without populating it.
+ *
+ * Return: Size of the metrics structure on success, or a negative error code on failure.
+ */
+ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
+				   void *table)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	int ret = 0;
+
+	if (!pp_funcs->get_xcp_metrics)
+		return 0;
+
+	mutex_lock(&adev->pm.mutex);
+	ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
+					table);
+	mutex_unlock(&adev->pm.mutex);
+
+	return ret;
+}
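
Per the kernel-doc above, passing a NULL table is documented to return the size of
the metrics structure, which suggests the usual two-call pattern. A sketch of a
caller, assuming the backend honors that NULL-means-size contract:

	/* Query the size first, then allocate and fill the table. */
	ssize_t size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, NULL);
	void *table;

	if (size <= 0)
		return size;
	table = kzalloc(size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, table);
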
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f09b9d49297e..edd9895b46c0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -38,6 +38,8 @@
 #define MAX_NUM_OF_FEATURES_PER_SUBSET		8
 #define MAX_NUM_OF_SUBSETS			8
 
+#define DEVICE_ATTR_IS(_name)	(attr_id == device_attr_id__##_name)
+
 struct od_attribute {
 	struct kobj_attribute	attribute;
 	struct list_head	entry;
@@ -96,6 +98,85 @@ const char * const amdgpu_pp_profile_name[] = {
 };
 
 /**
+ * amdgpu_pm_dev_state_check - Check if device can be accessed.
+ * @adev: Target device.
+ * @runpm: Check runpm status for suspend state checks.
+ *
+ * Checks the state of the @adev for access. Return 0 if the device is
+ * accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
+{
+	bool runpm_check = runpm ? adev->in_runpm : false;
+
+	if (amdgpu_in_reset(adev))
+		return -EPERM;
+	if (adev->in_suspend && !runpm_check)
+		return -EPERM;
+
+	return 0;
+}
+
+/**
+ * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
+ * @adev: Target device.
+ *
+ * Checks the state of the @adev for access. Use runtime pm API to resume if
+ * needed. Return 0 if the device is accessible or a negative error code
+ * otherwise.
+ */
+static int amdgpu_pm_get_access(struct amdgpu_device *adev)
+{
+	int ret;
+
+	ret = amdgpu_pm_dev_state_check(adev, true);
+	if (ret)
+		return ret;
+
+	return pm_runtime_resume_and_get(adev->dev);
+}
+
+/**
+ * amdgpu_pm_get_access_if_active - Check if device is active for access.
+ * @adev: Target device.
+ *
+ * Checks the state of the @adev for access. Use runtime pm API to determine
+ * if device is active. Allow access only if device is active. Return 0 if the
+ * device is accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
+{
+	int ret;
+
+	/* Ignore runpm status. If device is in suspended state, deny access */
+	ret = amdgpu_pm_dev_state_check(adev, false);
+	if (ret)
+		return ret;
+
+	/*
+	 * Allow only if device is active. If runpm is disabled also, as in
+	 * kernels without CONFIG_PM, allow access.
+	 */
+	ret = pm_runtime_get_if_active(adev->dev);
+	if (!ret)
+		return -EPERM;
+
+	return 0;
+}
+
+/**
+ * amdgpu_pm_put_access - Put to auto suspend mode after a device access.
+ * @adev: Target device.
+ *
+ * Should be paired with amdgpu_pm_get_access* calls
+ */
+static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
+{
+	pm_runtime_mark_last_busy(adev->dev);
+	pm_runtime_put_autosuspend(adev->dev);
+}
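
These helpers are the building blocks for every sysfs handler converted below:
one get on entry, one put on every exit path. The read-side shape, as the
conversions in the rest of this patch use it:

	ret = amdgpu_pm_get_access_if_active(adev);	/* amdgpu_pm_get_access() for writes */
	if (ret)
		return ret;

	/* ... touch the hardware ... */

	amdgpu_pm_put_access(adev);
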
"auto" : @@ -305,11 +362,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, enum amd_dpm_forced_level level; int ret = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - if (strncmp("low", buf, strlen("low")) == 0) { level = AMD_DPM_FORCED_LEVEL_LOW; } else if (strncmp("high", buf, strlen("high")) == 0) { @@ -334,16 +386,13 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, return -EINVAL; } - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = amdgpu_pm_get_access(adev); + if (ret < 0) return ret; - } mutex_lock(&adev->pm.stable_pstate_ctx_lock); if (amdgpu_dpm_force_performance_level(adev, level)) { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); mutex_unlock(&adev->pm.stable_pstate_ctx_lock); return -EINVAL; } @@ -351,8 +400,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, adev->pm.stable_pstate_ctx = NULL; mutex_unlock(&adev->pm.stable_pstate_ctx_lock); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); return count; } @@ -367,22 +415,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, uint32_t i; int buf_len, ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) return ret; - } if (amdgpu_dpm_get_pp_num_states(adev, &data)) memset(&data, 0, sizeof(data)); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); buf_len = sysfs_emit(buf, "states: %d\n", data.nums); for (i = 0; i < data.nums; i++) @@ -405,23 +445,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, enum amd_pm_state_type pm = 0; int i = 0, ret = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) return ret; - } amdgpu_dpm_get_current_power_state(adev, &pm); ret = amdgpu_dpm_get_pp_num_states(adev, &data); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + amdgpu_pm_put_access(adev); if (ret) return ret; @@ -444,11 +476,6 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - if (adev->pm.pp_force_state_enabled) return amdgpu_get_pp_cur_state(dev, attr, buf); else @@ -467,11 +494,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, unsigned long idx; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - adev->pm.pp_force_state_enabled = false; if (strlen(buf) == 1) @@ -483,11 +505,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = amdgpu_pm_get_access(adev); + if (ret < 0) return ret; - } ret = 
 	if (ret)
@@ -506,14 +526,13 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 		adev->pm.pp_force_state_enabled = true;
 	}
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return count;
 
 err_out:
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
+
 	return ret;
 }
 
@@ -537,21 +556,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	char *table = NULL;
 	int size, ret;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	size = amdgpu_dpm_get_pp_table(adev, &table);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	if (size <= 0)
 		return size;
@@ -573,21 +584,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 	struct amdgpu_device *adev = drm_to_adev(ddev);
 	int ret = 0;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access(adev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	if (ret)
 		return ret;
@@ -756,11 +759,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 	const char delimiter[3] = {' ', '\n', '\0'};
 	uint32_t type;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
 	if (count > 127 || count == 0)
 		return -EINVAL;
 
@@ -806,11 +804,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 		tmp_str++;
 	}
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access(adev);
+	if (ret < 0)
 		return ret;
-	}
 
 	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
 					      type,
@@ -829,14 +825,13 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 		goto err_out;
 	}
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return count;
 
 err_out:
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
+
 	return -EINVAL;
 }
 
@@ -858,16 +853,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	};
 	uint clk_index;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
 		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
@@ -886,8 +874,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	if (size == 0)
 		size = sysfs_emit(buf, "\n");
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return size;
 }
@@ -918,25 +905,17 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
 	uint64_t featuremask;
 	int ret;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
 	ret = kstrtou64(buf, 0, &featuremask);
 	if (ret)
 		return -EINVAL;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access(adev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	if (ret)
 		return -EINVAL;
@@ -953,23 +932,15 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
 	if (size <= 0)
 		size = sysfs_emit(buf, "\n");
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return size;
 }
@@ -1022,16 +993,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
 	int size = 0;
 	int ret = 0;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
 	if (ret == -ENOENT)
@@ -1040,8 +1004,7 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
 	if (size == 0)
 		size = sysfs_emit(buf, "\n");
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return size;
 }
@@ -1091,25 +1054,17 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access(adev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_force_clock_level(adev, type, mask);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	if (ret)
 		return -EINVAL;
@@ -1276,21 +1231,13 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	uint32_t value = 0;
 	int ret;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	value = amdgpu_dpm_get_sclk_od(adev);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return sysfs_emit(buf, "%d\n", value);
 }
@@ -1305,26 +1252,18 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 	int ret;
 	long int value;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
 	ret = kstrtol(buf, 0, &value);
 	if (ret)
 		return -EINVAL;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access(adev);
+	if (ret < 0)
 		return ret;
-	}
 
 	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return count;
 }
@@ -1338,21 +1277,13 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	uint32_t value = 0;
 	int ret;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	value = amdgpu_dpm_get_mclk_od(adev);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return sysfs_emit(buf, "%d\n", value);
 }
@@ -1367,26 +1298,18 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 	int ret;
 	long int value;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
 	ret = kstrtol(buf, 0, &value);
 	if (ret)
 		return -EINVAL;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access(adev);
+	if (ret < 0)
 		return ret;
-	}
 
 	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return count;
 }
@@ -1407,7 +1330,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
  * create a custom set of heuristics, write a string of numbers to the file
  * starting with the number of the custom profile along with a setting
  * for each heuristic parameter. Due to differences across asic families
- * the heuristic parameters vary from family to family.
+ * the heuristic parameters vary from family to family. Additionally,
+ * you can apply the custom heuristics to different clock domains. Each
+ * clock domain is considered a distinct operation so if you modify the
+ * gfxclk heuristics and then the memclk heuristics, all of the
+ * custom heuristics will be retained until you switch to another profile.
 *
 */
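
Given the parsing in amdgpu_set_pp_power_profile_mode() below (heuristic values are
collected into parameter[] and the profile number is appended at
parameter[parameter_size]), a write such as "6 30 90" would reach the backend
roughly as sketched here; the profile index and values are purely illustrative:

	/* Hypothetical custom profile 6 with two heuristic parameters. */
	long parameter[3] = { 30, 90, 6 };	/* values first, profile number last */

	amdgpu_dpm_set_power_profile_mode(adev, parameter, 2);
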
@@ -1420,23 +1347,15 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
 	if (size <= 0)
 		size = sysfs_emit(buf, "\n");
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return size;
 }
@@ -1459,11 +1378,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	long int profile_mode = 0;
 	const char delimiter[3] = {' ', '\n', '\0'};
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
 	tmp[0] = *(buf);
 	tmp[1] = '\0';
 	ret = kstrtol(tmp, 0, &profile_mode);
@@ -1490,16 +1404,13 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	}
 	parameter[parameter_size] = profile_mode;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access(adev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	if (!ret)
 		return count;
@@ -1513,22 +1424,14 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
 {
 	int r, size = sizeof(uint32_t);
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (r < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	r = amdgpu_pm_get_access_if_active(adev);
+	if (r)
 		return r;
-	}
 
 	/* get the sensor value */
 	r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
 
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	amdgpu_pm_put_access(adev);
 
 	return r;
 }
@@ -1582,6 +1485,30 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 }
 
 /**
+ * DOC: vcn_busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the VCN
+ * is as a percentage. The file vcn_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	unsigned int value;
+	int r;
+
+	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
+	if (r)
+		return r;
+
+	return sysfs_emit(buf, "%d\n", value);
+}
+
+/**
  * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
@@ -1602,27 +1529,19 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 	uint64_t count0 = 0, count1 = 0;
 	int ret;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
 	if (adev->flags & AMD_IS_APU)
 		return -ENODATA;
 
 	if (!adev->asic_funcs->get_pcie_usage)
 		return -ENODATA;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return sysfs_emit(buf, "%llu %llu %i\n",
 			  count0, count1, pcie_get_mps(adev->pdev));
@@ -1645,11 +1564,6 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
 	if (adev->unique_id)
 		return sysfs_emit(buf, "%016llx\n", adev->unique_id);
 
@@ -1692,7 +1606,6 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
 	long throttling_logging_interval;
-	unsigned long flags;
 	int ret = 0;
 
 	ret = kstrtol(buf, 0, &throttling_logging_interval);
@@ -1703,18 +1616,12 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
 		return -EINVAL;
 
 	if (throttling_logging_interval > 0) {
-		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
 		/*
 		 * Reset the ratelimit timer internals.
 		 * This can effectively restart the timer.
 		 */
-		adev->throttling_logging_rs.interval =
-			(throttling_logging_interval - 1) * HZ;
-		adev->throttling_logging_rs.begin = 0;
-		adev->throttling_logging_rs.printed = 0;
-		adev->throttling_logging_rs.missed = 0;
-		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
-
+		ratelimit_state_reset_interval(&adev->throttling_logging_rs,
+					       (throttling_logging_interval - 1) * HZ);
 		atomic_set(&adev->throttling_logging_enabled, 1);
 	} else {
 		atomic_set(&adev->throttling_logging_enabled, 0);
@@ -1744,11 +1651,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
 	if (!ret)
@@ -1756,8 +1661,7 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
 	else
 		size = sysfs_emit(buf, "failed to get thermal limit\n");
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return size;
 }
@@ -1781,20 +1685,18 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
 		return -EINVAL;
 	}
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access(adev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
 	if (ret) {
+		amdgpu_pm_put_access(adev);
 		dev_err(dev, "failed to update thermal limit\n");
 		return ret;
 	}
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return count;
 }
@@ -1818,21 +1720,13 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev,
 	ssize_t size = 0;
 	int ret;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return size;
 }
@@ -1859,16 +1753,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 	ssize_t size = 0;
 	int ret;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = amdgpu_pm_get_access_if_active(adev);
+	if (ret)
 		return ret;
-	}
 
 	size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
 	if (size <= 0)
@@ -1880,8 +1767,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 	memcpy(buf, gpu_metrics, size);
 
 out:
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	return size;
 }
@@ -1977,21 +1863,14 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
 	int r = 0;
 	int bias = 0;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	r = pm_runtime_get_sync(ddev->dev);
-	if (r < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return r;
-	}
-
 	r = kstrtoint(buf, 10, &bias);
 	if (r)
 		goto out;
 
+	r = amdgpu_pm_get_access(adev);
+	if (r < 0)
+		return r;
+
 	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
 		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
 	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
 		bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
@@ -2003,8 +1882,8 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
 	/* TODO: update bias level with SMU message */
 
 out:
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
+
 	return r;
 }
 
@@ -2046,9 +1925,11 @@ static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdg
 		return 0;
 	}
 
-	/* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
-	if (gc_ver == IP_VERSION(9, 4, 3)) {
-		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+	/* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
+	if (gc_ver == IP_VERSION(9, 4, 3) ||
+	    gc_ver == IP_VERSION(9, 4, 4) ||
+	    gc_ver == IP_VERSION(9, 5, 0)) {
+		if (amdgpu_sriov_multi_vf_mode(adev))
 			*states = ATTR_STATE_UNSUPPORTED;
 		return 0;
 	}
@@ -2083,7 +1964,7 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_
 	 * setting should not be allowed from VF if not in one VF mode.
 	 */
 	if (gc_ver >= IP_VERSION(10, 0, 0) ||
-	    (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
+	    (amdgpu_sriov_multi_vf_mode(adev))) {
 		dev_attr->attr.mode &= ~S_IWUGO;
 		dev_attr->store = NULL;
 	}
@@ -2091,63 +1972,223 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_
 	return 0;
 }
 
-/* Following items will be read out to indicate current plpd policy:
- *  - -1: none
- *  - 0: disallow
- *  - 1: default
- *  - 2: optimized
- */
-static ssize_t amdgpu_get_xgmi_plpd_policy(struct device *dev,
-					   struct device_attribute *attr,
-					   char *buf)
+static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+					  uint32_t mask, enum amdgpu_device_attr_states *states)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	char *mode_desc = "none";
-	int mode;
+	struct device_attribute *dev_attr = &attr->dev_attr;
+	enum amdgpu_device_attr_id attr_id = attr->attr_id;
+	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
+	*states = ATTR_STATE_SUPPORTED;
+
+	if (!(attr->flags & mask)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+		if (gc_ver < IP_VERSION(9, 0, 0))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+		if (mp1_ver < IP_VERSION(10, 0, 0))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
+		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+		      gc_ver == IP_VERSION(10, 3, 3) ||
+		      gc_ver == IP_VERSION(10, 3, 6) ||
+		      gc_ver == IP_VERSION(10, 3, 7) ||
+		      gc_ver == IP_VERSION(10, 3, 0) ||
+		      gc_ver == IP_VERSION(10, 1, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 1) ||
+		      gc_ver == IP_VERSION(11, 0, 4) ||
+		      gc_ver == IP_VERSION(11, 5, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 3) ||
+		      gc_ver == IP_VERSION(9, 4, 3) ||
+		      gc_ver == IP_VERSION(9, 4, 4) ||
+		      gc_ver == IP_VERSION(9, 5, 0)))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
+		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+		       gc_ver == IP_VERSION(10, 3, 0) ||
+		       gc_ver == IP_VERSION(11, 0, 2) ||
+		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+		      gc_ver == IP_VERSION(10, 3, 3) ||
+		      gc_ver == IP_VERSION(10, 3, 6) ||
+		      gc_ver == IP_VERSION(10, 3, 7) ||
+		      gc_ver == IP_VERSION(10, 3, 0) ||
+		      gc_ver == IP_VERSION(10, 1, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 1) ||
+		      gc_ver == IP_VERSION(11, 0, 4) ||
+		      gc_ver == IP_VERSION(11, 5, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 3) ||
+		      gc_ver == IP_VERSION(9, 4, 3) ||
+		      gc_ver == IP_VERSION(9, 4, 4) ||
+		      gc_ver == IP_VERSION(9, 5, 0)))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
+		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+		       gc_ver == IP_VERSION(10, 3, 0) ||
+		       gc_ver == IP_VERSION(11, 0, 2) ||
+		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+		if (gc_ver == IP_VERSION(9, 4, 2) ||
+		    gc_ver == IP_VERSION(9, 4, 3) ||
+		    gc_ver == IP_VERSION(9, 4, 4) ||
+		    gc_ver == IP_VERSION(9, 5, 0))
+			*states = ATTR_STATE_UNSUPPORTED;
+	}
+
+	switch (gc_ver) {
+	case IP_VERSION(9, 4, 1):
+	case IP_VERSION(9, 4, 2):
+		/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
+		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
+		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
+			dev_attr->attr.mode &= ~S_IWUGO;
+			dev_attr->store = NULL;
+		}
+		break;
+	default:
+		break;
+	}
 
-	mode = amdgpu_dpm_get_xgmi_plpd_mode(adev, &mode_desc);
+	/* setting should not be allowed from VF if not in one VF mode */
+	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) {
+		dev_attr->attr.mode &= ~S_IWUGO;
+		dev_attr->store = NULL;
+	}
 
-	return sysfs_emit(buf, "%d: %s\n", mode, mode_desc);
+	return 0;
 }
 
-/* Following argument value is expected from user to change plpd policy
- *  - arg 0: disallow plpd
- *  - arg 1: default policy
- *  - arg 2: optimized policy
+/* pm policy attributes */
+struct amdgpu_pm_policy_attr {
+	struct device_attribute dev_attr;
+	enum pp_pm_policy id;
+};
+
+/**
+ * DOC: pm_policy
+ *
+ * Certain SOCs can support different power policies to optimize application
+ * performance. However, this policy is provided only at SOC level and not at a
+ * per-process level. This is useful especially when the entire SOC is utilized
+ * for a dedicated workload.
+ *
+ * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
+ * only two types of policies are supported through this interface.
+ *
+ *  Pstate Policy Selection - This is to select different Pstate profiles which
+ *  decides clock/throttling preferences.
+ *
+ *  XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
+ *  this helps to select policy to be applied for per link power down.
+ *
+ * The list of available policies and policy levels vary between SOCs. They can
+ * be viewed under pm_policy node directory. If the SOC doesn't support any
+ * policy, this node won't be available. The different policies supported will
+ * be available as separate nodes under pm_policy.
+ *
+ *	cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
+ *
+ * Reading the policy file shows the different levels supported. The level which
+ * is applied presently is denoted by * (asterisk). E.g.,
+ *
 * .. code-block:: console
 *
 *	cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
 *	0 : soc_pstate_default
 *	1 : soc_pstate_0
 *	2 : soc_pstate_1*
 *	3 : soc_pstate_2
 *
 *	cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
 *	0 : plpd_disallow
 *	1 : plpd_default
 *	2 : plpd_optimized*
 *
 * To apply a specific policy
 *
 * "echo  <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>"
 *
 * For the levels listed in the example above, to select "plpd_optimized" for
 * XGMI and "soc_pstate_2" for soc pstate policy -
 *
 * .. code-block:: console
 *
 *	echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
 *	echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
 *
 */
-static ssize_t amdgpu_set_xgmi_plpd_policy(struct device *dev,
-					   struct device_attribute *attr,
-					   const char *buf, size_t count)
+static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
-	int mode, ret;
+	struct amdgpu_pm_policy_attr *policy_attr;
 
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
+	policy_attr =
+		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
 
-	ret = kstrtos32(buf, 0, &mode);
-	if (ret)
+	return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
+}
+
+static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	struct amdgpu_pm_policy_attr *policy_attr;
+	int ret, num_params = 0;
+	char delimiter[] = " \n\t";
+	char tmp_buf[128];
+	char *tmp, *param;
+	long val;
+
+	count = min(count, sizeof(tmp_buf));
+	memcpy(tmp_buf, buf, count);
+	tmp_buf[count - 1] = '\0';
+	tmp = tmp_buf;
+
+	tmp = skip_spaces(tmp);
+	while ((param = strsep(&tmp, delimiter))) {
+		if (!strlen(param)) {
+			tmp = skip_spaces(tmp);
+			continue;
+		}
+		ret = kstrtol(param, 0, &val);
+		if (ret)
+			return -EINVAL;
+		num_params++;
+		if (num_params > 1)
+			return -EINVAL;
+	}
+
+	if (num_params != 1)
 		return -EINVAL;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	policy_attr =
+		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
+
+	ret = amdgpu_pm_get_access(adev);
+	if (ret < 0)
 		return ret;
-	}
 
-	ret = amdgpu_dpm_set_xgmi_plpd_mode(adev, mode);
+	ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+	amdgpu_pm_put_access(adev);
 
 	if (ret)
 		return ret;
@@ -2155,6 +2196,48 @@ static ssize_t amdgpu_set_xgmi_plpd_policy(struct device *dev,
 	return count;
 }
 
+#define AMDGPU_PM_POLICY_ATTR(_name, _id)                                  \
+	static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = {     \
+		.dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
+				   amdgpu_set_pm_policy_attr),             \
+		.id = PP_PM_POLICY_##_id,                                  \
+	};
+
+#define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr
+
+AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
+AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)
+
+static struct attribute *pm_policy_attrs[] = {
+	&AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
+	&AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
+	NULL
+};
+
+static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
+					     struct attribute *attr, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	struct amdgpu_pm_policy_attr *policy_attr;
+
+	policy_attr =
+		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
+
+	if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
+	    -ENOENT)
+		return 0;
+
+	return attr->mode;
+}
+
+const struct attribute_group amdgpu_pm_policy_attr_group = {
+	.name = "pm_policy",
+	.attrs = pm_policy_attrs,
+	.is_visible = amdgpu_pm_policy_attr_visible,
+};
+
 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
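
The named group above is what gives the pm_policy directory its nodes, with
is_visible hiding policies the SOC does not report. Registration is not part of
this hunk; a sketch of how such a group is typically attached elsewhere in the
driver, using the standard sysfs API (the call site here is hypothetical):

	/* Hypothetical wiring; the actual call site lives outside this diff. */
	int ret = devm_device_add_group(adev->dev, &amdgpu_pm_policy_attr_group);

	if (ret)
		dev_err(adev->dev, "failed to create pm_policy group\n");
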
@@ -2162,17 +2245,26 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
 			      .attr_update = pp_dpm_dcefclk_attr_update),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
 	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2180,6 +2272,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 			      .attr_update = pp_od_clk_voltage_attr_update),
 	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2192,7 +2285,6 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 			      .attr_update = ss_power_attr_update),
 	AMDGPU_DEVICE_ATTR_RW(smartshift_bias,				ATTR_FLAG_BASIC,
 			      .attr_update = ss_bias_attr_update),
-	AMDGPU_DEVICE_ATTR_RW(xgmi_plpd_policy,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RO(pm_metrics,				ATTR_FLAG_BASIC,
 			      .attr_update = amdgpu_pm_metrics_attr_update),
 };
@@ -2201,28 +2293,37 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 			       uint32_t mask, enum amdgpu_device_attr_states *states)
 {
 	struct device_attribute *dev_attr = &attr->dev_attr;
-	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+	enum amdgpu_device_attr_id attr_id = attr->attr_id;
 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
-	const char *attr_name = dev_attr->attr.name;
 
 	if (!(attr->flags & mask)) {
 		*states = ATTR_STATE_UNSUPPORTED;
 		return 0;
 	}
 
-#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
-
-	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
-		if (gc_ver < IP_VERSION(9, 0, 0))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
-		if (mp1_ver < IP_VERSION(10, 0, 0))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+	if (DEVICE_ATTR_IS(mem_busy_percent)) {
 		if ((adev->flags & AMD_IS_APU &&
 		     gc_ver != IP_VERSION(9, 4, 3)) ||
 		    gc_ver == IP_VERSION(9, 0, 1))
 			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
+		if (!(gc_ver == IP_VERSION(9, 3, 0) ||
+		      gc_ver == IP_VERSION(10, 3, 1) ||
+		      gc_ver == IP_VERSION(10, 3, 3) ||
+		      gc_ver == IP_VERSION(10, 3, 6) ||
+		      gc_ver == IP_VERSION(10, 3, 7) ||
+		      gc_ver == IP_VERSION(11, 0, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 1) ||
+		      gc_ver == IP_VERSION(11, 0, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 3) ||
+		      gc_ver == IP_VERSION(11, 0, 4) ||
+		      gc_ver == IP_VERSION(11, 5, 0) ||
+		      gc_ver == IP_VERSION(11, 5, 1) ||
+		      gc_ver == IP_VERSION(11, 5, 2) ||
+		      gc_ver == IP_VERSION(11, 5, 3) ||
+		      gc_ver == IP_VERSION(12, 0, 0) ||
+		      gc_ver == IP_VERSION(12, 0, 1)))
+			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pcie_bw)) {
 		/* PCIe Perf counters won't work on APU nodes */
 		if (adev->flags & AMD_IS_APU ||
@@ -2235,11 +2336,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		case IP_VERSION(9, 4, 1):
 		case IP_VERSION(9, 4, 2):
 		case IP_VERSION(9, 4, 3):
+		case IP_VERSION(9, 4, 4):
+		case IP_VERSION(9, 5, 0):
 		case IP_VERSION(10, 3, 0):
 		case IP_VERSION(11, 0, 0):
 		case IP_VERSION(11, 0, 1):
 		case IP_VERSION(11, 0, 2):
 		case IP_VERSION(11, 0, 3):
+		case IP_VERSION(12, 0, 0):
+		case IP_VERSION(12, 0, 1):
 			*states = ATTR_STATE_SUPPORTED;
 			break;
 		default:
@@ -2253,45 +2358,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
 		if (gc_ver < IP_VERSION(9, 1, 0))
 			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
-		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
-		      gc_ver == IP_VERSION(10, 3, 0) ||
-		      gc_ver == IP_VERSION(10, 1, 2) ||
-		      gc_ver == IP_VERSION(11, 0, 0) ||
-		      gc_ver == IP_VERSION(11, 0, 2) ||
-		      gc_ver == IP_VERSION(11, 0, 3) ||
-		      gc_ver == IP_VERSION(9, 4, 3)))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
-		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
-		       gc_ver == IP_VERSION(10, 3, 0) ||
-		       gc_ver == IP_VERSION(11, 0, 2) ||
-		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
-		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
(!(gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 0) || - gc_ver == IP_VERSION(10, 1, 2) || - gc_ver == IP_VERSION(11, 0, 0) || - gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3) || - gc_ver == IP_VERSION(9, 4, 3))) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) { - if (!((gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 0) || - gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) - *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; else if ((gc_ver == IP_VERSION(10, 3, 0) || gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev)) *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) { - if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE) - *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_mclk_od)) { if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; @@ -2304,23 +2376,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) { - if (gc_ver == IP_VERSION(9, 4, 2) || - gc_ver == IP_VERSION(9, 4, 3)) - *states = ATTR_STATE_UNSUPPORTED; } switch (gc_ver) { - case IP_VERSION(9, 4, 1): - case IP_VERSION(9, 4, 2): - /* the Mi series card does not support standalone mclk/socclk/fclk level setting */ - if (DEVICE_ATTR_IS(pp_dpm_mclk) || - DEVICE_ATTR_IS(pp_dpm_socclk) || - DEVICE_ATTR_IS(pp_dpm_fclk)) { - dev_attr->attr.mode &= ~S_IWUGO; - dev_attr->store = NULL; - } - break; case IP_VERSION(10, 3, 0): if (DEVICE_ATTR_IS(power_dpm_force_performance_level) && amdgpu_sriov_vf(adev)) { @@ -2332,14 +2390,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ break; } - /* setting should not be allowed from VF if not in one VF mode */ - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { - dev_attr->attr.mode &= ~S_IWUGO; - dev_attr->store = NULL; - } - -#undef DEVICE_ATTR_IS - return 0; } @@ -2573,21 +2623,13 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, u32 pwm_mode = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) return ret; - } ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (ret) return -EINVAL; @@ -2605,11 +2647,6 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, u32 pwm_mode; int value; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - err = kstrtoint(buf, 10, &value); if (err) return err; @@ -2623,16 +2660,13 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, else return -EINVAL; - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + ret = amdgpu_pm_get_access(adev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); - 
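From here on the hwmon show/store handlers drop their open-coded reset/suspend checks and pm_runtime calls in favor of amdgpu_pm_get_access()/amdgpu_pm_put_access(), with a _if_active variant on the read paths (presumably built on pm_runtime_get_if_active() so that a read does not wake a runtime-suspended GPU). The helpers themselves are outside this diff; a hedged sketch of what they plausibly wrap, based only on the sequence they replace above:

static inline int sketch_pm_get_access(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	return 0;
}

static inline void sketch_pm_put_access(struct amdgpu_device *adev)
{
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}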
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (ret) return -EINVAL; @@ -2663,20 +2697,13 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, u32 value; u32 pwm_mode; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - err = kstrtou32(buf, 10, &value); if (err) return err; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = amdgpu_pm_get_access(adev); + if (err < 0) return err; - } err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); if (err) @@ -2691,8 +2718,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, err = amdgpu_dpm_set_fan_speed_pwm(adev, value); out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -2708,21 +2734,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, int err; u32 speed = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = amdgpu_pm_get_access_if_active(adev); + if (err) return err; - } err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -2738,21 +2756,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, int err; u32 speed = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = amdgpu_pm_get_access_if_active(adev); + if (err) return err; - } err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -2802,21 +2812,13 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, int err; u32 rpm = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = amdgpu_pm_get_access_if_active(adev); + if (err) return err; - } err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -2833,20 +2835,13 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, u32 value; u32 pwm_mode; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - err = kstrtou32(buf, 10, &value); if (err) return err; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = amdgpu_pm_get_access(adev); + if (err < 0) return err; - } err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); if (err) @@ -2860,8 +2855,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, err = amdgpu_dpm_set_fan_speed_rpm(adev, value); out: - 
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -2877,21 +2871,13 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, u32 pwm_mode = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) return ret; - } ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (ret) return -EINVAL; @@ -2909,11 +2895,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, int value; u32 pwm_mode; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - err = kstrtoint(buf, 10, &value); if (err) return err; @@ -2925,16 +2906,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, else return -EINVAL; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = amdgpu_pm_get_access(adev); + if (err < 0) return err; - } err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return -EINVAL; @@ -2959,6 +2937,23 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, return sysfs_emit(buf, "%d\n", vddgfx); } +static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct amdgpu_device *adev = dev_get_drvdata(dev); + u32 vddboard; + int r; + + /* get the voltage */ + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, + (void *)&vddboard); + if (r) + return r; + + return sysfs_emit(buf, "%d\n", vddboard); +} + static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, struct device_attribute *attr, char *buf) @@ -2966,6 +2961,12 @@ static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, return sysfs_emit(buf, "vddgfx\n"); } +static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "vddboard\n"); +} static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, struct device_attribute *attr, char *buf) @@ -3049,16 +3050,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, ssize_t size; int r; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + r = amdgpu_pm_get_access_if_active(adev); + if (r) return r; - } r = amdgpu_dpm_get_power_limit(adev, &limit, pp_limit_level, power_type); @@ -3068,8 +3062,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, else size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); return size; } @@ -3130,11 +3123,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, int err; u32 value; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - 
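The new in2_input/in2_label pair added above follows the hwmon voltage convention, so in2_input reports millivolts like the existing vddgfx/vddnb channels. A hypothetical userspace check (the hwmon index varies per system):

#include <stdio.h>

int main(void)
{
	/* hwmon0 is an assumption; resolve the real index via the name file */
	FILE *f = fopen("/sys/class/hwmon/hwmon0/in2_input", "r");
	int mv;

	if (f && fscanf(f, "%d", &mv) == 1)
		printf("vddboard: %d mV\n", mv);
	if (f)
		fclose(f);

	return 0;
}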
return -EPERM; - if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -3145,16 +3133,13 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, value = value / 1000000; /* convert to Watt */ value |= limit_type << 24; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = amdgpu_pm_get_access(adev); + if (err < 0) return err; - } err = amdgpu_dpm_set_power_limit(adev, value); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_pm_put_access(adev); if (err) return err; @@ -3325,6 +3310,8 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0) static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); +static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0); +static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0); static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0); static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); @@ -3372,6 +3359,8 @@ static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_in0_label.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_label.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in2_label.dev_attr.attr, &sensor_dev_attr_power1_average.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, &sensor_dev_attr_power1_cap_max.dev_attr.attr, @@ -3432,7 +3421,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, /* Skip crit temp on APU */ if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) || - (gc_ver == IP_VERSION(9, 4, 3))) && + (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0))) && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) return 0; @@ -3468,7 +3458,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, /* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */ if (((adev->family == AMDGPU_FAMILY_SI) || ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) && - (gc_ver != IP_VERSION(9, 4, 3)))) && + (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) && (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr || attr == &sensor_dev_attr_power1_cap.dev_attr.attr || @@ -3506,17 +3496,29 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */ - (gc_ver == IP_VERSION(9, 4, 3))) && + (gc_ver == IP_VERSION(9, 4, 3) || + gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0))) && (attr == &sensor_dev_attr_in0_input.dev_attr.attr || attr == &sensor_dev_attr_in0_label.dev_attr.attr)) return 0; /* only APUs other than gc 9,4,3 have vddnb */ - if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) && + if ((!(adev->flags & AMD_IS_APU) || + (gc_ver == IP_VERSION(9, 4, 3) || + gc_ver == IP_VERSION(9, 4, 4) || 
+ gc_ver == IP_VERSION(9, 5, 0))) && (attr == &sensor_dev_attr_in1_input.dev_attr.attr || attr == &sensor_dev_attr_in1_label.dev_attr.attr)) return 0; + /* only a few boards support vddboard */ + if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr || + attr == &sensor_dev_attr_in2_label.dev_attr.attr) && + amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, + (void *)&tmp) == -EOPNOTSUPP) + return 0; + /* no mclk on APUs other than gc 9,4,3*/ if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) && (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || @@ -3524,7 +3526,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) && - (gc_ver != IP_VERSION(9, 4, 3)) && + (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) && (attr == &sensor_dev_attr_temp2_input.dev_attr.attr || attr == &sensor_dev_attr_temp2_label.dev_attr.attr || attr == &sensor_dev_attr_temp2_crit.dev_attr.attr || @@ -3534,7 +3536,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; /* hotspot temperature for gc 9,4,3*/ - if (gc_ver == IP_VERSION(9, 4, 3)) { + if (gc_ver == IP_VERSION(9, 4, 3) || + gc_ver == IP_VERSION(9, 4, 4) || + gc_ver == IP_VERSION(9, 5, 0)) { if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr || attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || attr == &sensor_dev_attr_temp1_label.dev_attr.attr) @@ -3584,23 +3588,15 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev, int size = 0; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - ret = pm_runtime_get_sync(adev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev->dev); + ret = amdgpu_pm_get_access_if_active(adev); + if (ret) return ret; - } size = amdgpu_dpm_print_clock_levels(adev, od_type, buf); if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(adev->dev); - pm_runtime_put_autosuspend(adev->dev); + amdgpu_pm_put_access(adev); return size; } @@ -3668,11 +3664,6 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev, long parameter[64]; int ret; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - ret = parse_input_od_command_lines(in_buf, count, &cmd_type, @@ -3681,34 +3672,31 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev, if (ret) return ret; - ret = pm_runtime_get_sync(adev->dev); + ret = amdgpu_pm_get_access(adev); if (ret < 0) - goto err_out0; + return ret; ret = amdgpu_dpm_odn_edit_dpm_table(adev, cmd_type, parameter, parameter_size); if (ret) - goto err_out1; + goto err_out; if (cmd_type == PP_OD_COMMIT_DPM_TABLE) { ret = amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); if (ret) - goto err_out1; + goto err_out; } - pm_runtime_mark_last_busy(adev->dev); - pm_runtime_put_autosuspend(adev->dev); + amdgpu_pm_put_access(adev); return count; -err_out1: - pm_runtime_mark_last_busy(adev->dev); -err_out0: - pm_runtime_put_autosuspend(adev->dev); +err_out: + amdgpu_pm_put_access(adev); return ret; } @@ -4015,6 +4003,117 @@ static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev) return umode; } +/** + * DOC: fan_zero_rpm_enable + * + * The amdgpu driver provides a sysfs API for checking and adjusting the + * zero RPM feature. + * + * Reading back the file shows you the current setting and the permitted + * ranges if changeable.
+ * + * Writing an integer to the file changes the setting accordingly. + * + * When you have finished editing, write "c" (commit) to the file to commit + * your changes. + * + * If you want to reset to the default value, write "r" (reset) to the file to + * reset it. + */ +static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf); +} + +static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_distribute_custom_od_settings(adev, + PP_OD_EDIT_FAN_ZERO_RPM_ENABLE, + buf, + count); +} + +static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev) +{ + umode_t umode = 0000; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE) + umode |= S_IRUSR | S_IRGRP | S_IROTH; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET) + umode |= S_IWUSR; + + return umode; +} + +/** + * DOC: fan_zero_rpm_stop_temperature + * + * The amdgpu driver provides a sysfs API for checking and adjusting the + * zero RPM stop temperature feature. + * + * Reading back the file shows you the current setting and the permitted + * ranges if changeable. + * + * Writing an integer to the file changes the setting accordingly. + * + * When you have finished editing, write "c" (commit) to the file to commit + * your changes. + * + * If you want to reset to the default value, write "r" (reset) to the file to + * reset it. + * + * This setting works only if the Zero RPM setting is enabled. It adjusts the + * temperature below which the fan can stop.
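Following the DOC text above, a hypothetical userspace sequence that stages a value and then commits it; each token has to reach the store handler as its own write(), which reopening the file guarantees (the sysfs path is an assumption and varies by card):

#include <stdio.h>

static int write_token(const char *path, const char *tok)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", tok);
	return fclose(f);	/* flush: one write() per token */
}

int main(void)
{
	const char *p = "/sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_zero_rpm_enable";

	write_token(p, "0");	/* stage: disable zero RPM */
	write_token(p, "c");	/* commit, per the DOC above */
	return 0;
}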
+ */ +static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf); +} + +static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_distribute_custom_od_settings(adev, + PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP, + buf, + count); +} + +static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev) +{ + umode_t umode = 0000; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE) + umode |= S_IRUSR | S_IRGRP | S_IROTH; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET) + umode |= S_IWUSR; + + return umode; +} + static struct od_feature_set amdgpu_od_set = { .containers = { [0] = { @@ -4060,6 +4159,22 @@ static struct od_feature_set amdgpu_od_set = { .store = fan_minimum_pwm_store, }, }, + [5] = { + .name = "fan_zero_rpm_enable", + .ops = { + .is_visible = fan_zero_rpm_enable_visible, + .show = fan_zero_rpm_enable_show, + .store = fan_zero_rpm_enable_store, + }, + }, + [6] = { + .name = "fan_zero_rpm_stop_temperature", + .ops = { + .is_visible = fan_zero_rpm_stop_temp_visible, + .show = fan_zero_rpm_stop_temp_show, + .store = fan_zero_rpm_stop_temp_store, + }, + }, }, }, }, @@ -4261,6 +4376,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev) } } + /* + * If gpu_od is the only member in the list, that means gpu_od is an + * empty directory, so remove it. 
+ */ + if (list_is_singular(&adev->pm.od_kobj_list)) + goto err_out; + return 0; err_out: @@ -4288,8 +4410,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) /* under multi-vf mode, the hwmon attributes are all not supported */ if (mode != SRIOV_VF_MODE_MULTI_VF) { adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, - DRIVER_NAME, adev, - hwmon_groups); + DRIVER_NAME, adev, + hwmon_groups); if (IS_ERR(adev->pm.int_hwmon_dev)) { ret = PTR_ERR(adev->pm.int_hwmon_dev); dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret); @@ -4322,6 +4444,16 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) ret = amdgpu_od_set_init(adev); if (ret) goto err_out1; + } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) { + dev_info(adev->dev, "overdrive feature is not supported\n"); + } + + if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) != + -EOPNOTSUPP) { + ret = devm_device_add_group(adev->dev, + &amdgpu_pm_policy_attr_group); + if (ret) + goto err_out0; } adev->pm.sysfs_initialized = true; @@ -4429,6 +4561,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a /* MEM Load */ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size)) seq_printf(m, "MEM Load: %u %%\n", value); + /* VCN Load */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size)) + seq_printf(m, "VCN Load: %u %%\n", value); seq_printf(m, "\n"); @@ -4531,20 +4666,12 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags) static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) { struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct drm_device *dev = adev_to_drm(adev); u64 flags = 0; int r; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_sync(dev->dev); - if (r < 0) { - pm_runtime_put_autosuspend(dev->dev); + r = amdgpu_pm_get_access(adev); + if (r < 0) return r; - } if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) { r = amdgpu_debugfs_pm_info_pp(m, adev); @@ -4559,8 +4686,7 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) seq_printf(m, "\n"); out: - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); + amdgpu_pm_put_access(adev); return r; } @@ -4580,10 +4706,9 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf, void *smu_prv_buf; int ret = 0; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; + ret = amdgpu_pm_dev_state_check(adev, true); + if (ret) + return ret; ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size); if (ret) diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 621200e0823f..768317ee1486 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -50,8 +50,12 @@ enum amdgpu_runpm_mode { AMDGPU_RUNPM_PX, AMDGPU_RUNPM_BOCO, AMDGPU_RUNPM_BACO, + AMDGPU_RUNPM_BAMACO, }; +#define BACO_SUPPORT (1<<0) +#define MACO_SUPPORT (1<<1) + struct amdgpu_ps { u32 caps; /* vbios flags */ u32 class; /* vbios flags */ @@ -291,7 +295,8 @@ enum ip_power_state { }; /* Used to mask smu debug modes */ -#define SMU_DEBUG_HALT_ON_ERROR 0x1 +#define SMU_DEBUG_HALT_ON_ERROR BIT(0) +#define SMU_DEBUG_POOL_USE_VRAM BIT(1) #define MAX_SMU_I2C_BUSES 2 @@ -324,6 +329,10 @@ struct config_table_setting #define 
OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET BIT(7) #define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE BIT(8) #define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET BIT(9) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE BIT(10) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET BIT(11) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE BIT(12) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET BIT(13) struct amdgpu_pm { struct mutex mutex; @@ -389,7 +398,7 @@ int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit); int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, - uint32_t block_type, bool gate); + uint32_t block_type, bool gate, int inst); extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low); @@ -401,15 +410,19 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, enum PP_SMC_POWER_PROFILE type, bool en); +int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev, + bool pause); int amdgpu_dpm_baco_reset(struct amdgpu_device *adev); int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev); +int amdgpu_dpm_link_reset(struct amdgpu_device *adev); int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev); -bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev); +int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev); bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev); +bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev); int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev); int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, @@ -426,11 +439,6 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev); int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, uint32_t cstate); -int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, - char **mode); - -int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode); - int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev); int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, @@ -443,6 +451,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev); void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); +void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst); void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable); @@ -515,6 +524,8 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, long *input, uint32_t size); int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table); +ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id, + void *table); /** * @get_pm_metrics: Get one snapshot of power management metrics from PMFW. 
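With amdgpu_dpm_is_baco_supported() changed from bool to int alongside the new BACO_SUPPORT/MACO_SUPPORT bits and the AMDGPU_RUNPM_BAMACO mode above, a plausible reading is that the return value now carries a capability bitmask rather than a yes/no. This is an assumption, since the callers are outside this diff; a hedged caller sketch:

/* Assumes the int return carries the BACO_SUPPORT/MACO_SUPPORT bits
 * defined above. */
static int sketch_pick_runpm_mode(struct amdgpu_device *adev)
{
	int caps = amdgpu_dpm_is_baco_supported(adev);

	if (caps & MACO_SUPPORT)
		return AMDGPU_RUNPM_BAMACO;
	if (caps & BACO_SUPPORT)
		return AMDGPU_RUNPM_BACO;

	return -EOPNOTSUPP;
}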
The @@ -552,6 +563,7 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, void **addr, size_t *size); int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev); +int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev); int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, const char *buf, size_t size); @@ -594,4 +606,12 @@ enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, unsigned int *num_states); int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, struct dpm_clocks *clock_table); +int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type, + int policy_level); +ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev, + enum pp_pm_policy p_type, char *buf); +int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask); +bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev); +int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask); + #endif diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h index eec816f0cbf9..c12ced32f780 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h @@ -43,8 +43,47 @@ enum amdgpu_device_attr_states { ATTR_STATE_SUPPORTED, }; +enum amdgpu_device_attr_id { + device_attr_id__unknown = -1, + device_attr_id__power_dpm_state = 0, + device_attr_id__power_dpm_force_performance_level, + device_attr_id__pp_num_states, + device_attr_id__pp_cur_state, + device_attr_id__pp_force_state, + device_attr_id__pp_table, + device_attr_id__pp_dpm_sclk, + device_attr_id__pp_dpm_mclk, + device_attr_id__pp_dpm_socclk, + device_attr_id__pp_dpm_fclk, + device_attr_id__pp_dpm_vclk, + device_attr_id__pp_dpm_vclk1, + device_attr_id__pp_dpm_dclk, + device_attr_id__pp_dpm_dclk1, + device_attr_id__pp_dpm_dcefclk, + device_attr_id__pp_dpm_pcie, + device_attr_id__pp_sclk_od, + device_attr_id__pp_mclk_od, + device_attr_id__pp_power_profile_mode, + device_attr_id__pp_od_clk_voltage, + device_attr_id__gpu_busy_percent, + device_attr_id__mem_busy_percent, + device_attr_id__vcn_busy_percent, + device_attr_id__pcie_bw, + device_attr_id__pp_features, + device_attr_id__unique_id, + device_attr_id__thermal_throttling_logging, + device_attr_id__apu_thermal_cap, + device_attr_id__gpu_metrics, + device_attr_id__smartshift_apu_power, + device_attr_id__smartshift_dgpu_power, + device_attr_id__smartshift_bias, + device_attr_id__pm_metrics, + device_attr_id__count, +}; + struct amdgpu_device_attr { struct device_attribute dev_attr; + enum amdgpu_device_attr_id attr_id; enum amdgpu_device_attr_flags flags; int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, uint32_t mask, enum amdgpu_device_attr_states *states); @@ -61,6 +100,7 @@ struct amdgpu_device_attr_entry { #define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) 
\ { .dev_attr = __ATTR(_name, _mode, _show, _store), \ + .attr_id = device_attr_id__##_name, \ .flags = _flags, \ ##__VA_ARGS__, } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 5cb4725c773f..34e71727b27d 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { if (table[i].ulSupportedSCLK != 0) { + if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES) + continue; vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = table[i].usVoltageID; vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = @@ -2592,7 +2594,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); } if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) pi->caps_enable_dfs_bypass = true; sumo_construct_sclk_voltage_mapping_table(adev, @@ -2952,9 +2954,9 @@ static int kv_dpm_get_temp(void *handle) return actual_temp; } -static int kv_dpm_early_init(void *handle) +static int kv_dpm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->powerplay.pp_funcs = &kv_dpm_funcs; adev->powerplay.pp_handle = adev; @@ -2963,10 +2965,10 @@ static int kv_dpm_early_init(void *handle) return 0; } -static int kv_dpm_late_init(void *handle) +static int kv_dpm_late_init(struct amdgpu_ip_block *ip_block) { /* powerdown unused blocks for now */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->pm.dpm_enabled) return 0; @@ -2977,11 +2979,10 @@ static int kv_dpm_late_init(void *handle) return 0; } -static int kv_dpm_sw_init(void *handle) +static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - + struct amdgpu_device *adev = ip_block->adev; ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) @@ -3022,9 +3023,9 @@ dpm_failed: return ret; } -static int kv_dpm_sw_fini(void *handle) +static int kv_dpm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; flush_work(&adev->pm.dpm.thermal.work); @@ -3033,14 +3034,15 @@ static int kv_dpm_sw_fini(void *handle) return 0; } -static int kv_dpm_hw_init(void *handle) +static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!amdgpu_dpm) return 0; + mutex_lock(&adev->pm.mutex); kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); if (ret) @@ -3048,12 +3050,14 @@ static int kv_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; amdgpu_legacy_dpm_compute_clocks(adev); + mutex_unlock(&adev->pm.mutex); + return ret; } -static int kv_dpm_hw_fini(void *handle) +static int kv_dpm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) kv_dpm_disable(adev); @@ -3061,54 +3065,53 @@ static int kv_dpm_hw_fini(void *handle) return 0; } -static int 
kv_dpm_suspend(void *handle) +static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; + + cancel_work_sync(&adev->pm.dpm.thermal.work); if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm_enabled = false; /* disable dpm */ kv_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); } return 0; } -static int kv_dpm_resume(void *handle) +static int kv_dpm_resume(struct amdgpu_ip_block *ip_block) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; + struct amdgpu_device *adev = ip_block->adev; - if (adev->pm.dpm_enabled) { + if (!amdgpu_dpm) + return 0; + + if (!adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); /* asic init will reset to the boot state */ kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); - if (ret) + if (ret) { adev->pm.dpm_enabled = false; - else + } else { adev->pm.dpm_enabled = true; - if (adev->pm.dpm_enabled) amdgpu_legacy_dpm_compute_clocks(adev); + } + mutex_unlock(&adev->pm.mutex); } - return 0; + return ret; } -static bool kv_dpm_is_idle(void *handle) +static bool kv_dpm_is_idle(struct amdgpu_ip_block *ip_block) { return true; } -static int kv_dpm_wait_for_idle(void *handle) -{ - return 0; -} - - -static int kv_dpm_soft_reset(void *handle) -{ - return 0; -} - static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -3187,13 +3190,13 @@ static int kv_dpm_process_interrupt(struct amdgpu_device *adev, return 0; } -static int kv_dpm_set_clockgating_state(void *handle, +static int kv_dpm_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { return 0; } -static int kv_dpm_set_powergating_state(void *handle, +static int kv_dpm_set_powergating_state(struct amdgpu_ip_block *ip_block, enum amd_powergating_state state) { return 0; @@ -3286,7 +3289,9 @@ static int kv_dpm_read_sensor(void *handle, int idx, } static int kv_set_powergating_by_smu(void *handle, - uint32_t block_type, bool gate) + uint32_t block_type, + bool gate, + int inst) { switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: @@ -3312,8 +3317,6 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = { .suspend = kv_dpm_suspend, .resume = kv_dpm_resume, .is_idle = kv_dpm_is_idle, - .wait_for_idle = kv_dpm_wait_for_idle, - .soft_reset = kv_dpm_soft_reset, .set_clockgating_state = kv_dpm_set_clockgating_state, .set_powergating_state = kv_dpm_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index 60377747bab4..c7518b13e787 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -831,15 +831,6 @@ restart_search: return ps; } break; - case POWER_STATE_TYPE_BALANCED: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; case POWER_STATE_TYPE_PERFORMANCE: if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { @@ -1018,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; int temp, size = sizeof(temp); - if (!adev->pm.dpm_enabled) - return; + 
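The kv_dpm.c hunks above all apply one mechanical conversion: the amd_ip_funcs callbacks take a struct amdgpu_ip_block instead of an opaque void *handle and fetch the device from it. The shape of the change, reduced to an illustrative callback:

static int sketch_early_init(struct amdgpu_ip_block *ip_block)
{
	/* was: struct amdgpu_device *adev = (struct amdgpu_device *)handle; */
	struct amdgpu_device *adev = ip_block->adev;

	/* per-IP setup, e.g. adev->powerplay.pp_handle = adev; */
	return 0;
}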
mutex_lock(&adev->pm.mutex); + if (!adev->pm.dpm_enabled) { + mutex_unlock(&adev->pm.mutex); + return; + } if (!pp_funcs->read_sensor(adev->powerplay.pp_handle, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&temp, @@ -1042,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) adev->pm.dpm.state = dpm_state; amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle); + mutex_unlock(&adev->pm.mutex); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index eb4da3666e05..4c0e976004ba 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -30,16 +30,32 @@ #include "amdgpu_atombios.h" #include "amdgpu_dpm_internal.h" #include "amd_pcie.h" -#include "sid.h" +#include "atom.h" +#include "gfx_v6_0.h" #include "r600_dpm.h" +#include "sid.h" #include "si_dpm.h" -#include "atom.h" #include "../include/pptable.h" #include <linux/math64.h> #include <linux/seq_file.h> #include <linux/firmware.h> #include <legacy_dpm.h> +#include "bif/bif_3_0_d.h" +#include "bif/bif_3_0_sh_mask.h" + +#include "dce/dce_6_0_d.h" +#include "dce/dce_6_0_sh_mask.h" + +#include "gca/gfx_6_0_d.h" +#include "gca/gfx_6_0_sh_mask.h" + +#include "gmc/gmc_6_0_d.h" +#include "gmc/gmc_6_0_sh_mask.h" + +#include "smu/smu_6_0_d.h" +#include "smu/smu_6_0_sh_mask.h" + #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b #define MC_CG_ARB_FREQ_F2 0x0c @@ -2193,7 +2209,7 @@ static u32 si_calculate_cac_wintime(struct amdgpu_device *adev) if (xclk == 0) return 0; - cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK; + cac_window = RREG32(mmCG_CAC_CTRL) & CG_CAC_CTRL__CAC_WINDOW_MASK; cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF); wintime = (cac_window_size * 100) / xclk; @@ -2489,19 +2505,19 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev, if (adev->pm.dpm.sq_ramping_threshold == 0) return -EINVAL; - if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (SQ_POWER_THROTTLE__MAX_POWER_MASK >> SQ_POWER_THROTTLE__MAX_POWER__SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (SQ_POWER_THROTTLE__MIN_POWER_MASK >> SQ_POWER_THROTTLE__MIN_POWER__SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK >> SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK >> SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK >> SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { @@ -2510,14 +2526,17 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev, if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) && enable_sq_ramping) { -
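The SQ ramping guards above are pure range checks: (FIELD_MASK >> FIELD__SHIFT) is the largest value a register field can hold, so each SISLANDS_DPM2_* constant is validated against its field width before being shifted in. Generic form of the check:

static bool fits_field(u32 val, u32 mask, u32 shift)
{
	/* mask >> shift is the all-ones value of the field's width */
	return val <= (mask >> shift);
}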
sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA); - sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE); - sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO); + sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER << SQ_POWER_THROTTLE__MAX_POWER__SHIFT; + sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MIN_POWER << SQ_POWER_THROTTLE__MIN_POWER__SHIFT; + sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA << SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT; + sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_STI_SIZE << SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT; + sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_LTI_RATIO << SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT; } else { - sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK; - sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + sq_power_throttle |= SQ_POWER_THROTTLE__MAX_POWER_MASK | + SQ_POWER_THROTTLE__MIN_POWER_MASK; + sq_power_throttle2 |= SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | + SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | + SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK; } smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle); @@ -2761,9 +2780,9 @@ static int si_initialize_smc_cac_tables(struct amdgpu_device *adev) if (!cac_tables) return -ENOMEM; - reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK; - reg |= CAC_WINDOW(si_pi->powertune_data->cac_window); - WREG32(CG_CAC_CTRL, reg); + reg = RREG32(mmCG_CAC_CTRL) & ~CG_CAC_CTRL__CAC_WINDOW_MASK; + reg |= (si_pi->powertune_data->cac_window << CG_CAC_CTRL__CAC_WINDOW__SHIFT); + WREG32(mmCG_CAC_CTRL, reg); si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage; si_pi->dyn_powertune_data.dc_pwr_value = @@ -2962,10 +2981,10 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev) ret = si_calculate_sclk_params(adev, sclk, &sclk_params); if (ret) break; - p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT; - fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT; - clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT; - clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT; + p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK) >> CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT; + fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK) >> CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT; + clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK) >> CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT; + clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK) >> CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT; fb_div &= ~0x00001FFF; fb_div >>= 1; @@ -3669,10 +3688,10 @@ static bool si_is_special_1gb_platform(struct amdgpu_device *adev) WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb); width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 
16 : 32; - tmp = RREG32(MC_ARB_RAMCFG); - row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10; - column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8; - bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2; + tmp = RREG32(mmMC_ARB_RAMCFG); + row = ((tmp & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT) + 10; + column = ((tmp & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT) + 8; + bank = ((tmp & MC_ARB_RAMCFG__NOOFBANK_MASK) >> MC_ARB_RAMCFG__NOOFBANK__SHIFT) + 2; density = (1 << (row + column - 20 + bank)) * width; @@ -3756,11 +3775,11 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) } if (want_thermal_protection) { - WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK); + WREG32_P(mmCG_THERMAL_CTRL, dpm_event_src << CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT, ~CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK); if (pi->thermal_protection) - WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); + WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK); } else { - WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK); } } @@ -3785,20 +3804,20 @@ static void si_enable_auto_throttle_source(struct amdgpu_device *adev, static void si_start_dpm(struct amdgpu_device *adev) { - WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK); } static void si_stop_dpm(struct amdgpu_device *adev) { - WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); + WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK); } static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable) { if (enable) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); + WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK); else - WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); + WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK); } @@ -3838,7 +3857,7 @@ static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, PPSMC_Msg msg, u32 parameter) { - WREG32(SMC_SCRATCH0, parameter); + WREG32(mmSMC_SCRATCH0, parameter); return amdgpu_si_send_msg_to_smc(adev, msg); } @@ -4023,12 +4042,12 @@ static void si_read_clock_registers(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); - si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL); - si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2); - si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3); - si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4); - si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM); - si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2); + si_pi->clock_registers.cg_spll_func_cntl = RREG32(mmCG_SPLL_FUNC_CNTL); + si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(mmCG_SPLL_FUNC_CNTL_2); + si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(mmCG_SPLL_FUNC_CNTL_3); + si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(mmCG_SPLL_FUNC_CNTL_4); + si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(mmCG_SPLL_SPREAD_SPECTRUM); + 
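A worked example of the density expression in si_is_special_1gb_platform() above, with illustrative field values:

/* row = NOOFROWS + 10 = 14, column = NOOFCOLS + 8 = 10,
 * bank = NOOFBANK + 2 = 3, width = 32 bits:
 *
 *   density = (1 << (14 + 10 - 20 + 3)) * 32
 *           = (1 << 7) * 32
 *           = 4096 Mbit (4 Gbit) per device
 *
 * The "- 20" folds the 2^20 bits-to-Mbit scaling into the shift.
 */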
si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(mmCG_SPLL_SPREAD_SPECTRUM_2); si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); @@ -4044,14 +4063,14 @@ static void si_enable_thermal_protection(struct amdgpu_device *adev, bool enable) { if (enable) - WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); + WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK); else - WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK); } static void si_enable_acpi_power_management(struct amdgpu_device *adev) { - WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__STATIC_PM_EN_MASK, ~GENERAL_PWRMGT__STATIC_PM_EN_MASK); } #if 0 @@ -4132,9 +4151,9 @@ static void si_program_ds_registers(struct amdgpu_device *adev) tmp = 0x1; if (eg_pi->sclk_deep_sleep) { - WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK); - WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR, - ~AUTOSCALE_ON_SS_CLEAR); + WREG32_P(mmMISC_CLK_CNTL, (tmp << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT), ~MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK); + WREG32_P(mmCG_SPLL_AUTOSCALE_CNTL, CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK, + ~CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK); } } @@ -4143,18 +4162,18 @@ static void si_program_display_gap(struct amdgpu_device *adev) u32 tmp, pipe; int i; - tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK); + tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK); if (adev->pm.dpm.new_active_crtc_count > 0) - tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); + tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT; else - tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE); + tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT; if (adev->pm.dpm.new_active_crtc_count > 1) - tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); + tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT; else - tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE); + tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT; - WREG32(CG_DISPLAY_GAP_CNTL, tmp); + WREG32(mmCG_DISPLAY_GAP_CNTL, tmp); tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG); pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT; @@ -4189,10 +4208,10 @@ static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) if (enable) { if (pi->sclk_ss) - WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK); } else { - WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN); - WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN); + WREG32_P(mmCG_SPLL_SPREAD_SPECTRUM, 0, ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK); + WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK); } } @@ -4214,15 +4233,15 @@ static void si_setup_bsp(struct amdgpu_device *adev) &pi->pbsu); - pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); - pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); + pi->dsp = (pi->bsp << CG_BSP__BSP__SHIFT) | (pi->bsu << CG_BSP__BSU__SHIFT); + pi->psp = (pi->pbsp << 
CG_BSP__BSP__SHIFT) | (pi->pbsu << CG_BSP__BSU__SHIFT); - WREG32(CG_BSP, pi->dsp); + WREG32(mmCG_BSP, pi->dsp); } static void si_program_git(struct amdgpu_device *adev) { - WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK); + WREG32_P(mmCG_GIT, R600_GICST_DFLT << CG_GIT__CG_GICST__SHIFT, ~CG_GIT__CG_GICST_MASK); } static void si_program_tp(struct amdgpu_device *adev) @@ -4231,54 +4250,54 @@ static void si_program_tp(struct amdgpu_device *adev) enum r600_td td = R600_TD_DFLT; for (i = 0; i < R600_PM_NUMBER_OF_TC; i++) - WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i]))); + WREG32(mmCG_FFCT_0 + i, (r600_utc[i] << CG_FFCT_0__UTC_0__SHIFT | r600_dtc[i] << CG_FFCT_0__DTC_0__SHIFT)); if (td == R600_TD_AUTO) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); + WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK); else - WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); + WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK); if (td == R600_TD_UP) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); + WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK); if (td == R600_TD_DOWN) - WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); + WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK); } static void si_program_tpp(struct amdgpu_device *adev) { - WREG32(CG_TPC, R600_TPC_DFLT); + WREG32(mmCG_TPC, R600_TPC_DFLT); } static void si_program_sstp(struct amdgpu_device *adev) { - WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); + WREG32(mmCG_SSP, (R600_SSTU_DFLT << CG_SSP__SSTU__SHIFT| R600_SST_DFLT << CG_SSP__SST__SHIFT)); } static void si_enable_display_gap(struct amdgpu_device *adev) { - u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); + u32 tmp = RREG32(mmCG_DISPLAY_GAP_CNTL); - tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK); - tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) | - DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE)); + tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK); + tmp |= (R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT | + R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT); - tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); - tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) | - DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); - WREG32(CG_DISPLAY_GAP_CNTL, tmp); + tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK); + tmp |= (R600_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT | + R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT); + WREG32(mmCG_DISPLAY_GAP_CNTL, tmp); } static void si_program_vc(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); - WREG32(CG_FTV, pi->vrc); + WREG32(mmCG_FTV, pi->vrc); } static void si_clear_vc(struct amdgpu_device *adev) { - WREG32(CG_FTV, 0); + WREG32(mmCG_FTV, 0); } static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) @@ -4735,7 +4754,7 @@ static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev, u32 dram_rows; u32 dram_refresh_rate; u32 mc_arb_rfsh_rate; - u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; + u32 tmp = (RREG32(mmMC_ARB_RAMCFG) & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT; if (tmp >= 4) dram_rows = 16384; @@ -4755,13 +4774,15 @@ static int si_populate_memory_timing_parameters(struct 
amdgpu_device *adev, u32 dram_timing; u32 dram_timing2; u32 burst_time; + int ret; arb_regs->mc_arb_rfsh_rate = (u8)si_calculate_memory_refresh_rate(adev, pl->sclk); - amdgpu_atombios_set_engine_dram_timings(adev, - pl->sclk, - pl->mclk); + ret = amdgpu_atombios_set_engine_dram_timings(adev, pl->sclk, + pl->mclk); + if (ret) + return ret; dram_timing = RREG32(MC_ARB_DRAM_TIMING); dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); @@ -4907,7 +4928,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev, si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd); - reg = CG_R(0xffff) | CG_L(0); + reg = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT; table->initialState.level.aT = cpu_to_be32(reg); table->initialState.level.bSP = cpu_to_be32(pi->dsp); table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen; @@ -4933,10 +4954,13 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev, table->initialState.level.dpm2.BelowSafeInc = 0; table->initialState.level.dpm2.PwrEfficiencyRatio = 0; - reg = MIN_POWER_MASK | MAX_POWER_MASK; + reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | + SQ_POWER_THROTTLE__MAX_POWER_MASK; table->initialState.level.SQPowerThrottle = cpu_to_be32(reg); - reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | + SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | + SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK; table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg); return 0; @@ -5055,8 +5079,8 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS); - spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; - spll_func_cntl_2 |= SCLK_MUX_SEL(4); + spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK; + spll_func_cntl_2 |= 4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT; table->ACPIState.level.mclk.vDLL_CNTL = cpu_to_be32(dll_cntl); @@ -5100,10 +5124,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, table->ACPIState.level.dpm2.BelowSafeInc = 0; table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0; - reg = MIN_POWER_MASK | MAX_POWER_MASK; + reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | SQ_POWER_THROTTLE__MAX_POWER_MASK; table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg); - reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK; table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg); return 0; @@ -5248,8 +5272,8 @@ static int si_init_smc_table(struct amdgpu_device *adev) if (ret) return ret; - WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control); - WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); + WREG32(mmCG_ULV_CONTROL, ulv->cg_ulv_control); + WREG32(mmCG_ULV_PARAMETER, ulv->cg_ulv_parameter); lane_width = amdgpu_get_pcie_lanes(adev); si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); @@ -5292,16 +5316,16 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev, do_div(tmp, reference_clock); fbdiv = (u32) tmp; - spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK); - spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div); - spll_func_cntl |= SPLL_PDIV_A(dividers.post_div); + spll_func_cntl &= ~(CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK | CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK); + spll_func_cntl |= dividers.ref_div << CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT; + spll_func_cntl |= dividers.post_div << 
CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT; - spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; - spll_func_cntl_2 |= SCLK_MUX_SEL(2); + spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK; + spll_func_cntl_2 |= 2 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT; - spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; - spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); - spll_func_cntl_3 |= SPLL_DITHEN; + spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK; + spll_func_cntl_3 |= fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT; + spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK; if (pi->sclk_ss) { struct amdgpu_atom_ss ss; @@ -5312,12 +5336,12 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev, u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); - cg_spll_spread_spectrum &= ~CLK_S_MASK; - cg_spll_spread_spectrum |= CLK_S(clk_s); - cg_spll_spread_spectrum |= SSEN; + cg_spll_spread_spectrum &= ~CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK; + cg_spll_spread_spectrum |= clk_s << CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT; + cg_spll_spread_spectrum |= CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK; - cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; - cg_spll_spread_spectrum_2 |= CLK_V(clk_v); + cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK; + cg_spll_spread_spectrum_2 |= clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT; } } @@ -5467,7 +5491,6 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev, int ret; bool dll_state_on; u16 std_vddc; - bool gmc_pg = false; if (eg_pi->pcie_performance_request && (si_pi->force_pcie_gen != SI_PCIE_GEN_INVALID)) @@ -5484,12 +5507,9 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev, if (pi->mclk_stutter_mode_threshold && (pl->mclk <= pi->mclk_stutter_mode_threshold) && !eg_pi->uvd_enabled && - (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && + (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && (adev->pm.dpm.new_active_crtc_count <= 2)) { level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN; - - if (gmc_pg) - level->mcFlags |= SISLANDS_SMC_MC_PG_EN; } if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { @@ -5581,7 +5601,7 @@ static int si_populate_smc_t(struct amdgpu_device *adev, return -EINVAL; if (state->performance_level_count < 2) { - a_t = CG_R(0xffff) | CG_L(0); + a_t = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT; smc_state->levels[0].aT = cpu_to_be32(a_t); return 0; } @@ -5602,13 +5622,13 @@ static int si_populate_smc_t(struct amdgpu_device *adev, t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT; } - a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK; - a_t |= CG_R(t_l * pi->bsp / 20000); + a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_AT__CG_R_MASK; + a_t |= (t_l * pi->bsp / 20000) << CG_AT__CG_R__SHIFT; smc_state->levels[i].aT = cpu_to_be32(a_t); high_bsp = (i == state->performance_level_count - 2) ? 
pi->pbsp : pi->bsp; - a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000); + a_t = (0xffff) << CG_AT__CG_R__SHIFT | (t_h * high_bsp / 20000) << CG_AT__CG_L__SHIFT; smc_state->levels[i + 1].aT = cpu_to_be32(a_t); } @@ -6182,9 +6202,9 @@ static int si_upload_mc_reg_table(struct amdgpu_device *adev, static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable) { if (enable) - WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK); else - WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); + WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK); } static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, @@ -6206,8 +6226,8 @@ static u16 si_get_current_pcie_speed(struct amdgpu_device *adev) { u32 speed_cntl; - speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; - speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; + speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL) & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK; + speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; return (u16)speed_cntl; } @@ -6414,21 +6434,21 @@ static void si_dpm_setup_asic(struct amdgpu_device *adev) static int si_thermal_enable_alert(struct amdgpu_device *adev, bool enable) { - u32 thermal_int = RREG32(CG_THERMAL_INT); + u32 thermal_int = RREG32(mmCG_THERMAL_INT); if (enable) { PPSMC_Result result; - thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); - WREG32(CG_THERMAL_INT, thermal_int); + thermal_int &= ~(CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK); + WREG32(mmCG_THERMAL_INT, thermal_int); result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); if (result != PPSMC_Result_OK) { DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); return -EINVAL; } } else { - thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; - WREG32(CG_THERMAL_INT, thermal_int); + thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; + WREG32(mmCG_THERMAL_INT, thermal_int); } return 0; @@ -6449,9 +6469,9 @@ static int si_thermal_set_temperature_range(struct amdgpu_device *adev, return -EINVAL; } - WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); - WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); - WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); + WREG32_P(mmCG_THERMAL_INT, (high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTH_MASK); + WREG32_P(mmCG_THERMAL_INT, (low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTL_MASK); + WREG32_P(mmCG_THERMAL_CTRL, (high_temp / 1000) << CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT, ~CG_THERMAL_CTRL__DIG_THERM_DPM_MASK); adev->pm.dpm.thermal.min_temp = low_temp; adev->pm.dpm.thermal.max_temp = high_temp; @@ -6465,20 +6485,20 @@ static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode) u32 tmp; if (si_pi->fan_ctrl_is_in_default_mode) { - tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; + tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; si_pi->fan_ctrl_default_mode = tmp; - tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; + tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) >> CG_FDO_CTRL2__TMIN__SHIFT; si_pi->t_min = tmp; 
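/*
 * Editor's sketch: the conversion pattern applied throughout this series is
 * a plain read-modify-write against the generated register headers — the
 * bespoke helpers (TMIN(x), FDO_PWM_MODE(x), ...) become explicit
 * REG__FIELD_MASK / REG__FIELD__SHIFT pairs on an mm-prefixed offset.
 * A minimal illustration of the idiom; example_set_fdo_tmin() is a
 * hypothetical helper, not part of this patch:
 */
static void example_set_fdo_tmin(struct amdgpu_device *adev, u32 t_min)
{
	u32 tmp = RREG32(mmCG_FDO_CTRL2);            /* read current value   */

	tmp &= ~CG_FDO_CTRL2__TMIN_MASK;             /* clear the TMIN field */
	tmp |= (t_min << CG_FDO_CTRL2__TMIN__SHIFT)  /* place the new value  */
		& CG_FDO_CTRL2__TMIN_MASK;           /* keep it in range     */
	WREG32(mmCG_FDO_CTRL2, tmp);                 /* write back           */
}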
si_pi->fan_ctrl_is_in_default_mode = false; } - tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; - tmp |= TMIN(0); - WREG32(CG_FDO_CTRL2, tmp); + tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK; + tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT; + WREG32(mmCG_FDO_CTRL2, tmp); - tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; - tmp |= FDO_PWM_MODE(mode); - WREG32(CG_FDO_CTRL2, tmp); + tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK; + tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; + WREG32(mmCG_FDO_CTRL2, tmp); } static int si_thermal_setup_fan_table(struct amdgpu_device *adev) @@ -6497,7 +6517,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev) return 0; } - duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; + duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; if (duty100 == 0) { adev->pm.dpm.fan.ucode_fan_control = false; @@ -6533,7 +6553,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev) reference_clock) / 1600); fan_table.fdo_max = cpu_to_be16((u16)duty100); - tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; + tmp = (RREG32(mmCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT; fan_table.temp_src = (uint8_t)tmp; ret = amdgpu_si_copy_bytes_to_smc(adev, @@ -6592,8 +6612,8 @@ static int si_dpm_get_fan_speed_pwm(void *handle, if (adev->pm.no_fan) return -ENOENT; - duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; - duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; + duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; + duty = (RREG32(mmCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT; if (duty100 == 0) return -EINVAL; @@ -6623,7 +6643,7 @@ static int si_dpm_set_fan_speed_pwm(void *handle, if (speed > 255) return -EINVAL; - duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; + duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; if (duty100 == 0) return -EINVAL; @@ -6632,9 +6652,9 @@ static int si_dpm_set_fan_speed_pwm(void *handle, do_div(tmp64, 255); duty = (u32)tmp64; - tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; - tmp |= FDO_STATIC_DUTY(duty); - WREG32(CG_FDO_CTRL0, tmp); + tmp = RREG32(mmCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK; + tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT; + WREG32(mmCG_FDO_CTRL0, tmp); return 0; } @@ -6674,8 +6694,8 @@ static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode) if (si_pi->fan_is_controlled_by_smc) return 0; - tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; - *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT); + tmp = RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK; + *fan_mode = (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT); return 0; } @@ -6693,7 +6713,7 @@ static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev, if (adev->pm.fan_pulses_per_revolution == 0) return -ENOENT; - tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; + tach_period = (RREG32(mmCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> CG_TACH_STATUS__TACH_PERIOD__SHIFT; if (tach_period == 0) return -ENOENT; @@ -6722,9 +6742,9 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev, si_fan_ctrl_stop_smc_fan_control(adev); tach_period = 60 * xclk * 10000 / (8 * 
speed); - tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; - tmp |= TARGET_PERIOD(tach_period); - WREG32(CG_TACH_CTRL, tmp); + tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK; + tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT; + WREG32(mmCG_TACH_CTRL, tmp); si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM); @@ -6738,13 +6758,13 @@ static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev) u32 tmp; if (!si_pi->fan_ctrl_is_in_default_mode) { - tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; - tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode); - WREG32(CG_FDO_CTRL2, tmp); + tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK; + tmp |= si_pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; + WREG32(mmCG_FDO_CTRL2, tmp); - tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; - tmp |= TMIN(si_pi->t_min); - WREG32(CG_FDO_CTRL2, tmp); + tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK; + tmp |= si_pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT; + WREG32(mmCG_FDO_CTRL2, tmp); si_pi->fan_ctrl_is_in_default_mode = true; } } @@ -6762,14 +6782,14 @@ static void si_thermal_initialize(struct amdgpu_device *adev) u32 tmp; if (adev->pm.fan_pulses_per_revolution) { - tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; - tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1); - WREG32(CG_TACH_CTRL, tmp); + tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK; + tmp |= (adev->pm.fan_pulses_per_revolution -1) << CG_TACH_CTRL__EDGE_PER_REV__SHIFT; + WREG32(mmCG_TACH_CTRL, tmp); } - tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; - tmp |= TACH_PWM_RESP_RATE(0x28); - WREG32(CG_FDO_CTRL2, tmp); + tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK; + tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT; + WREG32(mmCG_FDO_CTRL2, tmp); } static int si_thermal_start_thermal_controller(struct amdgpu_device *adev) @@ -7532,8 +7552,8 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle, struct si_ps *ps = si_get_ps(rps); struct rv7xx_pl *pl; u32 current_index = - (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> - CURRENT_STATE_INDEX_SHIFT; + (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT; if (current_index >= ps->performance_level_count) { seq_printf(m, "invalid dpm profile %d\n", current_index); @@ -7556,14 +7576,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev, case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int |= THERM_INT_MASK_HIGH; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK; + WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); break; case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int &= ~THERM_INT_MASK_HIGH; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK; + WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); break; default: break; @@ -7573,14 +7593,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev, case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int |= 
THERM_INT_MASK_LOW; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; + WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); break; case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int &= ~THERM_INT_MASK_LOW; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; + WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); break; default: break; @@ -7623,10 +7643,10 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, return 0; } -static int si_dpm_late_init(void *handle) +static int si_dpm_late_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->pm.dpm_enabled) return 0; @@ -7652,7 +7672,6 @@ static int si_dpm_late_init(void *handle) static int si_dpm_init_microcode(struct amdgpu_device *adev) { const char *chip_name; - char fw_name[30]; int err; DRM_DEBUG("\n"); @@ -7712,20 +7731,20 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); - err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_smc.bin", chip_name); if (err) { - DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n", - err, fw_name); + DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s_smc.bin\"\n", + err, chip_name); amdgpu_ucode_release(&adev->pm.fw); } return err; } -static int si_dpm_sw_init(void *handle) +static int si_dpm_sw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) @@ -7769,9 +7788,9 @@ dpm_failed: return ret; } -static int si_dpm_sw_fini(void *handle) +static int si_dpm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; flush_work(&adev->pm.dpm.thermal.work); @@ -7780,15 +7799,16 @@ static int si_dpm_sw_fini(void *handle) return 0; } -static int si_dpm_hw_init(void *handle) +static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!amdgpu_dpm) return 0; + mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); if (ret) @@ -7796,12 +7816,13 @@ static int si_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; amdgpu_legacy_dpm_compute_clocks(adev); + mutex_unlock(&adev->pm.mutex); return ret; } -static int si_dpm_hw_fini(void *handle) +static int si_dpm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) si_dpm_disable(adev); @@ -7809,62 +7830,69 @@ static int si_dpm_hw_fini(void *handle) return 0; } -static int si_dpm_suspend(void *handle) +static int si_dpm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; + + cancel_work_sync(&adev->pm.dpm.thermal.work); if 
(adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm_enabled = false; /* disable dpm */ si_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); } + return 0; } -static int si_dpm_resume(void *handle) +static int si_dpm_resume(struct amdgpu_ip_block *ip_block) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; + struct amdgpu_device *adev = ip_block->adev; - if (adev->pm.dpm_enabled) { + if (!amdgpu_dpm) + return 0; + + if (!adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ + mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); - if (ret) + if (ret) { adev->pm.dpm_enabled = false; - else + } else { adev->pm.dpm_enabled = true; - if (adev->pm.dpm_enabled) amdgpu_legacy_dpm_compute_clocks(adev); + } + mutex_unlock(&adev->pm.mutex); } - return 0; + + return ret; } -static bool si_dpm_is_idle(void *handle) +static bool si_dpm_is_idle(struct amdgpu_ip_block *ip_block) { /* XXX */ return true; } -static int si_dpm_wait_for_idle(void *handle) +static int si_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* XXX */ return 0; } -static int si_dpm_soft_reset(void *handle) -{ - return 0; -} - -static int si_dpm_set_clockgating_state(void *handle, +static int si_dpm_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { return 0; } -static int si_dpm_set_powergating_state(void *handle, +static int si_dpm_set_powergating_state(struct amdgpu_ip_block *ip_block, enum amd_powergating_state state) { return 0; @@ -7877,8 +7905,8 @@ static int si_dpm_get_temp(void *handle) int actual_temp = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> - CTF_TEMP_SHIFT; + temp = (RREG32(mmCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> + CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; if (temp & 0x200) actual_temp = 255; @@ -7928,20 +7956,16 @@ static void si_dpm_print_power_state(void *handle, DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); for (i = 0; i < ps->performance_level_count; i++) { pl = &ps->performance_levels[i]; - if (adev->asic_type >= CHIP_TAHITI) - DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", - i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); - else - DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", - i, pl->sclk, pl->mclk, pl->vddc, pl->vddci); + DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", + i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); } amdgpu_dpm_print_ps_status(adev, rps); } -static int si_dpm_early_init(void *handle) +static int si_dpm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->powerplay.pp_funcs = &si_dpm_funcs; adev->powerplay.pp_handle = adev; @@ -8012,8 +8036,8 @@ static int si_dpm_read_sensor(void *handle, int idx, struct si_ps *ps = si_get_ps(rps); uint32_t sclk, mclk; u32 pl_index = - (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> - CURRENT_STATE_INDEX_SHIFT; + (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT; /* size must be at least 4 bytes for all sensors 
*/ if (*size < 4) @@ -8057,7 +8081,6 @@ static const struct amd_ip_funcs si_dpm_ip_funcs = { .resume = si_dpm_resume, .is_idle = si_dpm_is_idle, .wait_for_idle = si_dpm_wait_for_idle, - .soft_reset = si_dpm_soft_reset, .set_clockgating_state = si_dpm_set_clockgating_state, .set_powergating_state = si_dpm_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c index 8f994ffa9cd1..4e65ab9e931c 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c @@ -30,6 +30,12 @@ #include "amdgpu_ucode.h" #include "sislands_smc.h" +#include "smu/smu_6_0_d.h" +#include "smu/smu_6_0_sh_mask.h" + +#include "gca/gfx_6_0_d.h" +#include "gca/gfx_6_0_sh_mask.h" + static int si_set_smc_sram_address(struct amdgpu_device *adev, u32 smc_address, u32 limit) { @@ -38,8 +44,8 @@ static int si_set_smc_sram_address(struct amdgpu_device *adev, if ((smc_address + 3) > limit) return -EINVAL; - WREG32(SMC_IND_INDEX_0, smc_address); - WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + WREG32(mmSMC_IND_INDEX_0, smc_address); + WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); return 0; } @@ -68,7 +74,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, if (ret) goto done; - WREG32(SMC_IND_DATA_0, data); + WREG32(mmSMC_IND_DATA_0, data); src += 4; byte_count -= 4; @@ -83,7 +89,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, if (ret) goto done; - original_data = RREG32(SMC_IND_DATA_0); + original_data = RREG32(mmSMC_IND_DATA_0); extra_shift = 8 * (4 - byte_count); while (byte_count > 0) { @@ -99,7 +105,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, if (ret) goto done; - WREG32(SMC_IND_DATA_0, data); + WREG32(mmSMC_IND_DATA_0, data); } done: @@ -121,10 +127,10 @@ void amdgpu_si_reset_smc(struct amdgpu_device *adev) { u32 tmp; - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) | RST_REG; @@ -170,16 +176,16 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, if (!amdgpu_si_is_smc_running(adev)) return PPSMC_Result_Failed; - WREG32(SMC_MESSAGE_0, msg); + WREG32(mmSMC_MESSAGE_0, msg); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(SMC_RESP_0); + tmp = RREG32(mmSMC_RESP_0); if (tmp != 0) break; udelay(1); } - return (PPSMC_Result)RREG32(SMC_RESP_0); + return (PPSMC_Result)RREG32(mmSMC_RESP_0); } PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev) @@ -225,18 +231,18 @@ int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit) return -EINVAL; spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(SMC_IND_INDEX_0, ucode_start_address); - WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); + WREG32(mmSMC_IND_INDEX_0, ucode_start_address); + WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); while (ucode_size >= 4) { /* SMC address space is BE */ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; - WREG32(SMC_IND_DATA_0, data); + WREG32(mmSMC_IND_DATA_0, data); src += 4; ucode_size -= 4; } - WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); 
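/*
 * Editor's sketch: SMC SRAM is reached through an index/data register pair
 * (mmSMC_IND_INDEX_0 / mmSMC_IND_DATA_0). With AUTO_INCREMENT_IND_0 set,
 * the index advances after each data write, so a firmware image can be
 * streamed after programming the index once; callers hold smc_idx_lock as
 * in amdgpu_si_load_smc_ucode() above. example_smc_stream() is a
 * hypothetical helper restating that loop, assuming a dword-aligned
 * (start, src, len):
 */
static void example_smc_stream(struct amdgpu_device *adev,
			       u32 start, const u8 *src, u32 len)
{
	WREG32(mmSMC_IND_INDEX_0, start);
	WREG32_P(mmSMC_IND_ACCESS_CNTL,
		 SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK,
		 ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
	while (len >= 4) {
		/* SMC address space is big-endian */
		WREG32(mmSMC_IND_DATA_0, (src[0] << 24) | (src[1] << 16) |
					 (src[2] << 8) | src[3]);
		src += 4;
		len -= 4;
	}
	WREG32_P(mmSMC_IND_ACCESS_CNTL, 0,
		 ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
}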
spin_unlock_irqrestore(&adev->smc_idx_lock, flags); return 0; @@ -251,7 +257,7 @@ int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, spin_lock_irqsave(&adev->smc_idx_lock, flags); ret = si_set_smc_sram_address(adev, smc_address, limit); if (ret == 0) - *value = RREG32(SMC_IND_DATA_0); + *value = RREG32(mmSMC_IND_DATA_0); spin_unlock_irqrestore(&adev->smc_idx_lock, flags); return ret; @@ -266,7 +272,7 @@ int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, spin_lock_irqsave(&adev->smc_idx_lock, flags); ret = si_set_smc_sram_address(adev, smc_address, limit); if (ret == 0) - WREG32(SMC_IND_DATA_0, value); + WREG32(mmSMC_IND_DATA_0, value); spin_unlock_irqrestore(&adev->smc_idx_lock, flags); return ret; diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index aed0e2cefbf9..b48a031cbba0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -51,6 +51,11 @@ static int amd_powerplay_create(struct amdgpu_device *adev) hwmgr->adev = adev; hwmgr->not_vf = !amdgpu_sriov_vf(adev); hwmgr->device = amdgpu_cgs_create_device(adev); + if (!hwmgr->device) { + kfree(hwmgr); + return -ENOMEM; + } + mutex_init(&hwmgr->msg_lock); hwmgr->chip_family = adev->family; hwmgr->chip_id = adev->asic_type; @@ -75,11 +80,10 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev) hwmgr = NULL; } -static int pp_early_init(void *handle) +static int pp_early_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = handle; - + struct amdgpu_device *adev = ip_block->adev; ret = amd_powerplay_create(adev); if (ret != 0) @@ -99,7 +103,7 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work) struct amdgpu_device *adev = hwmgr->adev; struct amdgpu_dpm_thermal *range = &adev->pm.dpm.thermal; - uint32_t gpu_temperature, size; + uint32_t gpu_temperature, size = sizeof(gpu_temperature); int ret; /* @@ -131,9 +135,9 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work) orderly_poweroff(true); } -static int pp_sw_init(void *handle) +static int pp_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; int ret = 0; @@ -148,9 +152,9 @@ static int pp_sw_init(void *handle) return ret; } -static int pp_sw_fini(void *handle) +static int pp_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; hwmgr_sw_fini(hwmgr); @@ -160,10 +164,10 @@ static int pp_sw_fini(void *handle) return 0; } -static int pp_hw_init(void *handle) +static int pp_hw_init(struct amdgpu_ip_block *ip_block) { int ret = 0; - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; ret = hwmgr_hw_init(hwmgr); @@ -174,10 +178,9 @@ static int pp_hw_init(void *handle) return ret; } -static int pp_hw_fini(void *handle) +static int pp_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle; cancel_delayed_work_sync(&hwmgr->swctf_delayed_work); @@ -217,9 +220,9 @@ static void pp_reserve_vram_for_smu(struct amdgpu_device *adev) } } -static int pp_late_init(void *handle) 
+static int pp_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; if (hwmgr && hwmgr->pm_en) @@ -231,9 +234,9 @@ static int pp_late_init(void *handle) return 0; } -static void pp_late_fini(void *handle) +static void pp_late_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.smu_prv_buffer) amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL); @@ -241,30 +244,20 @@ static void pp_late_fini(void *handle) } -static bool pp_is_idle(void *handle) +static bool pp_is_idle(struct amdgpu_ip_block *ip_block) { return false; } -static int pp_wait_for_idle(void *handle) -{ - return 0; -} - -static int pp_sw_reset(void *handle) -{ - return 0; -} - -static int pp_set_powergating_state(void *handle, +static int pp_set_powergating_state(struct amdgpu_ip_block *ip_block, enum amd_powergating_state state) { return 0; } -static int pp_suspend(void *handle) +static int pp_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; cancel_delayed_work_sync(&hwmgr->swctf_delayed_work); @@ -272,15 +265,14 @@ static int pp_suspend(void *handle) return hwmgr_suspend(hwmgr); } -static int pp_resume(void *handle) +static int pp_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle; return hwmgr_resume(hwmgr); } -static int pp_set_clockgating_state(void *handle, +static int pp_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { return 0; @@ -298,8 +290,6 @@ static const struct amd_ip_funcs pp_ip_funcs = { .suspend = pp_suspend, .resume = pp_resume, .is_idle = pp_is_idle, - .wait_for_idle = pp_wait_for_idle, - .soft_reset = pp_sw_reset, .set_clockgating_state = pp_set_clockgating_state, .set_powergating_state = pp_set_powergating_state, }; @@ -927,7 +917,7 @@ static int pp_dpm_switch_power_profile(void *handle, enum PP_SMC_POWER_PROFILE type, bool en) { struct pp_hwmgr *hwmgr = handle; - long workload; + long workload[1]; uint32_t index; if (!hwmgr || !hwmgr->pm_en) @@ -945,12 +935,12 @@ static int pp_dpm_switch_power_profile(void *handle, hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]); index = fls(hwmgr->workload_mask); index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0; - workload = hwmgr->workload_setting[index]; + workload[0] = hwmgr->workload_setting[index]; } else { hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]); index = fls(hwmgr->workload_mask); index = index <= Workload_Policy_Max ? 
index - 1 : 0; - workload = hwmgr->workload_setting[index]; + workload[0] = hwmgr->workload_setting[index]; } if (type == PP_SMC_POWER_PROFILE_COMPUTE && @@ -960,7 +950,7 @@ static int pp_dpm_switch_power_profile(void *handle, } if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); + hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0); return 0; } @@ -1242,7 +1232,9 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate) } static int pp_set_powergating_by_smu(void *handle, - uint32_t block_type, bool gate) + uint32_t block_type, + bool gate, + int inst) { int ret = 0; @@ -1371,7 +1363,7 @@ static int pp_set_active_display_count(void *handle, uint32_t count) return phm_set_active_display_count(hwmgr, count); } -static bool pp_get_asic_baco_capability(void *handle) +static int pp_get_asic_baco_capability(void *handle) { struct pp_hwmgr *hwmgr = handle; @@ -1379,10 +1371,10 @@ static bool pp_get_asic_baco_capability(void *handle) return false; if (!(hwmgr->not_vf && amdgpu_dpm) || - !hwmgr->hwmgr_func->get_asic_baco_capability) + !hwmgr->hwmgr_func->get_bamaco_support) return false; - return hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr); + return hwmgr->hwmgr_func->get_bamaco_support(hwmgr); } static int pp_get_asic_baco_state(void *handle, int *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c index 90452b66e107..a59677cf8dfc 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c @@ -149,16 +149,6 @@ int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr) return 0; } -int phm_powerdown_uvd(struct pp_hwmgr *hwmgr) -{ - PHM_FUNC_CHECK(hwmgr); - - if (hwmgr->hwmgr_func->powerdown_uvd != NULL) - return hwmgr->hwmgr_func->powerdown_uvd(hwmgr); - return 0; -} - - int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c index 1d829402cd2e..18f00038d844 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c @@ -30,9 +30,8 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr) { int result; unsigned int i; - unsigned int table_entries; struct pp_power_state *state; - int size; + int size, table_entries; if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL) return 0; @@ -40,15 +39,19 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr) if (hwmgr->hwmgr_func->get_power_state_size == NULL) return 0; - hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr); + table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr); - hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) + + size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) + sizeof(struct pp_power_state); - if (table_entries == 0 || size == 0) { + if (table_entries <= 0 || size == 0) { pr_warn("Please check whether power state management is supported on this asic\n"); + hwmgr->num_ps = 0; + hwmgr->ps_size = 0; return 0; } + hwmgr->num_ps = table_entries; + hwmgr->ps_size = size; hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL); if (hwmgr->ps == NULL) @@ -269,7 +272,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set struct pp_power_state *new_ps) { uint32_t index; - long workload; + long 
workload[1]; if (hwmgr->not_vf) { if (!skip_display_settings) @@ -294,10 +297,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { index = fls(hwmgr->workload_mask); index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0; - workload = hwmgr->workload_setting[index]; + workload[0] = hwmgr->workload_setting[index]; - if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode) - hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); + if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode) + hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0); } return 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c index b1b4c09c3467..8d40ed0f0e83 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c @@ -28,7 +28,6 @@ #include "ppatomctrl.h" #include "atombios.h" #include "cgs_common.h" -#include "ppevvmath.h" #define MEM_ID_MASK 0xff000000 #define MEM_ID_SHIFT 24 @@ -73,8 +72,9 @@ static int atomctrl_retrieve_ac_timing( j++; } else if ((table->mc_reg_address[i].uc_pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) { - table->mc_reg_table_entry[num_ranges].mc_data[i] = - table->mc_reg_table_entry[num_ranges].mc_data[i-1]; + if (i) + table->mc_reg_table_entry[num_ranges].mc_data[i] = + table->mc_reg_table_entry[num_ranges].mc_data[i-1]; } } num_ranges++; @@ -143,6 +143,10 @@ int atomctrl_initialize_mc_reg_table( vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *) smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); + if (!vram_info) { + pr_err("Could not retrieve the VramInfo table!"); + return -EINVAL; + } if (module_index >= vram_info->ucNumOfVRAMModule) { pr_err("Invalid VramInfo table."); @@ -180,6 +184,10 @@ int atomctrl_initialize_mc_reg_table_v2_2( vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *) smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); + if (!vram_info) { + pr_err("Could not retrieve the VramInfo table!"); + return -EINVAL; + } if (module_index >= vram_info->ucNumOfVRAMModule) { pr_err("Invalid VramInfo table."); @@ -676,433 +684,6 @@ bool atomctrl_get_pp_assign_pin( return bRet; } -int atomctrl_calculate_voltage_evv_on_sclk( - struct pp_hwmgr *hwmgr, - uint8_t voltage_type, - uint32_t sclk, - uint16_t virtual_voltage_Id, - uint16_t *voltage, - uint16_t dpm_level, - bool debug) -{ - ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo; - struct amdgpu_device *adev = hwmgr->adev; - EFUSE_LINEAR_FUNC_PARAM sRO_fuse; - EFUSE_LINEAR_FUNC_PARAM sCACm_fuse; - EFUSE_LINEAR_FUNC_PARAM sCACb_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse; - EFUSE_INPUT_PARAMETER sInput_FuseValues; - READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues; - - uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused; - fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7; - fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma; - fInt fLkg_FT, repeat; - fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX; - fInt 
fRLL_LoadLine, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin; - fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM; - fInt fSclk_margin, fSclk, fEVV_V; - fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL; - uint32_t ul_FT_Lkg_V0NORM; - fInt fLn_MaxDivMin, fMin, fAverage, fRange; - fInt fRoots[2]; - fInt fStepSize = GetScaledFraction(625, 100000); - - int result; - - getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *) - smu_atom_get_data_table(hwmgr->adev, - GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), - NULL, NULL, NULL); - - if (!getASICProfilingInfo) - return -1; - - if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || - (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && - getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) - return -1; - - /*----------------------------------------------------------- - *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL - *----------------------------------------------------------- - */ - fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000); - - switch (dpm_level) { - case 1: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000); - break; - case 2: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000); - break; - case 3: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000); - break; - case 4: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000); - break; - case 5: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000); - break; - case 6: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000); - break; - case 7: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000); - break; - default: - pr_err("DPM Level not supported\n"); - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000); - } - - /*------------------------- - * DECODING FUSE VALUES - * ------------------------ - */ - /*Decode RO_Fused*/ - sRO_fuse = getASICProfilingInfo->sRoFuse; - - sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - /* Finally, the actual fuse value */ - ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1); - fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1); - fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); - - sCACm_fuse = getASICProfilingInfo->sCACm; - - sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if 
(result) - return result; - - ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000); - fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000); - - fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); - - sCACb_fuse = getASICProfilingInfo->sCACb; - - sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength; - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000); - fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000); - - fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); - - sKt_Beta_fuse = getASICProfilingInfo->sKt_b; - - sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000); - - fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused, - fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); - - sKv_m_fuse = getASICProfilingInfo->sKv_m; - - sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - if (result) - return result; - - ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000); - fRange = fMultiply(fRange, ConvertToFraction(-1)); - - fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, - fAverage, fRange, sKv_m_fuse.ucEfuseLength); - - sKv_b_fuse = getASICProfilingInfo->sKv_b; - - sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength; - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000); - fRange = 
GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000); - - fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, - fAverage, fRange, sKv_b_fuse.ucEfuseLength); - - /* Decoding the Leakage - No special struct container */ - /* - * usLkgEuseIndex=56 - * ucLkgEfuseBitLSB=6 - * ucLkgEfuseLength=10 - * ulLkgEncodeLn_MaxDivMin=69077 - * ulLkgEncodeMax=1000000 - * ulLkgEncodeMin=1000 - * ulEfuseLogisticAlpha=13 - */ - - sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex; - sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB; - sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000); - fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000); - - fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, - fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); - fLkg_FT = fFT_Lkg_V0NORM; - - /*------------------------------------------- - * PART 2 - Grabbing all required values - *------------------------------------------- - */ - fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); - fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); - fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); - fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); - fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); - fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign))); - fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign))); - fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); - - fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a)); - fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b)); - fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c)); - - fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed)); - - fMargin_FMAX_mean = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000); - fMargin_Plat_mean = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000); - fMargin_FMAX_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000); - fMargin_Plat_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000); - - fMargin_DC_sigma = 
GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100); - fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); - - fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); - fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100)); - fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100)); - fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100))); - fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10)); - - fSclk = GetScaledFraction(sclk, 100); - - fV_max = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4)); - fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10); - fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100); - fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10); - fV_FT = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4)); - fV_min = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4)); - - /*----------------------- - * PART 3 - *----------------------- - */ - - fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); - fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); - fC_Term = fAdd(fMargin_RO_c, - fAdd(fMultiply(fSM_A0, fLkg_FT), - fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)), - fAdd(fMultiply(fSM_A3, fSclk), - fSubtract(fSM_A7, fRO_fused))))); - - fVDDC_base = fSubtract(fRO_fused, - fSubtract(fMargin_RO_c, - fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk)))); - fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2)); - - repeat = fSubtract(fVDDC_base, - fDivide(fMargin_DC_sigma, ConvertToFraction(1000))); - - fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a, - fGetSquare(repeat)), - fAdd(fMultiply(fMargin_RO_b, repeat), - fMargin_RO_c)); - - fDC_SCLK = fSubtract(fRO_fused, - fSubtract(fRO_DC_margin, - fSubtract(fSM_A3, - fMultiply(fSM_A2, repeat)))); - fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1)); - - fSigma_DC = fSubtract(fSclk, fDC_SCLK); - - fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean); - fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean); - fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma); - fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma); - - fSquared_Sigma_DC = fGetSquare(fSigma_DC); - fSquared_Sigma_CR = fGetSquare(fSigma_CR); - fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX); - - fSclk_margin = fAdd(fMicro_FMAX, - fAdd(fMicro_CR, - fAdd(fMargin_fixed, - fSqrt(fAdd(fSquared_Sigma_FMAX, - fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR)))))); - /* - fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5; - fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6; - fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused; - */ - - fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5); - fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6); - fC_Term = fAdd(fRO_DC_margin, - fAdd(fMultiply(fSM_A0, fLkg_FT), - fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT), - fAdd(fSclk, fSclk_margin)), - fAdd(fMultiply(fSM_A3, - fAdd(fSclk, fSclk_margin)), - fSubtract(fSM_A7, fRO_fused))))); - - SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots); - - if (GreaterThan(fRoots[0], fRoots[1])) - fEVV_V = fRoots[1]; - else - fEVV_V = fRoots[0]; - - if (GreaterThan(fV_min, fEVV_V)) - fEVV_V = fV_min; - 
else if (GreaterThan(fEVV_V, fV_max)) - fEVV_V = fSubtract(fV_max, fStepSize); - - fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0); - - /*----------------- - * PART 4 - *----------------- - */ - - fV_x = fV_min; - - while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) { - fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd( - fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk), - fGetSquare(fV_x)), fDerateTDP); - - fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor, - fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused, - fT_prod), fKv_b_fused), fV_x)), fV_x))); - fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply( - fKt_Beta_fused, fT_prod))); - fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( - fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT))); - fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( - fKt_Beta_fused, fT_FT))); - - fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right); - - fTDP_Current = fDivide(fTDP_Power, fV_x); - - fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine), - ConvertToFraction(10))); - - fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0); - - if (GreaterThan(fV_max, fV_NL) && - (GreaterThan(fV_NL, fEVV_V) || - Equal(fV_NL, fEVV_V))) { - fV_NL = fMultiply(fV_NL, ConvertToFraction(1000)); - - *voltage = (uint16_t)fV_NL.partial.real; - break; - } else - fV_x = fAdd(fV_x, fStepSize); - } - - return result; -} - /** * atomctrl_get_voltage_evv_on_sclk: gets voltage via call to ATOM COMMAND table. * @hwmgr: input: pointer to hwManager @@ -1419,6 +1000,8 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr GetIndexIntoMasterTable(DATA, SMU_Info), &size, &frev, &crev); + if (!psmu_info) + return -EINVAL; for (i = 0; i < psmu_info->ucSclkEntryNum; i++) { table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h index 1f987e846628..22b0ac12df97 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h @@ -316,8 +316,6 @@ extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, pp_atomctrl_clock_dividers_kong *dividers); extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, uint16_t end_index, uint32_t *efuse); -extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, - uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug); extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_ai *dividers); extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, uint8_t level); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c index 82d540334318..6120f14caab0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c @@ -158,84 +158,6 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, return result; } - -static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table( - struct pp_hwmgr *hwmgr) -{ - const void *table_address; - uint16_t idx; - - idx = GetIndexIntoMasterDataTable(gpio_pin_lut); - table_address = smu_atom_get_data_table(hwmgr->adev, - idx, NULL, NULL, NULL); - 
PP_ASSERT_WITH_CODE(table_address, - "Error retrieving BIOS Table Address!", - return NULL); - - return (struct atom_gpio_pin_lut_v2_1 *)table_address; -} - -static bool pp_atomfwctrl_lookup_gpio_pin( - struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table, - const uint32_t pin_id, - struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment) -{ - unsigned int size = le16_to_cpu( - gpio_lookup_table->table_header.structuresize); - unsigned int offset = - offsetof(struct atom_gpio_pin_lut_v2_1, gpio_pin[0]); - unsigned long start = (unsigned long)gpio_lookup_table; - - while (offset < size) { - const struct atom_gpio_pin_assignment *pin_assignment = - (const struct atom_gpio_pin_assignment *)(start + offset); - - if (pin_id == pin_assignment->gpio_id) { - gpio_pin_assignment->uc_gpio_pin_bit_shift = - pin_assignment->gpio_bitshift; - gpio_pin_assignment->us_gpio_pin_aindex = - le16_to_cpu(pin_assignment->data_a_reg_index); - return true; - } - offset += offsetof(struct atom_gpio_pin_assignment, gpio_id) + 1; - } - return false; -} - -/* - * Returns TRUE if the given pin id find in lookup table. - */ -bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, - const uint32_t pin_id, - struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment) -{ - bool ret = false; - struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table = - pp_atomfwctrl_get_gpio_lookup_table(hwmgr); - - /* If we cannot find the table do NOT try to control this voltage. */ - PP_ASSERT_WITH_CODE(gpio_lookup_table, - "Could not find GPIO lookup Table in BIOS.", - return false); - - ret = pp_atomfwctrl_lookup_gpio_pin(gpio_lookup_table, - pin_id, gpio_pin_assignment); - - return ret; -} - -/* - * Enter to SelfRefresh mode. - * @param hwmgr - */ -int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr) -{ - /* 0 - no action - * 1 - leave power to video memory always on - */ - return 0; -} - /** pp_atomfwctrl_get_gpu_pll_dividers_vega10(). * * @param hwmgr input parameter: pointer to HwMgr diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h index e86e05c786d9..0d62903d5676 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h @@ -217,9 +217,6 @@ struct pp_atomfwctrl_smc_dpm_parameters { int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, uint32_t clock_type, uint32_t clock_value, struct pp_atomfwctrl_clock_dividers_soc15 *dividers); -int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr); -bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pin_id, - struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment); int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, struct pp_atomfwctrl_voltage_table *voltage_table); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h deleted file mode 100644 index 6f54c410c2f9..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h +++ /dev/null @@ -1,555 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include <asm/div64.h> - -#define SHIFT_AMOUNT 16 /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */ - -#define PRECISION 5 /* Change this value to change the number of decimal places in the final output - 5 is a good default */ - -#define SHIFTED_2 (2 << SHIFT_AMOUNT) -#define MAX (1 << (SHIFT_AMOUNT - 1)) - 1 /* 32767 - Might change in the future */ - -/* ------------------------------------------------------------------------------- - * NEW TYPE - fINT - * ------------------------------------------------------------------------------- - * A variable of type fInt can be accessed in 3 ways using the dot (.) operator - * fInt A; - * A.full => The full number as it is. Generally not easy to read - * A.partial.real => Only the integer portion - * A.partial.decimal => Only the fractional portion - */ -typedef union _fInt { - int full; - struct _partial { - unsigned int decimal: SHIFT_AMOUNT; /*Needs to always be unsigned*/ - int real: 32 - SHIFT_AMOUNT; - } partial; -} fInt; - -/* ------------------------------------------------------------------------------- - * Function Declarations - * ------------------------------------------------------------------------------- - */ -static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ -static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ -static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ -static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. 
last 3 digits are the decimal digits) */
-static fInt fNegate(fInt); /* Returns -1 * input fInt value */
-static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */
-static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */
-static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */
-static fInt fDivide (fInt A, fInt B); /* Returns A/B */
-static fInt fGetSquare(fInt); /* Returns the square of a fInt number */
-static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */
-
-static int uAbs(int); /* Returns the Absolute value of the Int */
-static int uPow(int base, int exponent); /* Returns base^exponent an INT */
-
-static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
-static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */
-static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */
-
-static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */
-static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */
-
-/* Fuse decoding functions
- * -------------------------------------------------------------------------------------
- */
-static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
-static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
-static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
-
-/* Internal Support Functions - Use these ONLY for testing or adding to internal functions
- * -------------------------------------------------------------------------------------
- * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons.
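The header being deleted implemented Q16.16 fixed-point arithmetic: an fInt holds value * 2^SHIFT_AMOUNT in a 32-bit word, fMultiply widens to 64 bits before shifting the extra fraction bits back out, and fDivide pre-shifts the dividend so the quotient lands back in Q16.16. A minimal standalone sketch of that representation (illustrative only; the q16_* names are not from this header, and the original's range clamping is omitted):

#include <stdint.h>

/* Q16.16: the value v is stored as (int32_t)(v * 65536). */
typedef int32_t q16;

static q16 q16_from_int(int x)
{
	return (q16)(x << 16);
}

static q16 q16_mul(q16 a, q16 b)
{
	/* Widen to 64 bits, then drop the extra 16 fraction bits. */
	return (q16)(((int64_t)a * (int64_t)b) >> 16);
}

static q16 q16_div(q16 a, q16 b)
{
	/* Pre-shift the dividend so the quotient stays Q16.16; like the
	 * removed fDivide, return 0 rather than faulting on divide by zero. */
	return b ? (q16)((((int64_t)a) << 16) / b) : 0;
}

For example, q16_div(q16_from_int(3), q16_from_int(2)) evaluates to 0x18000, which is 1.5 in Q16.16.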
- */ -static fInt Divide (int, int); /* Divide two INTs and return result as FINT */ -static fInt fNegate(fInt); - -static int uGetScaledDecimal (fInt); /* Internal function */ -static int GetReal (fInt A); /* Internal function */ - -/* ------------------------------------------------------------------------------------- - * TROUBLESHOOTING INFORMATION - * ------------------------------------------------------------------------------------- - * 1) ConvertToFraction - InputOutOfRangeException: Only accepts numbers smaller than MAX (default: 32767) - * 2) fAdd - OutputOutOfRangeException: Output bigger than MAX (default: 32767) - * 3) fMultiply - OutputOutOfRangeException: - * 4) fGetSquare - OutputOutOfRangeException: - * 5) fDivide - DivideByZeroException - * 6) fSqrt - NegativeSquareRootException: Input cannot be a negative number - */ - -/* ------------------------------------------------------------------------------------- - * START OF CODE - * ------------------------------------------------------------------------------------- - */ -static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ -{ - uint32_t i; - bool bNegated = false; - - fInt fPositiveOne = ConvertToFraction(1); - fInt fZERO = ConvertToFraction(0); - - fInt lower_bound = Divide(78, 10000); - fInt solution = fPositiveOne; /*Starting off with baseline of 1 */ - fInt error_term; - - static const uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78}; - static const uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078}; - - if (GreaterThan(fZERO, exponent)) { - exponent = fNegate(exponent); - bNegated = true; - } - - while (GreaterThan(exponent, lower_bound)) { - for (i = 0; i < 11; i++) { - if (GreaterThan(exponent, GetScaledFraction(k_array[i], 10000))) { - exponent = fSubtract(exponent, GetScaledFraction(k_array[i], 10000)); - solution = fMultiply(solution, GetScaledFraction(expk_array[i], 10000)); - } - } - } - - error_term = fAdd(fPositiveOne, exponent); - - solution = fMultiply(solution, error_term); - - if (bNegated) - solution = fDivide(fPositiveOne, solution); - - return solution; -} - -static fInt fNaturalLog(fInt value) -{ - uint32_t i; - fInt upper_bound = Divide(8, 1000); - fInt fNegativeOne = ConvertToFraction(-1); - fInt solution = ConvertToFraction(0); /*Starting off with baseline of 0 */ - fInt error_term; - - static const uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078}; - static const uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78}; - - while (GreaterThan(fAdd(value, fNegativeOne), upper_bound)) { - for (i = 0; i < 10; i++) { - if (GreaterThan(value, GetScaledFraction(k_array[i], 10000))) { - value = fDivide(value, GetScaledFraction(k_array[i], 10000)); - solution = fAdd(solution, GetScaledFraction(logk_array[i], 10000)); - } - } - } - - error_term = fAdd(fNegativeOne, value); - - return fAdd(solution, error_term); -} - -static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) -{ - fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fInt f_decoded_value; - - f_decoded_value = fDivide(f_fuse_value, f_bit_max_value); - f_decoded_value = fMultiply(f_decoded_value, f_range); - f_decoded_value = fAdd(f_decoded_value, f_min); - - return f_decoded_value; -} - - -static fInt 
fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) -{ - fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fInt f_CONSTANT_NEG13 = ConvertToFraction(-13); - fInt f_CONSTANT1 = ConvertToFraction(1); - - fInt f_decoded_value; - - f_decoded_value = fSubtract(fDivide(f_bit_max_value, f_fuse_value), f_CONSTANT1); - f_decoded_value = fNaturalLog(f_decoded_value); - f_decoded_value = fMultiply(f_decoded_value, fDivide(f_range, f_CONSTANT_NEG13)); - f_decoded_value = fAdd(f_decoded_value, f_average); - - return f_decoded_value; -} - -static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) -{ - fInt fLeakage; - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fLeakage = fMultiply(ln_max_div_min, Convert_ULONG_ToFraction(leakageID_fuse)); - fLeakage = fDivide(fLeakage, f_bit_max_value); - fLeakage = fExponential(fLeakage); - fLeakage = fMultiply(fLeakage, f_min); - - return fLeakage; -} - -static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ -{ - fInt temp; - - if (X <= MAX) - temp.full = (X << SHIFT_AMOUNT); - else - temp.full = 0; - - return temp; -} - -static fInt fNegate(fInt X) -{ - fInt CONSTANT_NEGONE = ConvertToFraction(-1); - return fMultiply(X, CONSTANT_NEGONE); -} - -static fInt Convert_ULONG_ToFraction(uint32_t X) -{ - fInt temp; - - if (X <= MAX) - temp.full = (X << SHIFT_AMOUNT); - else - temp.full = 0; - - return temp; -} - -static fInt GetScaledFraction(int X, int factor) -{ - int times_shifted, factor_shifted; - bool bNEGATED; - fInt fValue; - - times_shifted = 0; - factor_shifted = 0; - bNEGATED = false; - - if (X < 0) { - X = -1*X; - bNEGATED = true; - } - - if (factor < 0) { - factor = -1*factor; - bNEGATED = !bNEGATED; /*If bNEGATED = true due to X < 0, this will cover the case of negative cancelling negative */ - } - - if ((X > MAX) || factor > MAX) { - if ((X/factor) <= MAX) { - while (X > MAX) { - X = X >> 1; - times_shifted++; - } - - while (factor > MAX) { - factor = factor >> 1; - factor_shifted++; - } - } else { - fValue.full = 0; - return fValue; - } - } - - if (factor == 1) - return ConvertToFraction(X); - - fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor)); - - fValue.full = fValue.full << times_shifted; - fValue.full = fValue.full >> factor_shifted; - - return fValue; -} - -/* Addition using two fInts */ -static fInt fAdd (fInt X, fInt Y) -{ - fInt Sum; - - Sum.full = X.full + Y.full; - - return Sum; -} - -/* Addition using two fInts */ -static fInt fSubtract (fInt X, fInt Y) -{ - fInt Difference; - - Difference.full = X.full - Y.full; - - return Difference; -} - -static bool Equal(fInt A, fInt B) -{ - if (A.full == B.full) - return true; - else - return false; -} - -static bool GreaterThan(fInt A, fInt B) -{ - if (A.full > B.full) - return true; - else - return false; -} - -static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ -{ - fInt Product; - int64_t tempProduct; - - /*The following is for a very specific common case: Non-zero number with ONLY fractional portion*/ - /* TEMPORARILY DISABLED - CAN BE USED TO IMPROVE PRECISION - bool X_LessThanOne, Y_LessThanOne; - - X_LessThanOne = (X.partial.real == 0 && X.partial.decimal != 0 && X.full >= 0); - Y_LessThanOne = (Y.partial.real == 0 && Y.partial.decimal != 0 && Y.full >= 0); - - if 
(X_LessThanOne && Y_LessThanOne) { - Product.full = X.full * Y.full; - return Product - }*/ - - tempProduct = ((int64_t)X.full) * ((int64_t)Y.full); /*Q(16,16)*Q(16,16) = Q(32, 32) - Might become a negative number! */ - tempProduct = tempProduct >> 16; /*Remove lagging 16 bits - Will lose some precision from decimal; */ - Product.full = (int)tempProduct; /*The int64_t will lose the leading 16 bits that were part of the integer portion */ - - return Product; -} - -static fInt fDivide (fInt X, fInt Y) -{ - fInt fZERO, fQuotient; - int64_t longlongX, longlongY; - - fZERO = ConvertToFraction(0); - - if (Equal(Y, fZERO)) - return fZERO; - - longlongX = (int64_t)X.full; - longlongY = (int64_t)Y.full; - - longlongX = longlongX << 16; /*Q(16,16) -> Q(32,32) */ - - div64_s64(longlongX, longlongY); /*Q(32,32) divided by Q(16,16) = Q(16,16) Back to original format */ - - fQuotient.full = (int)longlongX; - return fQuotient; -} - -static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ -{ - fInt fullNumber, scaledDecimal, scaledReal; - - scaledReal.full = GetReal(A) * uPow(10, PRECISION-1); /* DOUBLE CHECK THISSSS!!! */ - - scaledDecimal.full = uGetScaledDecimal(A); - - fullNumber = fAdd(scaledDecimal, scaledReal); - - return fullNumber.full; -} - -static fInt fGetSquare(fInt A) -{ - return fMultiply(A, A); -} - -/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */ -static fInt fSqrt(fInt num) -{ - fInt F_divide_Fprime, Fprime; - fInt test; - fInt twoShifted; - int seed, counter, error; - fInt x_new, x_old, C, y; - - fInt fZERO = ConvertToFraction(0); - - /* (0 > num) is the same as (num < 0), i.e., num is negative */ - - if (GreaterThan(fZERO, num) || Equal(fZERO, num)) - return fZERO; - - C = num; - - if (num.partial.real > 3000) - seed = 60; - else if (num.partial.real > 1000) - seed = 30; - else if (num.partial.real > 100) - seed = 10; - else - seed = 2; - - counter = 0; - - if (Equal(num, fZERO)) /*Square Root of Zero is zero */ - return fZERO; - - twoShifted = ConvertToFraction(2); - x_new = ConvertToFraction(seed); - - do { - counter++; - - x_old.full = x_new.full; - - test = fGetSquare(x_old); /*1.75*1.75 is reverting back to 1 when shifted down */ - y = fSubtract(test, C); /*y = f(x) = x^2 - C; */ - - Fprime = fMultiply(twoShifted, x_old); - F_divide_Fprime = fDivide(y, Fprime); - - x_new = fSubtract(x_old, F_divide_Fprime); - - error = ConvertBackToInteger(x_new) - ConvertBackToInteger(x_old); - - if (counter > 20) /*20 is already way too many iterations. 
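As the comment above fSqrt states, each iteration applies the Newton update x_new = x_old - (x_old^2 - C) / (2 * x_old), starting from a coarse seed picked by the magnitude of the input and giving up after 20 steps. The same search in plain floating point, as a sketch of the algorithm rather than the kernel code:

/* Newton's method for sqrt(c), mirroring the removed fSqrt:
 * seed by magnitude, stop on convergence or after 20 steps. */
static double newton_sqrt(double c)
{
	double x, next;
	int i;

	if (c <= 0.0)
		return 0.0;

	if (c > 3000.0)
		x = 60.0;
	else if (c > 1000.0)
		x = 30.0;
	else if (c > 100.0)
		x = 10.0;
	else
		x = 2.0;

	for (i = 0; i < 20; i++) {
		next = x - (x * x - c) / (2.0 * x);
		if (next == x)
			break;
		x = next;
	}

	return x;
}

The fixed-point original declared convergence once two successive iterates rounded to the same scaled integer, i.e. once uAbs(error) reached 0.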
If we dont have an answer by then, we never will*/ - return x_new; - - } while (uAbs(error) > 0); - - return x_new; -} - -static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) -{ - fInt *pRoots = &Roots[0]; - fInt temp, root_first, root_second; - fInt f_CONSTANT10, f_CONSTANT100; - - f_CONSTANT100 = ConvertToFraction(100); - f_CONSTANT10 = ConvertToFraction(10); - - while (GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) { - A = fDivide(A, f_CONSTANT10); - B = fDivide(B, f_CONSTANT10); - C = fDivide(C, f_CONSTANT10); - } - - temp = fMultiply(ConvertToFraction(4), A); /* root = 4*A */ - temp = fMultiply(temp, C); /* root = 4*A*C */ - temp = fSubtract(fGetSquare(B), temp); /* root = b^2 - 4AC */ - temp = fSqrt(temp); /*root = Sqrt (b^2 - 4AC); */ - - root_first = fSubtract(fNegate(B), temp); /* b - Sqrt(b^2 - 4AC) */ - root_second = fAdd(fNegate(B), temp); /* b + Sqrt(b^2 - 4AC) */ - - root_first = fDivide(root_first, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */ - root_first = fDivide(root_first, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */ - - root_second = fDivide(root_second, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */ - root_second = fDivide(root_second, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */ - - *(pRoots + 0) = root_first; - *(pRoots + 1) = root_second; -} - -/* ----------------------------------------------------------------------------- - * SUPPORT FUNCTIONS - * ----------------------------------------------------------------------------- - */ - -/* Conversion Functions */ -static int GetReal (fInt A) -{ - return (A.full >> SHIFT_AMOUNT); -} - -static fInt Divide (int X, int Y) -{ - fInt A, B, Quotient; - - A.full = X << SHIFT_AMOUNT; - B.full = Y << SHIFT_AMOUNT; - - Quotient = fDivide(A, B); - - return Quotient; -} - -static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ -{ - int dec[PRECISION]; - int i, scaledDecimal = 0, tmp = A.partial.decimal; - - for (i = 0; i < PRECISION; i++) { - dec[i] = tmp / (1 << SHIFT_AMOUNT); - tmp = tmp - ((1 << SHIFT_AMOUNT)*dec[i]); - tmp *= 10; - scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 - i); - } - - return scaledDecimal; -} - -static int uPow(int base, int power) -{ - if (power == 0) - return 1; - else - return (base)*uPow(base, power - 1); -} - -static int uAbs(int X) -{ - if (X < 0) - return (X * -1); - else - return X; -} - -static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) -{ - fInt solution; - - solution = fDivide(A, fStepSize); - solution.partial.decimal = 0; /*All fractional digits changes to 0 */ - - if (error_term) - solution.partial.real += 1; /*Error term of 1 added */ - - solution = fMultiply(solution, fStepSize); - solution = fAdd(solution, fStepSize); - - return solution; -} - diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h index 2cf2a7b12623..7711e892c31f 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h @@ -163,8 +163,8 @@ typedef struct _ATOM_Tonga_State { typedef struct _ATOM_Tonga_State_Array { UCHAR ucRevId; - UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_State entries[]; /* Dynamically allocate entries. 
*/ + UCHAR ucNumEntries; + ATOM_Tonga_State entries[] __counted_by(ucNumEntries); } ATOM_Tonga_State_Array; typedef struct _ATOM_Tonga_MCLK_Dependency_Record { @@ -178,8 +178,8 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record { typedef struct _ATOM_Tonga_MCLK_Dependency_Table { UCHAR ucRevId; - UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ + UCHAR ucNumEntries; + ATOM_Tonga_MCLK_Dependency_Record entries[] __counted_by(ucNumEntries); } ATOM_Tonga_MCLK_Dependency_Table; typedef struct _ATOM_Tonga_SCLK_Dependency_Record { @@ -193,8 +193,8 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record { typedef struct _ATOM_Tonga_SCLK_Dependency_Table { UCHAR ucRevId; - UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ + UCHAR ucNumEntries; + ATOM_Tonga_SCLK_Dependency_Record entries[] __counted_by(ucNumEntries); } ATOM_Tonga_SCLK_Dependency_Table; typedef struct _ATOM_Polaris_SCLK_Dependency_Record { @@ -209,8 +209,8 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record { typedef struct _ATOM_Polaris_SCLK_Dependency_Table { UCHAR ucRevId; - UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ + UCHAR ucNumEntries; + ATOM_Polaris_SCLK_Dependency_Record entries[] __counted_by(ucNumEntries); } ATOM_Polaris_SCLK_Dependency_Table; typedef struct _ATOM_Tonga_PCIE_Record { @@ -221,8 +221,8 @@ typedef struct _ATOM_Tonga_PCIE_Record { typedef struct _ATOM_Tonga_PCIE_Table { UCHAR ucRevId; - UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */ + UCHAR ucNumEntries; + ATOM_Tonga_PCIE_Record entries[] __counted_by(ucNumEntries); } ATOM_Tonga_PCIE_Table; typedef struct _ATOM_Polaris10_PCIE_Record { @@ -234,8 +234,8 @@ typedef struct _ATOM_Polaris10_PCIE_Record { typedef struct _ATOM_Polaris10_PCIE_Table { UCHAR ucRevId; - UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */ + UCHAR ucNumEntries; + ATOM_Polaris10_PCIE_Record entries[] __counted_by(ucNumEntries); } ATOM_Polaris10_PCIE_Table; @@ -251,8 +251,8 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record { typedef struct _ATOM_Tonga_MM_Dependency_Table { UCHAR ucRevId; - UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */ + UCHAR ucNumEntries; + ATOM_Tonga_MM_Dependency_Record entries[] __counted_by(ucNumEntries); } ATOM_Tonga_MM_Dependency_Table; typedef struct _ATOM_Tonga_Voltage_Lookup_Record { @@ -264,8 +264,8 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record { typedef struct _ATOM_Tonga_Voltage_Lookup_Table { UCHAR ucRevId; - UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. 
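These pptable hunks all make the same mechanical change: the explanatory comments go away and each flexible array gains a __counted_by() annotation naming the member that holds its element count. With a compiler that supports the attribute, CONFIG_FORTIFY_SOURCE and UBSAN array-bounds instrumentation can then check indexing into the array at run time. The shape of the pattern, with hypothetical struct names (the annotation itself comes from linux/compiler_attributes.h):

struct example_rec {
	u32 clk;
};

struct example_table {
	u8 num_entries;
	struct example_rec entries[] __counted_by(num_entries);
};

/* The counter must be valid before entries[] is indexed, e.g. for a
 * driver-allocated table:
 *
 *	t = kmalloc(struct_size(t, entries, n), GFP_KERNEL);
 *	t->num_entries = n;
 *
 * The ATOM tables annotated here are parsed out of VBIOS images,
 * where the count field is already populated. */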
*/ + UCHAR ucNumEntries; + ATOM_Tonga_Voltage_Lookup_Record entries[] __counted_by(ucNumEntries); } ATOM_Tonga_Voltage_Lookup_Table; typedef struct _ATOM_Tonga_Fan_Table { @@ -367,7 +367,7 @@ typedef struct _ATOM_Tonga_VCE_State_Record { typedef struct _ATOM_Tonga_VCE_State_Table { UCHAR ucRevId; UCHAR ucNumEntries; - ATOM_Tonga_VCE_State_Record entries[]; + ATOM_Tonga_VCE_State_Record entries[] __counted_by(ucNumEntries); } ATOM_Tonga_VCE_State_Table; typedef struct _ATOM_Tonga_PowerTune_Table { @@ -481,7 +481,7 @@ typedef struct _ATOM_Tonga_Hard_Limit_Record { typedef struct _ATOM_Tonga_Hard_Limit_Table { UCHAR ucRevId; UCHAR ucNumEntries; - ATOM_Tonga_Hard_Limit_Record entries[]; + ATOM_Tonga_Hard_Limit_Record entries[] __counted_by(ucNumEntries); } ATOM_Tonga_Hard_Limit_Table; typedef struct _ATOM_Tonga_GPIO_Table { diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c index 17882f8dfdd3..6cfef1b295ab 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c @@ -978,8 +978,6 @@ static int init_thermal_controller( hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh = le16_to_cpu(tonga_fan_table->usPWMHigh); hwmgr->thermal_controller.advanceFanControlParameters.usTMax - = 10900; /* hard coded */ - hwmgr->thermal_controller.advanceFanControlParameters.usTMax = le16_to_cpu(tonga_fan_table->usTMax); hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode = tonga_fan_table->ucFanControlMode; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c index 5794b64507bf..f06b29e33ba4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c @@ -702,8 +702,6 @@ static int init_non_clock_fields(struct pp_hwmgr *hwmgr, ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; - ps->pcie.lanes = 0; - ps->display.disableFrameModulation = false; rrr_index = (le32_to_cpu(pnon_clock_info->ulCapsAndSettings) & @@ -1185,6 +1183,8 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, fw_info = smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, FirmwareInfo), &size, &frev, &crev); + PP_ASSERT_WITH_CODE(fw_info != NULL, + "Missing firmware info!", return -EINVAL); if ((fw_info->ucTableFormatRevision == 1) && (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V1_4))) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 02ba68d7c654..9a821563bc8e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1036,7 +1036,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, switch (type) { case PP_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now); + if (ret) + return ret; /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ if (now == data->gfx_max_freq_limit/100) @@ -1057,7 +1059,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, i == 2 ? 
"*" : ""); break; case PP_MCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); + if (ret) + return ret; for (i = 0; i < mclk_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -1310,13 +1314,17 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk); + if (ret) + break; /* in units of 10KHZ */ *((uint32_t *)value) = sclk * 100; *size = 4; break; case AMDGPU_PP_SENSOR_GFX_MCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk); + if (ret) + break; /* in units of 10KHZ */ *((uint32_t *)value) = mclk * 100; *size = 4; @@ -1550,7 +1558,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr, } if (input[0] == 0) { - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq); + if (ret) + return ret; + if (input[1] < min_freq) { pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", input[1], min_freq); @@ -1558,7 +1569,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr, } smu10_data->gfx_actual_soft_min_freq = input[1]; } else if (input[0] == 1) { - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq); + if (ret) + return ret; + if (input[1] > max_freq) { pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", input[1], max_freq); @@ -1573,10 +1587,15 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr, pr_err("Input parameter number not correct\n"); return -EINVAL; } - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq); - + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq); + if (ret) + return ret; smu10_data->gfx_actual_soft_min_freq = min_freq; + + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq); + if (ret) + return ret; + smu10_data->gfx_actual_soft_max_freq = max_freq; } else if (type == PP_OD_COMMIT_DPM_TABLE) { if (size != 0) { @@ -1623,7 +1642,6 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .apply_state_adjust_rules = smu10_apply_state_adjust_rules, .force_dpm_level = smu10_dpm_force_dpm_level, .get_power_state_size = smu10_get_power_state_size, - .powerdown_uvd = NULL, .powergate_uvd = smu10_powergate_vcn, .powergate_vce = NULL, .get_mclk = smu10_dpm_get_mclk, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c index e8a9471c1898..ad60918aaae1 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c @@ -33,7 +33,7 @@ #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" -bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr) +int smu7_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; @@ -44,9 +44,9 @@ bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr) reg = RREG32(mmCC_BIF_BX_FUSESTRAP0); if (reg & 
CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK) - return true; + return BACO_SUPPORT; - return false; + return 0; } int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h index 73a773f4ce2e..750082ea74d8 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr); +extern int smu7_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c index f2bda3bcbbde..5e4c80f7b20a 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c @@ -55,7 +55,7 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) return smu7_enable_disable_vce_dpm(hwmgr, !bgate); } -int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) +static int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (phm_cf_want_uvd_power_gating(hwmgr)) return smum_send_msg_to_smc(hwmgr, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h index fc8f8a6acc72..e56abbadc78b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h @@ -28,7 +28,6 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); -int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr); int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr); int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index aa91730e4eaf..8da882c51856 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -2957,6 +2957,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr) static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; struct smu7_hwmgr *data; int result = 0; @@ -2993,40 +2994,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) /* Initalize Dynamic State Adjustment Rule Settings */ result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); - if (0 == result) { - struct amdgpu_device *adev = hwmgr->adev; + if (result) + goto fail; - data->is_tlu_enabled = false; + data->is_tlu_enabled = false; - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU7_MAX_HARDWARE_POWERLEVELS; - hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; - hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - data->pcie_gen_cap = adev->pm.pcie_gen_mask; - if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - data->pcie_spc_cap = 20; - 
else - data->pcie_spc_cap = 16; - data->pcie_lane_cap = adev->pm.pcie_mlw_mask; - - hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ -/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ - hwmgr->platform_descriptor.clockStep.engineClock = 500; - hwmgr->platform_descriptor.clockStep.memoryClock = 500; - smu7_thermal_parameter_init(hwmgr); - } else { - /* Ignore return value in here, we are cleaning up a mess. */ - smu7_hwmgr_backend_fini(hwmgr); - } + data->pcie_gen_cap = adev->pm.pcie_gen_mask; + if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + data->pcie_spc_cap = 20; + else + data->pcie_spc_cap = 16; + data->pcie_lane_cap = adev->pm.pcie_mlw_mask; + + hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ + /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ + hwmgr->platform_descriptor.clockStep.engineClock = 500; + hwmgr->platform_descriptor.clockStep.memoryClock = 500; + smu7_thermal_parameter_init(hwmgr); result = smu7_update_edc_leakage_table(hwmgr); - if (result) { - smu7_hwmgr_backend_fini(hwmgr); - return result; - } + if (result) + goto fail; return 0; +fail: + smu7_hwmgr_backend_fini(hwmgr); + return result; } static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) @@ -3316,8 +3314,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, const struct pp_power_state *current_ps) { struct amdgpu_device *adev = hwmgr->adev; - struct smu7_power_state *smu7_ps = - cast_phw_smu7_power_state(&request_ps->hardware); + struct smu7_power_state *smu7_ps; uint32_t sclk; uint32_t mclk; struct PP_Clocks minimum_clocks = {0}; @@ -3334,6 +3331,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, uint32_t latency; bool latency_allowed = false; + smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware); + if (!smu7_ps) + return -EINVAL; + data->battery_state = (PP_StateUILabel_Battery == request_ps->classification.ui_label); data->mclk_ignore_signal = false; @@ -4000,6 +4001,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, uint32_t offset, val_vid; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct amdgpu_device *adev = hwmgr->adev; + int ret = 0; /* size must be at least 4 bytes for all sensors */ if (*size < 4) @@ -4007,12 +4009,16 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk); + if (ret) + return ret; *((uint32_t *)value) = sclk; *size = 4; return 0; case AMDGPU_PP_SENSOR_GFX_MCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk); + if (ret) + return ret; *((uint32_t *)value) = mclk; *size = 4; return 0; @@ -4965,13 +4971,14 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); - int size = 0; + int size = 0, ret = 0; uint32_t i, now, clock, pcie_speed; switch (type) { case PP_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock); - + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, 
&clock); + if (ret) + return ret; for (i = 0; i < sclk_table->count; i++) { if (clock > sclk_table->dpm_levels[i].value) continue; @@ -4985,8 +4992,9 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, (i == now) ? "*" : ""); break; case PP_MCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock); - + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock); + if (ret) + return ret; for (i = 0; i < mclk_table->count; i++) { if (clock > mclk_table->dpm_levels[i].value) continue; @@ -5640,7 +5648,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint mode = input[size]; switch (mode) { case PP_SMC_POWER_PROFILE_CUSTOM: - if (size < 8 && size != 0) + if (size != 8 && size != 0) return -EINVAL; /* If only CUSTOM is passed in, use the saved values. Check * that we actually have a CUSTOM profile by ensuring that @@ -5746,7 +5754,6 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .patch_boot_state = smu7_dpm_patch_boot_state, .get_pp_table_entry = smu7_get_pp_table_entry, .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries, - .powerdown_uvd = smu7_powerdown_uvd, .powergate_uvd = smu7_powergate_uvd, .powergate_vce = smu7_powergate_vce, .disable_clock_power_gating = smu7_disable_clock_power_gating, @@ -5791,7 +5798,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_power_profile_mode = smu7_get_power_profile_mode, .set_power_profile_mode = smu7_set_power_profile_mode, .get_performance_level = smu7_get_performance_level, - .get_asic_baco_capability = smu7_baco_get_capability, + .get_bamaco_support = smu7_get_bamaco_support, .get_asic_baco_state = smu7_baco_get_state, .set_asic_baco_state = smu7_baco_set_state, .power_off_asic = smu7_power_off_asic, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c index a8fc0fa44db6..ba5c1237fcfe 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c @@ -267,10 +267,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) if (hwmgr->thermal_controller.fanInfo.bNoFan || (hwmgr->thermal_controller.fanInfo. 
ucTachometerPulsesPerRevolution == 0) || - speed == 0 || + (!speed || speed > UINT_MAX/8) || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return 0; + return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) smu7_fan_ctrl_stop_smc_fan_control(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index b015a601b385..9b20076e26c0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -394,7 +394,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr) } if (le32_to_cpu(info->ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) { + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnableDFSBypass); } @@ -584,6 +584,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr) hwmgr->dyn_state.uvd_clock_voltage_dependency_table; unsigned long clock = 0; uint32_t level; + int ret; if (NULL == table || table->count <= 0) return -EINVAL; @@ -591,7 +592,9 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr) data->uvd_dpm.soft_min_clk = 0; data->uvd_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level); + if (ret) + return ret; if (level < table->count) clock = table->entries[level].vclk; @@ -611,6 +614,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr) hwmgr->dyn_state.vce_clock_voltage_dependency_table; unsigned long clock = 0; uint32_t level; + int ret; if (NULL == table || table->count <= 0) return -EINVAL; @@ -618,7 +622,9 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr) data->vce_dpm.soft_min_clk = 0; data->vce_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level); + if (ret) + return ret; if (level < table->count) clock = table->entries[level].ecclk; @@ -638,6 +644,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr) hwmgr->dyn_state.acp_clock_voltage_dependency_table; unsigned long clock = 0; uint32_t level; + int ret; if (NULL == table || table->count <= 0) return -EINVAL; @@ -645,7 +652,9 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr) data->acp_dpm.soft_min_clk = 0; data->acp_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level); + if (ret) + return ret; if (level < table->count) clock = table->entries[level].acpclk; @@ -1065,16 +1074,18 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *prequest_ps, const struct pp_power_state *pcurrent_ps) { - struct smu8_power_state *smu8_ps = - cast_smu8_power_state(&prequest_ps->hardware); - - const struct smu8_power_state *smu8_current_ps = - cast_const_smu8_power_state(&pcurrent_ps->hardware); - + struct smu8_power_state *smu8_ps; + const struct smu8_power_state *smu8_current_ps; struct smu8_hwmgr *data = hwmgr->backend; struct PP_Clocks clocks = {0, 0, 0, 0}; bool force_high; + smu8_ps = cast_smu8_power_state(&prequest_ps->hardware); + smu8_current_ps = cast_const_smu8_power_state(&pcurrent_ps->hardware); + + if (!smu8_ps || !smu8_current_ps) + return -EINVAL; + smu8_ps->need_dfs_bypass = true; data->battery_state = (PP_StateUILabel_Battery == 
prequest_ps->classification.ui_label); @@ -2033,7 +2044,6 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = { .apply_state_adjust_rules = smu8_apply_state_adjust_rules, .force_dpm_level = smu8_dpm_force_dpm_level, .get_power_state_size = smu8_get_power_state_size, - .powerdown_uvd = smu8_dpm_powerdown_uvd, .powergate_uvd = smu8_dpm_powergate_uvd, .powergate_vce = smu8_dpm_powergate_vce, .powergate_acp = smu8_dpm_powergate_acp, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c index c66ef9741535..c1ce1d7cae48 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c @@ -28,13 +28,13 @@ #include "vega10_inc.h" #include "smu9_baco.h" -bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr) +int smu9_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg, data; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) - return false; + return 0; WREG32(0x12074, 0xFFF0003B); data = RREG32(0x12075); @@ -43,10 +43,10 @@ bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr) reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - return true; + return BACO_SUPPORT; } - return false; + return 0; } int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h index 9ff7c2ea1b58..2c100482084c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr); +extern int smu9_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); #endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 6d6bc6a380b3..9ace863792d4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -354,13 +354,13 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr) return 0; } -static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) +static int vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = hwmgr->backend; - int i; uint32_t sub_vendor_id, hw_revision; uint32_t top32, bottom32; struct amdgpu_device *adev = hwmgr->adev; + int ret, i; vega10_initialize_power_tune_defaults(hwmgr); @@ -485,9 +485,12 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (data->registry_data.vr0hot_enabled) data->smu_features[GNLD_VR0HOT].supported = true; - smum_send_msg_to_smc(hwmgr, + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version); + if (ret) + return ret; + /* ACG firmware has major version 5 */ if ((hwmgr->smu_version & 0xff000000) == 0x5000000) data->smu_features[GNLD_ACG].supported = true; @@ -505,10 +508,16 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_PCC_LIMIT].supported = true; /* Get the SN to turn into a Unique ID */ - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); + ret = smum_send_msg_to_smc(hwmgr, 
PPSMC_MSG_ReadSerialNumTop32, &top32); + if (ret) + return ret; + + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); + if (ret) + return ret; adev->unique_id = ((uint64_t)bottom32 << 32) | top32; + return 0; } #ifdef PPLIB_VEGA10_EVV_SUPPORT @@ -882,7 +891,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) vega10_set_features_platform_caps(hwmgr); - vega10_init_dpm_defaults(hwmgr); + result = vega10_init_dpm_defaults(hwmgr); + if (result) + return result; #ifdef PPLIB_VEGA10_EVV_SUPPORT /* Get leakage voltage based on leakage ID. */ @@ -2350,15 +2361,20 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = hwmgr->backend; uint32_t agc_btc_response; + int ret; if (data->smu_features[GNLD_ACG].supported) { if (0 == vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap)) data->smu_features[GNLD_DPM_PREFETCHER].enabled = true; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL); + if (ret) + return ret; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response); + if (ret) + agc_btc_response = 0; if (1 == agc_btc_response) { if (1 == data->acg_loop_state) @@ -2481,10 +2497,12 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = hwmgr->backend; AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); - - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); - + result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); + if (result) + return result; + result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); + if (result) + return result; serial_number = ((uint64_t)bottom32 << 32) | top32; if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) { @@ -2571,8 +2589,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) } } - pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, + result = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2, &voltage_table); + PP_ASSERT_WITH_CODE(!result, + "Failed to get voltage table!", + return result); pp_table->MaxVidStep = voltage_table.max_vid_step; pp_table->GfxDpmVoltageMode = @@ -2913,9 +2934,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) } } - vega10_enable_smc_features(hwmgr, false, feature_mask); - - return 0; + return vega10_enable_smc_features(hwmgr, false, feature_mask); } /** @@ -3259,8 +3278,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, const struct pp_power_state *current_ps) { struct amdgpu_device *adev = hwmgr->adev; - struct vega10_power_state *vega10_ps = - cast_phw_vega10_power_state(&request_ps->hardware); + struct vega10_power_state *vega10_ps; uint32_t sclk; uint32_t mclk; struct PP_Clocks minimum_clocks = {0}; @@ -3278,6 +3296,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; uint32_t latency; + vega10_ps = cast_phw_vega10_power_state(&request_ps->hardware); + if (!vega10_ps) + return -EINVAL; + data->battery_state = (PP_StateUILabel_Battery == request_ps->classification.ui_label); @@ -3415,13 +3437,17 @@ static int 
vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co const struct vega10_power_state *vega10_ps = cast_const_phw_vega10_power_state(states->pnew_state); struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); - uint32_t sclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].gfx_clock; struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); - uint32_t mclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].mem_clock; + uint32_t sclk, mclk; uint32_t i; + if (vega10_ps == NULL) + return -EINVAL; + sclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; + mclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].mem_clock; + for (i = 0; i < sclk_table->count; i++) { if (sclk == sclk_table->dpm_levels[i].value) break; @@ -3728,6 +3754,9 @@ static int vega10_generate_dpm_level_enable_mask( cast_const_phw_vega10_power_state(states->pnew_state); int i; + if (vega10_ps == NULL) + return -EINVAL; + PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps), "Attempt to Trim DPM States Failed!", return -1); @@ -3900,11 +3929,14 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query) { uint32_t value; + int ret; if (!query) return -EINVAL; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value); + if (ret) + return ret; /* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */ *query = value << 8; @@ -3924,11 +3956,16 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, &sclk_mhz); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, &sclk_mhz); + if (ret) + break; + *((uint32_t *)value) = sclk_mhz * 100; break; case AMDGPU_PP_SENSOR_GFX_MCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx); + if (ret) + break; if (mclk_idx < dpm_table->mem_table.count) { *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value; *size = 4; @@ -4800,14 +4837,16 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; PPTable_t *pptable = &(data->smc_state_table.pp_table); - int i, now, size = 0, count = 0; + int i, ret, now, size = 0, count = 0; switch (type) { case PP_SCLK: if (data->registry_data.sclk_dpm_key_disabled) break; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now); + if (ret) + break; if (hwmgr->pp_one_vf && (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) @@ -4823,7 +4862,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, if (data->registry_data.mclk_dpm_key_disabled) break; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); + if (ret) + break; for (i = 0; i < mclk_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -4834,7 +4875,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, if (data->registry_data.socclk_dpm_key_disabled) break; - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); + ret = 
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); + if (ret) + break; for (i = 0; i < soc_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -4845,8 +4888,10 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, if (data->registry_data.dcefclk_dpm_key_disabled) break; - smum_send_msg_to_smc_with_parameter(hwmgr, + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now); + if (ret) + break; for (i = 0; i < dcef_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -4995,6 +5040,8 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr, vega10_psa = cast_const_phw_vega10_power_state(pstate1); vega10_psb = cast_const_phw_vega10_power_state(pstate2); + if (vega10_psa == NULL || vega10_psb == NULL) + return -EINVAL; /* If the two states don't even have the same number of performance levels * they cannot be the same state. @@ -5128,6 +5175,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) return -EINVAL; vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return -EINVAL; vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].gfx_clock = @@ -5179,6 +5228,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) return -EINVAL; vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return -EINVAL; vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].mem_clock = @@ -5420,6 +5471,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr) return; vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return; + max_level = vega10_ps->performance_level_count - 1; if (vega10_ps->performance_levels[max_level].gfx_clock != @@ -5442,6 +5496,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr) ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1)); vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return; + max_level = vega10_ps->performance_level_count - 1; if (vega10_ps->performance_levels[max_level].gfx_clock != @@ -5632,6 +5689,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_ return -EINVAL; vega10_ps = cast_const_phw_vega10_power_state(state); + if (vega10_ps == NULL) + return -EINVAL; i = index > vega10_ps->performance_level_count - 1 ? 
vega10_ps->performance_level_count - 1 : index; @@ -5756,7 +5815,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .set_power_limit = vega10_set_power_limit, .odn_edit_dpm_table = vega10_odn_edit_dpm_table, .get_performance_level = vega10_get_performance_level, - .get_asic_baco_capability = smu9_baco_get_capability, + .get_bamaco_support = smu9_get_bamaco_support, .get_asic_baco_state = smu9_baco_get_state, .set_asic_baco_state = vega10_baco_set_state, .enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c index 3007b054c873..776d58ea63ae 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c @@ -1120,13 +1120,14 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT); if (0 != result) - return result; + goto exit_safe_mode; vega10_didt_set_mask(hwmgr, false); +exit_safe_mode: amdgpu_gfx_rlc_exit_safe_mode(adev, 0); - return 0; + return result; } static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c index 379012494da5..56423aedf3fa 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c @@ -307,10 +307,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) int result = 0; if (hwmgr->thermal_controller.fanInfo.bNoFan || - speed == 0 || + (!speed || speed > UINT_MAX/8) || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return -1; + return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 460067933de2..10fd4e9f016c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -293,12 +293,12 @@ static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr) return 0; } -static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr) +static int vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); struct amdgpu_device *adev = hwmgr->adev; uint32_t top32, bottom32; - int i; + int i, ret; data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id = FEATURE_DPM_PREFETCHER_BIT; @@ -364,10 +364,16 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr) } /* Get the SN to turn into a Unique ID */ - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); + if (ret) + return ret; + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); + if (ret) + return ret; adev->unique_id = ((uint64_t)bottom32 << 32) | top32; + + return 0; } static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) @@ -410,7 +416,11 
@@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr) vega12_set_features_platform_caps(hwmgr); - vega12_init_dpm_defaults(hwmgr); + result = vega12_init_dpm_defaults(hwmgr); + if (result) { + pr_err("%s failed\n", __func__); + return result; + } /* Parse pptable data read from VBIOS */ vega12_set_private_data_based_on_pptable(hwmgr); @@ -2966,7 +2976,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .start_thermal_controller = vega12_start_thermal_controller, .powergate_gfx = vega12_gfx_off_control, .get_performance_level = vega12_get_performance_level, - .get_asic_baco_capability = smu9_baco_get_capability, + .get_bamaco_support = smu9_get_bamaco_support, .get_asic_baco_state = smu9_baco_get_state, .set_asic_baco_state = vega12_baco_set_state, .get_ppfeature_status = vega12_get_ppfeature_status, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c index dad4c80aee58..424e4ec9e389 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c @@ -36,22 +36,22 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0}, }; -bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr) +int vega20_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) - return false; + return 0; if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) { reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - return true; + return BACO_SUPPORT; } - return false; + return 0; } int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h index bdad9c915631..0f2dd8c008ba 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr); +extern int vega20_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 3b33af30eb0f..baf251fe5d82 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -328,12 +328,12 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) return 0; } -static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) +static int vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); struct amdgpu_device *adev = hwmgr->adev; uint32_t top32, bottom32; - int i; + int i, ret; data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id = FEATURE_DPM_PREFETCHER_BIT; @@ -404,10 +404,17 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) } /* Get the SN to turn into a Unique ID */ - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); - 
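
Note the recurring rename in these hwmgr function tables: get_asic_baco_capability (bool) becomes get_bamaco_support (int), so one callback can report BACO and BAMACO support as separate bits instead of a plain yes/no. A hedged sketch of the flags idiom — the flag values below are illustrative; the real BACO_SUPPORT/MACO_SUPPORT definitions live in the driver headers:

#include <stdio.h>

/* Illustrative values only; see the driver headers for the
 * actual BACO_SUPPORT/MACO_SUPPORT definitions. */
#define BACO_SUPPORT (1 << 0)
#define MACO_SUPPORT (1 << 1)

static int get_bamaco_support(int px_capable, int maco_fused)
{
        int support = 0;

        if (px_capable)
                support |= BACO_SUPPORT;
        if (maco_fused)
                support |= MACO_SUPPORT;

        return support; /* 0 means neither mode is available */
}

int main(void)
{
        int s = get_bamaco_support(1, 0);

        printf("baco=%d maco=%d\n",
               !!(s & BACO_SUPPORT), !!(s & MACO_SUPPORT));
        return 0;
}
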
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); + if (ret) + return ret; + + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); + if (ret) + return ret; adev->unique_id = ((uint64_t)bottom32 << 32) | top32; + + return 0; } static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) @@ -427,6 +434,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data; struct amdgpu_device *adev = hwmgr->adev; + int result; data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL); if (data == NULL) @@ -452,8 +460,11 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr) vega20_set_features_platform_caps(hwmgr); - vega20_init_dpm_defaults(hwmgr); - + result = vega20_init_dpm_defaults(hwmgr); + if (result) { + pr_err("%s failed\n", __func__); + return result; + } /* Parse pptable data read from VBIOS */ vega20_set_private_data_based_on_pptable(hwmgr); @@ -4091,9 +4102,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); - if (size == 0 && !data->is_custom_profile_set) + + if (size != 10 && size != 0) return -EINVAL; - if (size < 10 && size != 0) + + if (size == 0 && !data->is_custom_profile_set) return -EINVAL; result = vega20_get_activity_monitor_coeff(hwmgr, @@ -4155,6 +4168,8 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui activity_monitor.Fclk_PD_Data_error_coeff = input[8]; activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9]; break; + default: + return -EINVAL; } result = vega20_set_activity_monitor_coeff(hwmgr, @@ -4422,7 +4437,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { .notify_cac_buffer_info = vega20_notify_cac_buffer_info, .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost, /* BACO related */ - .get_asic_baco_capability = vega20_baco_get_capability, + .get_bamaco_support = vega20_get_bamaco_support, .get_asic_baco_state = vega20_baco_get_state, .set_asic_baco_state = vega20_baco_set_state, .set_mp1_state = vega20_set_mp1_state, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c index 79c817752a33..2b446f8866ba 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c @@ -62,578 +62,6 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) return table_address; } -#if 0 -static void dump_pptable(PPTable_t *pptable) -{ - int i; - - pr_info("Version = 0x%08x\n", pptable->Version); - - pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - pr_info("SocketPowerLimitAc0 = %d\n", pptable->SocketPowerLimitAc0); - pr_info("SocketPowerLimitAc0Tau = %d\n", pptable->SocketPowerLimitAc0Tau); - pr_info("SocketPowerLimitAc1 = %d\n", pptable->SocketPowerLimitAc1); - pr_info("SocketPowerLimitAc1Tau = %d\n", pptable->SocketPowerLimitAc1Tau); - pr_info("SocketPowerLimitAc2 = %d\n", pptable->SocketPowerLimitAc2); - pr_info("SocketPowerLimitAc2Tau = %d\n", pptable->SocketPowerLimitAc2Tau); - pr_info("SocketPowerLimitAc3 = %d\n", pptable->SocketPowerLimitAc3); - pr_info("SocketPowerLimitAc3Tau = %d\n", pptable->SocketPowerLimitAc3Tau); - 
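
Both the vega12 and vega20 init_dpm_defaults hunks stop ignoring the two serial-number mailbox reads: the function now returns int and bails out before composing adev->unique_id, so a failed SMC message can no longer produce a half-initialized id. A standalone model of the fail-fast composition, where the fuse-word reader is a stand-in for smum_send_msg_to_smc():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for smum_send_msg_to_smc(); returns 0 on success. */
static int read_fuse_word(int which, uint32_t *out)
{
        static const uint32_t words[2] = { 0x12345678u, 0x9abcdef0u };

        if (which < 0 || which > 1)
                return -22; /* -EINVAL */
        *out = words[which];
        return 0;
}

static int init_unique_id(uint64_t *unique_id)
{
        uint32_t top32, bottom32;
        int ret;

        ret = read_fuse_word(0, &top32);
        if (ret)
                return ret;
        ret = read_fuse_word(1, &bottom32);
        if (ret)
                return ret;

        /* Same composition the driver uses. */
        *unique_id = ((uint64_t)bottom32 << 32) | top32;
        return 0;
}

int main(void)
{
        uint64_t id;

        if (!init_unique_id(&id))
                printf("unique id = 0x%016llx\n", (unsigned long long)id);
        return 0;
}
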
pr_info("SocketPowerLimitDc = %d\n", pptable->SocketPowerLimitDc); - pr_info("SocketPowerLimitDcTau = %d\n", pptable->SocketPowerLimitDcTau); - pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc); - pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau); - pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx); - pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau); - - pr_info("TedgeLimit = %d\n", pptable->TedgeLimit); - pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit); - pr_info("ThbmLimit = %d\n", pptable->ThbmLimit); - pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit); - pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit); - pr_info("Tliquid1Limit = %d\n", pptable->Tliquid1Limit); - pr_info("Tliquid2Limit = %d\n", pptable->Tliquid2Limit); - pr_info("TplxLimit = %d\n", pptable->TplxLimit); - pr_info("FitLimit = %d\n", pptable->FitLimit); - - pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit); - pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); - - pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage); - pr_info("padding8_limits = 0x%02x\n", pptable->padding8_limits); - pr_info("Tvr_SocLimit = %d\n", pptable->Tvr_SocLimit); - - pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc); - pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); - - pr_info("UlvSmnclkDid = %d\n", pptable->UlvSmnclkDid); - pr_info("UlvMp1clkDid = %d\n", pptable->UlvMp1clkDid); - pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass); - pr_info("Padding234 = 0x%02x\n", pptable->Padding234); - - pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx); - pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc); - pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx); - pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc); - - pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx); - pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc); - - pr_info("[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c); - - pr_info("[PPCLK_VCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK].padding, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c); - - pr_info("[PPCLK_DCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 
0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK].padding, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c); - - pr_info("[PPCLK_ECLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_ECLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_ECLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_ECLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_ECLK].padding, - pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.c); - - pr_info("[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c); - - pr_info("[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c); - - pr_info("[PPCLK_DCEFCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DCEFCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCEFCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCEFCLK].padding, - pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.c); - - 
pr_info("[PPCLK_DISPCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DISPCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DISPCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DISPCLK].padding, - pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.c); - - pr_info("[PPCLK_PIXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_PIXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_PIXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_PIXCLK].padding, - pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.c); - - pr_info("[PPCLK_PHYCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_PHYCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_PHYCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_PHYCLK].padding, - pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.c); - - pr_info("[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c); - - - pr_info("FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]); - - pr_info("FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]); - - pr_info("FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]); - - pr_info("FreqTableEclk\n"); - for (i = 0; i < NUM_ECLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableEclk[i]); - - pr_info("FreqTableSocclk\n"); - for (i = 0; i 
< NUM_SOCCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]); - - pr_info("FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]); - - pr_info("FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]); - - pr_info("FreqTableDcefclk\n"); - for (i = 0; i < NUM_DCEFCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDcefclk[i]); - - pr_info("FreqTableDispclk\n"); - for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDispclk[i]); - - pr_info("FreqTablePixclk\n"); - for (i = 0; i < NUM_PIXCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePixclk[i]); - - pr_info("FreqTablePhyclk\n"); - for (i = 0; i < NUM_PHYCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePhyclk[i]); - - pr_info("DcModeMaxFreq[PPCLK_GFXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - pr_info("DcModeMaxFreq[PPCLK_VCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_VCLK]); - pr_info("DcModeMaxFreq[PPCLK_DCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCLK]); - pr_info("DcModeMaxFreq[PPCLK_ECLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_ECLK]); - pr_info("DcModeMaxFreq[PPCLK_SOCCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - pr_info("DcModeMaxFreq[PPCLK_UCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - pr_info("DcModeMaxFreq[PPCLK_DCEFCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCEFCLK]); - pr_info("DcModeMaxFreq[PPCLK_DISPCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DISPCLK]); - pr_info("DcModeMaxFreq[PPCLK_PIXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PIXCLK]); - pr_info("DcModeMaxFreq[PPCLK_PHYCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PHYCLK]); - pr_info("DcModeMaxFreq[PPCLK_FCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - pr_info("Padding8_Clks = %d\n", pptable->Padding8_Clks); - - pr_info("Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]); - - pr_info("Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]); - - pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate); - pr_info("CksEnableFreq = 0x%x\n", pptable->CksEnableFreq); - pr_info("Padding789 = 0x%x\n", pptable->Padding789); - pr_info("CksVoltageOffset[a = 0x%08x b = 0x%08x c = 0x%08x]\n", - pptable->CksVoltageOffset.a, - pptable->CksVoltageOffset.b, - pptable->CksVoltageOffset.c); - pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]); - pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]); - pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]); - pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]); - pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq); - pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource); - pr_info("Padding456 = 0x%x\n", pptable->Padding456); - - pr_info("LowestUclkReservedForUlv = %d\n", pptable->LowestUclkReservedForUlv); - pr_info("Padding8_Uclk[0] = 0x%x\n", pptable->Padding8_Uclk[0]); - pr_info("Padding8_Uclk[1] = 0x%x\n", pptable->Padding8_Uclk[1]); - pr_info("Padding8_Uclk[2] = 0x%x\n", pptable->Padding8_Uclk[2]); - - pr_info("PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->PcieGenSpeed[i]); - - pr_info("PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, 
pptable->PcieLaneCount[i]); - - pr_info("LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->LclkFreq[i]); - - pr_info("EnableTdpm = %d\n", pptable->EnableTdpm); - pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature); - pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature); - pr_info("GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit); - - pr_info("FanStopTemp = %d\n", pptable->FanStopTemp); - pr_info("FanStartTemp = %d\n", pptable->FanStartTemp); - - pr_info("FanGainEdge = %d\n", pptable->FanGainEdge); - pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot); - pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid); - pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx); - pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc); - pr_info("FanGainPlx = %d\n", pptable->FanGainPlx); - pr_info("FanGainHbm = %d\n", pptable->FanGainHbm); - pr_info("FanPwmMin = %d\n", pptable->FanPwmMin); - pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm); - pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm); - pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm); - pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature); - pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk); - pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable); - pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev); - - pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta); - pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta); - pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta); - pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved); - - pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]); - pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]); - - pr_info("qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - pr_info("qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - pr_info("dBtcGbGfxCksOn{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxCksOn.a, - pptable->dBtcGbGfxCksOn.b, - pptable->dBtcGbGfxCksOn.c); - pr_info("dBtcGbGfxCksOff{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxCksOff.a, - pptable->dBtcGbGfxCksOff.b, - pptable->dBtcGbGfxCksOff.c); - pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxAfll.a, - pptable->dBtcGbGfxAfll.b, - pptable->dBtcGbGfxAfll.c); - pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - 
pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - pr_info("XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]); - pr_info("XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]); - pr_info("XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]); - pr_info("XgmiUclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiUclkFreq[i]); - pr_info("XgmiSocclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiSocclkFreq[i]); - pr_info("XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]); - - pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides); - pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); - pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc); - - pr_info("MGpuFanBoostLimitRpm = %d\n", pptable->MGpuFanBoostLimitRpm); - pr_info("padding16_Fan = %d\n", pptable->padding16_Fan); - - pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0); - pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0); - - pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - for (i = 0; i < 11; i++) - pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]); - - for (i = 0; i < 3; i++) - pr_info("Padding32[%d] = 0x%x\n", i, pptable->Padding32[i]); - - pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx); - pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); - - pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); 
- pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - pr_info("VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - pr_info("VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - - pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - pr_info("SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent); - pr_info("Padding8_V = 0x%x\n", pptable->Padding8_V); - - pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset); - pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - pr_info("SocOffset = 0x%x\n", pptable->SocOffset); - pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - pr_info("Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - pr_info("Mem0Offset = 0x%x\n", pptable->Mem0Offset); - pr_info("Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - pr_info("Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - pr_info("Mem1Offset = 0x%x\n", pptable->Mem1Offset); - pr_info("Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - pr_info("AcDcGpio = %d\n", pptable->AcDcGpio); - pr_info("AcDcPolarity = %d\n", pptable->AcDcPolarity); - pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio); - pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity); - - pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio); - pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity); - pr_info("Padding1 = 0x%x\n", pptable->Padding1); - pr_info("Padding2 = 0x%x\n", pptable->Padding2); - - pr_info("LedPin0 = %d\n", pptable->LedPin0); - pr_info("LedPin1 = %d\n", pptable->LedPin1); - pr_info("LedPin2 = %d\n", pptable->LedPin2); - pr_info("padding8_4 = 0x%x\n", pptable->padding8_4); - - pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled); - pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent); - pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq); - - pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled); - pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent); - pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq); - - pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled); - pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent); - pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq); - - pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled); - pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); - pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); - - for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) { - pr_info("I2cControllers[%d]:\n", i); - pr_info(" .Enabled = %d\n", - pptable->I2cControllers[i].Enabled); - pr_info(" .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - pr_info(" .ControllerPort = %d\n", - pptable->I2cControllers[i].ControllerPort); - pr_info(" .ControllerName = %d\n", - pptable->I2cControllers[i].ControllerName); - pr_info(" .ThermalThrottler = %d\n", - pptable->I2cControllers[i].ThermalThrottler); - pr_info(" .I2cProtocol = %d\n", - pptable->I2cControllers[i].I2cProtocol); - pr_info(" .I2cSpeed = %d\n", - pptable->I2cControllers[i].I2cSpeed); - } - - for (i = 0; i < 10; i++) - pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]); - - for (i = 0; i < 8; i++) - 
pr_info("MmHubPadding[%d] = 0x%x\n", i, pptable->MmHubPadding[i]); -} -#endif - static int check_powerplay_tables( struct pp_hwmgr *hwmgr, const ATOM_Vega20_POWERPLAYTABLE *powerplay_table) @@ -652,8 +80,6 @@ static int check_powerplay_tables( return -EINVAL; } - //dump_pptable(&powerplay_table->smcPPTable); - return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c index a3331ffb2daf..1b1c88590156 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c @@ -191,7 +191,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) uint32_t tach_period, crystal_clock_freq; int result = 0; - if (!speed) + if (!speed || speed > UINT_MAX/8) return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h index f4f9a104d170..915f1b8e4dba 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h @@ -396,7 +396,6 @@ struct phm_odn_clock_levels { }; extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr); -extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); extern int phm_setup_asic(struct pp_hwmgr *hwmgr); extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 6f536159df4d..c661185753b4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -122,8 +122,8 @@ struct phm_acpclock_voltage_dependency_record { }; struct phm_clock_voltage_dependency_table { - uint32_t count; /* Number of entries. */ - struct phm_clock_voltage_dependency_record entries[]; /* Dynamically allocate count entries. */ + uint32_t count; + struct phm_clock_voltage_dependency_record entries[]; }; struct phm_phase_shedding_limits_record { @@ -160,28 +160,28 @@ struct phm_vce_clock_voltage_dependency_record { }; struct phm_phase_shedding_limits_table { - uint32_t count; + uint32_t count; struct phm_phase_shedding_limits_record entries[]; }; struct phm_vceclock_voltage_dependency_table { - uint8_t count; /* Number of entries. */ - struct phm_vceclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ + uint8_t count; + struct phm_vceclock_voltage_dependency_record entries[]; }; struct phm_uvdclock_voltage_dependency_table { - uint8_t count; /* Number of entries. */ - struct phm_uvdclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ + uint8_t count; + struct phm_uvdclock_voltage_dependency_record entries[]; }; struct phm_samuclock_voltage_dependency_table { - uint8_t count; /* Number of entries. */ - struct phm_samuclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ + uint8_t count; + struct phm_samuclock_voltage_dependency_record entries[]; }; struct phm_acpclock_voltage_dependency_table { - uint32_t count; /* Number of entries. */ - struct phm_acpclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. 
*/ + uint32_t count; + struct phm_acpclock_voltage_dependency_record entries[]; }; struct phm_vce_clock_voltage_dependency_table { @@ -257,7 +257,6 @@ struct pp_hwmgr_func { int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr, unsigned long, struct pp_power_state *); int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr); - int (*powerdown_uvd)(struct pp_hwmgr *hwmgr); void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate); void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate); void (*powergate_acp)(struct pp_hwmgr *hwmgr, bool bgate); @@ -351,7 +350,7 @@ struct pp_hwmgr_func { int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); - bool (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr); + int (*get_bamaco_support)(struct pp_hwmgr *hwmgr); int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state); int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf); diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c index 7eeab84d421a..ac9ec8257f82 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c @@ -185,10 +185,13 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr, static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr) { uint32_t smc_driver_if_version; + int ret = 0; - smum_send_msg_to_smc(hwmgr, + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion, &smc_driver_if_version); + if (ret) + return ret; if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) && (smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) { diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c index 5a010cd38303..baf51cd82a35 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c @@ -46,42 +46,6 @@ static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, } -int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) -{ - uint32_t data; - uint32_t addr; - uint8_t *dest_byte; - uint8_t i, data_byte[4] = {0}; - uint32_t *pdata = (uint32_t *)&data_byte; - - PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL); - PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL); - - addr = smc_start_address; - - while (byte_count >= 4) { - smu7_read_smc_sram_dword(hwmgr, addr, &data, limit); - - *dest = PP_SMC_TO_HOST_UL(data); - - dest += 1; - byte_count -= 4; - addr += 4; - } - - if (byte_count) { - smu7_read_smc_sram_dword(hwmgr, addr, &data, limit); - *pdata = PP_SMC_TO_HOST_UL(data); - /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. 
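
The hwmgr.h hunk above replaces the old one-element entries[1] arrays with C99 flexible array members, so sizeof() covers only the header and allocations stop carrying a hidden extra entry; in the kernel such allocations would typically go through struct_size() to guard against multiplication overflow. A small userspace sketch of the pattern:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct dep_record {
        uint32_t clk;
        uint32_t v;
};

/* Flexible array member: sizeof(struct dep_table) covers only the
 * header, so the allocation below is exact rather than one entry big. */
struct dep_table {
        uint32_t count;
        struct dep_record entries[];
};

static struct dep_table *dep_table_alloc(uint32_t count)
{
        struct dep_table *t;

        t = malloc(sizeof(*t) + count * sizeof(t->entries[0]));
        if (t)
                t->count = count;
        return t;
}

int main(void)
{
        struct dep_table *t = dep_table_alloc(4);

        if (!t)
                return 1;
        t->entries[3].clk = 1000;
        printf("count=%u last clk=%u\n", t->count, t->entries[3].clk);
        free(t);
        return 0;
}
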
*/ - dest_byte = (uint8_t *)dest; - for (i = 0; i < byte_count; i++) - dest_byte[i] = data_byte[i]; - } - - return 0; -} - - int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) { diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h index e7303dc8c260..63e428ceaee4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h @@ -53,8 +53,6 @@ struct smu7_smumgr { }; -int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, - uint32_t *dest, uint32_t byte_count, uint32_t limit); int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit); int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c index a70d73896649..f9c0f117725d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c @@ -130,13 +130,17 @@ int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr, uint64_t *features_enabled) { uint32_t enabled_features; + int ret; if (features_enabled == NULL) return -EINVAL; - smum_send_msg_to_smc(hwmgr, + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures, &enabled_features); + if (ret) + return ret; + *features_enabled = enabled_features; return 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c index b52ce135d84d..d3ff6a831ed5 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c @@ -257,20 +257,18 @@ static int vega12_smu_init(struct pp_hwmgr *hwmgr) priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t); tools_size = 0x19000; - if (tools_size) { - ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, - tools_size, - PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle, - &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr, - &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); - if (ret) - goto err1; + ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, + tools_size, + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); + if (ret) + goto err1; - priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01; - priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size; - } + priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01; + priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size; /* allocate space for AVFS Fuse table */ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 65333141b1c1..d79a1d94661a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -45,6 +45,7 @@ #include "smu_v13_0_6_ppt.h" #include "smu_v13_0_7_ppt.h" #include "smu_v14_0_0_ppt.h" +#include "smu_v14_0_2_ppt.h" #include "amd_pcie.h" /* @@ -71,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit); static int 
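
The vega10_get_enabled_smc_features change above is the same hardening applied once more: without the return-value check, a failed PPSMC_MSG_GetEnabledSmuFeatures left enabled_features uninitialized, and that stack garbage was widened into the 64-bit feature mask. A toy demonstration of the out-parameter contract, with query_features as a stand-in for the mailbox call:

#include <stdint.h>
#include <stdio.h>

/* Stand-in query: writes *out only on success. */
static int query_features(int fail, uint32_t *out)
{
        if (fail)
                return -5; /* -EIO: *out left untouched */
        *out = 0xabcd;
        return 0;
}

static int get_enabled_features(int fail, uint64_t *features)
{
        uint32_t lo; /* deliberately uninitialized, like the original */
        int ret;

        ret = query_features(fail, &lo);
        if (ret)
                return ret; /* the fix: never widen garbage into *features */

        *features = lo;
        return 0;
}

int main(void)
{
        uint64_t f = 0;

        printf("ok ret=%d\n", get_enabled_features(0, &f));
        printf("fail ret=%d (features untouched)\n",
               get_enabled_features(1, &f));
        return 0;
}
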
smu_set_fan_speed_rpm(void *handle, uint32_t speed); static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state); +static void smu_power_profile_mode_get(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode); +static void smu_power_profile_mode_put(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode); static int smu_sys_get_pp_feature_mask(void *handle, char *buf) @@ -139,7 +144,8 @@ int smu_set_soft_freq_range(struct smu_context *smu, ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, clk_type, min, - max); + max, + false); return ret; } @@ -232,7 +238,8 @@ static bool is_vcn_enabled(struct amdgpu_device *adev) } static int smu_dpm_set_vcn_enable(struct smu_context *smu, - bool enable) + bool enable, + int inst) { struct smu_power_context *smu_power = &smu->smu_power; struct smu_power_gate *power_gate = &smu_power->power_gate; @@ -247,12 +254,12 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu, if (!smu->ppt_funcs->dpm_set_vcn_enable) return 0; - if (atomic_read(&power_gate->vcn_gated) ^ enable) + if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable) return 0; - ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); + ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst); if (!ret) - atomic_set(&power_gate->vcn_gated, !enable); + atomic_set(&power_gate->vcn_gated[inst], !enable); return ret; } @@ -323,12 +330,25 @@ static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu, return ret; } +static int smu_set_mall_enable(struct smu_context *smu) +{ + int ret = 0; + + if (!smu->ppt_funcs->set_mall_enable) + return 0; + + ret = smu->ppt_funcs->set_mall_enable(smu); + + return ret; +} + /** * smu_dpm_set_power_gate - power gate/ungate the specific IP block * * @handle: smu_context pointer - * @block_type: the IP block to power gate/ungate - * @gate: to power gate if true, ungate otherwise + * @block_type: the IP block to power gate/ungate + * @gate: to power gate if true, ungate otherwise + * @inst: the instance of the IP block to power gate/ungate * * This API uses no smu->mutex lock protection due to: * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce). @@ -339,7 +359,8 @@ static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu, */ static int smu_dpm_set_power_gate(void *handle, uint32_t block_type, - bool gate) + bool gate, + int inst) { struct smu_context *smu = handle; int ret = 0; @@ -358,10 +379,10 @@ static int smu_dpm_set_power_gate(void *handle, */ case AMD_IP_BLOCK_TYPE_UVD: case AMD_IP_BLOCK_TYPE_VCN: - ret = smu_dpm_set_vcn_enable(smu, !gate); + ret = smu_dpm_set_vcn_enable(smu, !gate, inst); if (ret) - dev_err(smu->adev->dev, "Failed to power %s VCN!\n", - gate ? "gate" : "ungate"); + dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n", + gate ? 
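
smu_dpm_set_vcn_enable now takes an instance index and keeps one vcn_gated atomic per instance, so multi-instance VCN parts can gate each engine independently while single-instance behavior is unchanged. A rough model of the per-instance skip-if-already-there logic; MAX_VCN_INST and the message print are placeholders:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_VCN_INST 4

/* 1 = gated (powered down), 0 = ungated, one flag per instance,
 * mirroring the new vcn_gated[inst] array. */
static atomic_int vcn_gated[MAX_VCN_INST];

static int set_vcn_enable(int inst, int enable)
{
        if (inst < 0 || inst >= MAX_VCN_INST)
                return -22; /* -EINVAL */

        /* Skip the firmware message if the instance is already in
         * the requested state, as the driver does. */
        if (atomic_load(&vcn_gated[inst]) ^ enable)
                return 0;

        printf("SMC: %s VCN%d\n", enable ? "ungate" : "gate", inst);
        atomic_store(&vcn_gated[inst], !enable);
        return 0;
}

int main(void)
{
        for (int i = 0; i < MAX_VCN_INST; i++)
                atomic_store(&vcn_gated[i], 1); /* boot: all gated */

        set_vcn_enable(1, 1); /* powers up instance 1 only */
        set_vcn_enable(1, 1); /* no-op: already ungated */
        return 0;
}
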
"gate" : "ungate", inst); break; case AMD_IP_BLOCK_TYPE_GFX: ret = smu_gfx_off_control(smu, gate); @@ -536,7 +557,8 @@ bool is_support_sw_smu(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20) return false; - if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) && + amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC)) return true; return false; @@ -590,7 +612,8 @@ static int smu_sys_set_pp_table(void *handle, return -EIO; } - if (!smu_table->hardcode_pptable) { + if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) { + kfree(smu_table->hardcode_pptable); smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL); if (!smu_table->hardcode_pptable) return -ENOMEM; @@ -671,6 +694,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) renoir_set_ppt_funcs(smu); break; case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 2): vangogh_set_ppt_funcs(smu); break; case IP_VERSION(13, 0, 1): @@ -704,6 +728,8 @@ static int smu_set_funcs(struct amdgpu_device *adev) smu_v13_0_0_set_ppt_funcs(smu); break; case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 12): smu_v13_0_6_set_ppt_funcs(smu); /* Enable pp_od_clk_voltage node */ smu->od_enabled = true; @@ -713,8 +739,14 @@ static int smu_set_funcs(struct amdgpu_device *adev) break; case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): + case IP_VERSION(14, 0, 4): + case IP_VERSION(14, 0, 5): smu_v14_0_0_set_ppt_funcs(smu); break; + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + smu_v14_0_2_set_ppt_funcs(smu); + break; default: return -EINVAL; } @@ -722,9 +754,9 @@ static int smu_set_funcs(struct amdgpu_device *adev) return 0; } -static int smu_early_init(void *handle) +static int smu_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu; int r; @@ -737,7 +769,9 @@ static int smu_early_init(void *handle) smu->is_apu = false; smu->smu_baco.state = SMU_BACO_STATE_NONE; smu->smu_baco.platform_support = false; + smu->smu_baco.maco_support = false; smu->user_dpm_profile.fan_mode = -1; + smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN; mutex_init(&smu->message_lock); @@ -755,21 +789,25 @@ static int smu_set_default_dpm_table(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; struct smu_power_context *smu_power = &smu->smu_power; struct smu_power_gate *power_gate = &smu_power->power_gate; - int vcn_gate, jpeg_gate; + int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i; int ret = 0; if (!smu->ppt_funcs->set_default_dpm_table) return 0; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN) - vcn_gate = atomic_read(&power_gate->vcn_gated); + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]); + } if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) jpeg_gate = atomic_read(&power_gate->jpeg_gated); if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { - ret = smu_dpm_set_vcn_enable(smu, true); - if (ret) - return ret; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + ret = smu_dpm_set_vcn_enable(smu, true, i); + if (ret) + return ret; + } } if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { @@ -786,8 +824,10 @@ static int smu_set_default_dpm_table(struct smu_context *smu) if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) smu_dpm_set_jpeg_enable(smu, !jpeg_gate); err_out: - if (adev->pg_flags & AMD_PG_SUPPORT_VCN) - 
smu_dpm_set_vcn_enable(smu, !vcn_gate); + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i); + } return ret; } @@ -805,9 +845,9 @@ static int smu_apply_default_config_table_settings(struct smu_context *smu) return smu_set_config_table(smu, &adev->pm.config_table); } -static int smu_late_init(void *handle) +static int smu_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret = 0; @@ -987,7 +1027,10 @@ static int smu_alloc_memory_pool(struct smu_context *smu) memory_pool->size = pool_size; memory_pool->align = PAGE_SIZE; - memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT; + memory_pool->domain = + (adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ? + AMDGPU_GEM_DOMAIN_VRAM : + AMDGPU_GEM_DOMAIN_GTT; switch (pool_size) { case SMU_MEMORY_POOL_SIZE_256_MB: @@ -1190,24 +1233,43 @@ static void smu_swctf_delayed_work_handler(struct work_struct *work) static void smu_init_xgmi_plpd_mode(struct smu_context *smu) { + struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm); + struct smu_dpm_policy_ctxt *policy_ctxt; + struct smu_dpm_policy *policy; + + policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD); if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { - smu->plpd_mode = XGMI_PLPD_DEFAULT; + if (policy) + policy->current_level = XGMI_PLPD_DEFAULT; return; } /* PMFW put PLPD into default policy after enabling the feature */ if (smu_feature_is_enabled(smu, - SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) - smu->plpd_mode = XGMI_PLPD_DEFAULT; - else - smu->plpd_mode = XGMI_PLPD_NONE; + SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) { + if (policy) + policy->current_level = XGMI_PLPD_DEFAULT; + } else { + policy_ctxt = dpm_ctxt->dpm_policies; + if (policy_ctxt) + policy_ctxt->policy_mask &= + ~BIT(PP_PM_POLICY_XGMI_PLPD); + } +} + +static void smu_init_power_profile(struct smu_context *smu) +{ + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) + smu->power_profile_mode = + PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + smu_power_profile_mode_get(smu, smu->power_profile_mode); } -static int smu_sw_init(void *handle) +static int smu_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; - int ret; + int i, ret; smu->pool_size = adev->pm.smu_prv_buffer_size; smu->smu_feature.feature_num = SMU_FEATURE_MAX; @@ -1218,30 +1280,14 @@ static int smu_sw_init(void *handle) INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); atomic64_set(&smu->throttle_int_counter, 0); smu->watermarks_bitmap = 0; - smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; - smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; - smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; - 
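
smu_init_xgmi_plpd_mode is reworked onto the generic pm-policy framework: each policy object carries a current_level, and a policy_mask advertises which policies an ASIC supports, so an unsupported policy is masked out rather than parked at a fake "none" level. A rough registry sketch; the policy names and levels below are illustrative:

#include <stdio.h>

enum pm_policy { POLICY_XGMI_PLPD, POLICY_SOC_PSTATE, POLICY_COUNT };

struct dpm_policy {
        int current_level;
};

struct policy_ctxt {
        unsigned int policy_mask;            /* bit set => supported */
        struct dpm_policy policies[POLICY_COUNT];
};

static struct dpm_policy *get_policy(struct policy_ctxt *c, enum pm_policy p)
{
        if (!(c->policy_mask & (1u << p)))
                return NULL;                 /* not supported on this ASIC */
        return &c->policies[p];
}

int main(void)
{
        struct policy_ctxt c = { .policy_mask = 1u << POLICY_XGMI_PLPD };
        struct dpm_policy *pol = get_policy(&c, POLICY_XGMI_PLPD);

        if (pol)
                pol->current_level = 1;      /* e.g. XGMI_PLPD_DEFAULT */

        /* Feature not enabled by firmware: drop it from the mask. */
        c.policy_mask &= ~(1u << POLICY_XGMI_PLPD);
        printf("supported after mask-out: %s\n",
               get_policy(&c, POLICY_XGMI_PLPD) ? "yes" : "no");
        return 0;
}
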
smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; - smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; - smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; - smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; - smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; - - smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; - smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; - smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; - smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; - smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; - smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; + smu_init_power_profile(smu); smu->display_config = &adev->pm.pm_display_cfg; smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; @@ -1282,9 +1328,9 @@ static int smu_sw_init(void *handle) return 0; } -static int smu_sw_fini(void *handle) +static int smu_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; @@ -1294,6 +1340,11 @@ static int smu_sw_fini(void *handle) return ret; } + if (smu->custom_profile_params) { + kfree(smu->custom_profile_params); + smu->custom_profile_params = NULL; + } + smu_fini_microcode(smu); return 0; @@ -1519,6 +1570,7 @@ static int smu_smc_hw_setup(struct smu_context *smu) case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 2): case IP_VERSION(11, 0, 12): if (adev->in_suspend && smu_is_dpm_running(smu)) { dev_info(adev->dev, "dpm has been enabled\n"); @@ -1651,7 +1703,9 @@ static int smu_smc_hw_setup(struct smu_context *smu) return ret; } - if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) + pcie_gen = 4; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) pcie_gen = 3; else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) pcie_gen = 2; @@ -1664,7 +1718,9 @@ static int smu_smc_hw_setup(struct smu_context *smu) * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */ - if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32) + pcie_width = 7; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) pcie_width = 6; else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) pcie_width = 5; @@ -1724,6 +1780,8 @@ static int smu_start_smc_engine(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; + smu->smc_fw_state = SMU_FW_INIT; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) { if (smu->ppt_funcs->load_microcode) { @@ -1753,13 +1811,13 @@ static int smu_start_smc_engine(struct smu_context *smu) return ret; } -static int smu_hw_init(void *handle) +static int smu_hw_init(struct amdgpu_ip_block *ip_block) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { + if (amdgpu_sriov_multi_vf_mode(adev)) { smu->pm_enabled = false; return 0; } @@ -1781,10 +1839,12 @@ static int smu_hw_init(void *handle) 
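
The smu_smc_hw_setup hunk extends the CAIL mask translation to Gen5 (field value 4) and x32 (field value 7), then packs the two numbers into one message argument per the in-code comment (bits 15:8 = gen, bits 7:0 = lane width). A standalone version of that translation — the support-mask constants below are stand-ins for the CAIL_PCIE_LINK_* definitions:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the CAIL_PCIE_LINK_* support masks. */
#define GEN4_SUPPORT (1u << 3)
#define GEN5_SUPPORT (1u << 4)
#define X16_SUPPORT  (1u << 5)
#define X32_SUPPORT  (1u << 6)

static uint32_t pack_pcie_arg(uint32_t gen_mask, uint32_t mlw_mask)
{
        uint32_t pcie_gen = 0, pcie_width = 1;

        /* Highest supported wins; 4 encodes Gen5, 7 encodes x32. */
        if (gen_mask & GEN5_SUPPORT)
                pcie_gen = 4;
        else if (gen_mask & GEN4_SUPPORT)
                pcie_gen = 3;

        if (mlw_mask & X32_SUPPORT)
                pcie_width = 7;
        else if (mlw_mask & X16_SUPPORT)
                pcie_width = 6;

        /* Bits 15:8 = gen, bits 7:0 = width, as in the driver comment. */
        return (pcie_gen << 8) | pcie_width;
}

int main(void)
{
        printf("arg = 0x%04x\n",
               pack_pcie_arg(GEN5_SUPPORT | GEN4_SUPPORT, X32_SUPPORT));
        return 0;
}
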
ret = smu_set_gfx_imu_enable(smu); if (ret) return ret; - smu_dpm_set_vcn_enable(smu, true); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + smu_dpm_set_vcn_enable(smu, true, i); smu_dpm_set_jpeg_enable(smu, true); smu_dpm_set_vpe_enable(smu, true); smu_dpm_set_umsch_mm_enable(smu, true); + smu_set_mall_enable(smu); smu_set_gfx_cgpg(smu, true); } @@ -1838,6 +1898,8 @@ static int smu_disable_dpms(struct smu_context *smu) case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 10): + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): return 0; default: break; @@ -1862,6 +1924,7 @@ static int smu_disable_dpms(struct smu_context *smu) case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 2): case IP_VERSION(11, 0, 12): case IP_VERSION(11, 0, 13): return 0; @@ -1888,20 +1951,12 @@ static int smu_disable_dpms(struct smu_context *smu) } /* - * For SMU 13.0.4/11 and 14.0.0, PMFW will handle the features disablement properly + * For GFX11 and subsequent APUs, PMFW will handle the features disablement properly * for gpu reset and S0i3 cases. Driver involvement is unnecessary. */ - if (amdgpu_in_reset(adev) || adev->in_s0ix) { - switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(13, 0, 4): - case IP_VERSION(13, 0, 11): - case IP_VERSION(14, 0, 0): - case IP_VERSION(14, 0, 1): - return 0; - default: - break; - } - } + if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 && + smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix)) + return 0; /* * For gpu reset, runpm and hibernation through BACO, @@ -1980,23 +2035,24 @@ static int smu_reset_mp1_state(struct smu_context *smu) return ret; } -static int smu_hw_fini(void *handle) +static int smu_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; - int ret; + int i, ret; - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_sriov_multi_vf_mode(adev)) return 0; - smu_dpm_set_vcn_enable(smu, false); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + smu_dpm_set_vcn_enable(smu, false, i); + adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE; + } smu_dpm_set_jpeg_enable(smu, false); + adev->jpeg.cur_state = AMD_PG_STATE_GATE; smu_dpm_set_vpe_enable(smu, false); smu_dpm_set_umsch_mm_enable(smu, false); - adev->vcn.cur_state = AMD_PG_STATE_GATE; - adev->jpeg.cur_state = AMD_PG_STATE_GATE; - if (!smu->pm_enabled) return 0; @@ -2013,9 +2069,9 @@ static int smu_hw_fini(void *handle) return 0; } -static void smu_late_fini(void *handle) +static void smu_late_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; kfree(smu); @@ -2024,31 +2080,36 @@ static void smu_late_fini(void *handle) static int smu_reset(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; + struct amdgpu_ip_block *ip_block; int ret; - ret = smu_hw_fini(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC); + if (!ip_block) + return -EINVAL; + + ret = smu_hw_fini(ip_block); if (ret) return ret; - ret = smu_hw_init(adev); + ret = smu_hw_init(ip_block); if (ret) return ret; - ret = smu_late_init(adev); + ret = smu_late_init(ip_block); if (ret) return ret; return 0; } -static int smu_suspend(void *handle) +static int smu_suspend(struct amdgpu_ip_block *ip_block) 
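
Several hunks in this region key behavior off packed IP versions — exact matches like IP_VERSION(13, 0, 6) and generation checks like IP_VERSION_MAJ(...) >= 11 for the APU fast path in smu_disable_dpms. The scheme folds major/minor/revision into one comparable integer so a whole generation can be matched with a single shift and compare; a sketch with an assumed bit layout (the kernel's exact macros may differ):

#include <stdio.h>
#include <stdint.h>

/* Pack major/minor/rev into one comparable integer; extracting the
 * major field is then a single shift. Bit layout is illustrative. */
#define IP_VERSION(maj, min, rev) \
        (((uint32_t)(maj) << 16) | ((uint32_t)(min) << 8) | (uint32_t)(rev))
#define IP_VERSION_MAJ(ver) ((ver) >> 16)

int main(void)
{
        uint32_t gc = IP_VERSION(11, 0, 2);

        if (IP_VERSION_MAJ(gc) >= 11)
                puts("GFX11+ APU path: firmware handles feature disablement");

        /* Exact-version checks still work as plain equality. */
        printf("is 13.0.6: %d\n", gc == IP_VERSION(13, 0, 6));
        return 0;
}
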
{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; uint64_t count; - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_sriov_multi_vf_mode(adev)) return 0; if (!smu->pm_enabled) @@ -2072,16 +2133,19 @@ static int smu_suspend(void *handle) if (!ret) adev->gfx.gfx_off_entrycount = count; + /* clear this on suspend so it will get reprogrammed on resume */ + smu->workload_mask = 0; + return 0; } -static int smu_resume(void *handle) +static int smu_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; - if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_sriov_multi_vf_mode(adev)) return 0; if (!smu->pm_enabled) @@ -2133,13 +2197,13 @@ static int smu_display_configuration_change(void *handle, return 0; } -static int smu_set_clockgating_state(void *handle, +static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { return 0; } -static int smu_set_powergating_state(void *handle, +static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block, enum amd_powergating_state state) { return 0; @@ -2184,24 +2248,49 @@ static int smu_enable_umd_pstate(void *handle, } static int smu_bump_power_profile_mode(struct smu_context *smu, - long *param, - uint32_t param_size) + long *custom_params, + u32 custom_params_max_idx) { - int ret = 0; + u32 workload_mask = 0; + int i, ret = 0; + + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { + if (smu->workload_refcount[i]) + workload_mask |= 1 << i; + } + + if (smu->workload_mask == workload_mask) + return 0; if (smu->ppt_funcs->set_power_profile_mode) - ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); + ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask, + custom_params, + custom_params_max_idx); + + if (!ret) + smu->workload_mask = workload_mask; return ret; } +static void smu_power_profile_mode_get(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode) +{ + smu->workload_refcount[profile_mode]++; +} + +static void smu_power_profile_mode_put(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE profile_mode) +{ + if (smu->workload_refcount[profile_mode]) + smu->workload_refcount[profile_mode]--; +} + static int smu_adjust_power_state_dynamic(struct smu_context *smu, - enum amd_dpm_forced_level level, - bool skip_display_settings) + enum amd_dpm_forced_level level, + bool skip_display_settings) { int ret = 0; - int index = 0; - long workload; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (!skip_display_settings) { @@ -2229,7 +2318,12 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, if (smu_dpm_ctx->dpm_level != level) { ret = smu_asic_set_performance_level(smu, level); if (ret) { - dev_err(smu->adev->dev, "Failed to set performance level!"); + if (ret == -EOPNOTSUPP) + dev_info(smu->adev->dev, "set performance level %d not supported", + level); + else + dev_err(smu->adev->dev, "Failed to set performance level %d", + level); return ret; } @@ -2238,14 +2332,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && - smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { - index = fls(smu->workload_mask); - index = index 
> 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; - - if (smu->power_profile_mode != workload) - smu_bump_power_profile_mode(smu, &workload, 0); - } + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) + smu_bump_power_profile_mode(smu, NULL, 0); return ret; } @@ -2267,6 +2355,8 @@ static int smu_handle_task(struct smu_context *smu, ret = smu_adjust_power_state_dynamic(smu, level, false); break; case AMD_PP_TASK_COMPLETE_INIT: + ret = smu_adjust_power_state_dynamic(smu, level, true); + break; case AMD_PP_TASK_READJUST_POWER_STATE: ret = smu_adjust_power_state_dynamic(smu, level, true); break; @@ -2290,12 +2380,11 @@ static int smu_handle_dpm_task(void *handle, static int smu_switch_power_profile(void *handle, enum PP_SMC_POWER_PROFILE type, - bool en) + bool enable) { struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - long workload; - uint32_t index; + int ret; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; @@ -2303,21 +2392,54 @@ static int smu_switch_power_profile(void *handle, if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) return -EINVAL; - if (!en) { - smu->workload_mask &= ~(1 << smu->workload_prority[type]); - index = fls(smu->workload_mask); - index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; - } else { - smu->workload_mask |= (1 << smu->workload_prority[type]); - index = fls(smu->workload_mask); - index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + if (enable) + smu_power_profile_mode_get(smu, type); + else + smu_power_profile_mode_put(smu, type); + /* don't switch the active workload when paused */ + if (smu->pause_workload) + ret = 0; + else + ret = smu_bump_power_profile_mode(smu, NULL, 0); + if (ret) { + if (enable) + smu_power_profile_mode_put(smu, type); + else + smu_power_profile_mode_get(smu, type); + return ret; + } } + return 0; +} + +static int smu_pause_power_profile(void *handle, + bool pause) +{ + struct smu_context *smu = handle; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + int ret; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && - smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) - smu_bump_power_profile_mode(smu, &workload, 0); + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + smu->pause_workload = pause; + + /* force to bootup default profile */ + if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode) + ret = smu->ppt_funcs->set_power_profile_mode(smu, + workload_mask, + NULL, + 0); + else + ret = smu_bump_power_profile_mode(smu, NULL, 0); + return ret; + } return 0; } @@ -2710,6 +2832,8 @@ int smu_get_power_limit(void *handle, switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 12): + case IP_VERSION(13, 0, 14): case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): case IP_VERSION(11, 0, 12): @@ -2833,6 +2957,10 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break; case OD_FAN_MINIMUM_PWM: clk_type = SMU_OD_FAN_MINIMUM_PWM; break; + case 
OD_FAN_ZERO_RPM_ENABLE: + clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break; + case OD_FAN_ZERO_RPM_STOP_TEMP: + clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break; default: clk_type = SMU_CLK_COUNT; break; } @@ -2896,9 +3024,10 @@ static int smu_read_sensor(void *handle, int *size_arg) { struct smu_context *smu = handle; + struct amdgpu_device *adev = smu->adev; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; - int ret = 0; + int i, ret = 0; uint32_t *size, size_val; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2944,7 +3073,13 @@ static int smu_read_sensor(void *handle, *size = 4; break; case AMDGPU_PP_SENSOR_VCN_POWER_STATE: - *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1; + *(uint32_t *)data = 0; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) { + *(uint32_t *)data = 1; + break; + } + } *size = 4; break; case AMDGPU_PP_SENSOR_MIN_FAN_RPM: @@ -3004,12 +3139,35 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; + bool custom = false; + int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; - return smu_bump_power_profile_mode(smu, param, param_size); + if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) { + custom = true; + /* clear frontend mask so custom changes propagate */ + smu->workload_mask = 0; + }
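+	/*
+	 * Layout note: in the pp_power_profile_mode sysfs path
+	 * (amdgpu_pm.c), param[0..param_size - 1] hold the custom profile
+	 * settings and param[param_size] holds the requested profile mode,
+	 * which is why the mode is read from param[param_size] here and
+	 * below.
+	 */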
+ + if ((param[param_size] != smu->power_profile_mode) || custom) { + /* clear the old user preference */ + smu_power_profile_mode_put(smu, smu->power_profile_mode); + /* set the new user preference */ + smu_power_profile_mode_get(smu, param[param_size]); + ret = smu_bump_power_profile_mode(smu, + custom ? param : NULL, + custom ? param_size : 0); + if (ret) + smu_power_profile_mode_put(smu, param[param_size]); + else + /* store the user's preference */ + smu->power_profile_mode = param[param_size]; + } + + return ret; } static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) @@ -3223,17 +3381,17 @@ static int smu_set_xgmi_pstate(void *handle, return ret; } -static bool smu_get_baco_capability(void *handle) +static int smu_get_baco_capability(void *handle) { struct smu_context *smu = handle; if (!smu->pm_enabled) return false; - if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support) + if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support) return false; - return smu->ppt_funcs->baco_is_support(smu); + return smu->ppt_funcs->get_bamaco_support(smu); } static int smu_baco_set_state(void *handle, int state) @@ -3274,15 +3432,15 @@ bool smu_mode1_reset_is_support(struct smu_context *smu) return ret; } -bool smu_mode2_reset_is_support(struct smu_context *smu) +bool smu_link_reset_is_support(struct smu_context *smu) { bool ret = false; if (!smu->pm_enabled) return false; - if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) - ret = smu->ppt_funcs->mode2_reset_is_support(smu); + if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support) + ret = smu->ppt_funcs->link_reset_is_support(smu); return ret; } @@ -3317,6 +3475,19 @@ static int smu_mode2_reset(void *handle) return ret; } +int smu_link_reset(struct smu_context *smu) +{ + int ret = 0; + + if (!smu->pm_enabled) + return -EOPNOTSUPP; + + if (smu->ppt_funcs->link_reset) + ret = smu->ppt_funcs->link_reset(smu); + + return ret; +} + static int smu_enable_gfx_features(void *handle) { struct smu_context *smu = handle; @@ -3488,30 +3659,118 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) return 0; } -int smu_set_xgmi_plpd_mode(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode) +static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf, + size_t *size) +{ + size_t offset = *size; + int level; + + for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) { + if (level == policy->current_level) + offset += sysfs_emit_at(sysbuf, offset, + "%d : %s*\n", level, + policy->desc->get_desc(policy, level)); + else + offset += sysfs_emit_at(sysbuf, offset, + "%d : %s\n", level, + policy->desc->get_desc(policy, level)); + } + + *size = offset; +} + +ssize_t smu_get_pm_policy_info(struct smu_context *smu, + enum pp_pm_policy p_type, char *sysbuf) +{ + struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; + struct smu_dpm_policy_ctxt *policy_ctxt; + struct smu_dpm_policy *dpm_policy; + size_t offset = 0; + + policy_ctxt = dpm_ctxt->dpm_policies; + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt || + !policy_ctxt->policy_mask) + return -EOPNOTSUPP; + + if (p_type == PP_PM_POLICY_NONE) + return -EINVAL; + + dpm_policy = smu_get_pm_policy(smu, p_type); + if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc) + return -ENOENT; + + if (!sysbuf) + return -EINVAL; + + smu_print_dpm_policy(dpm_policy, sysbuf, &offset); + + return offset; +} + +struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu, + enum pp_pm_policy p_type) +{ + struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; + struct smu_dpm_policy_ctxt *policy_ctxt; + int i; + + policy_ctxt = dpm_ctxt->dpm_policies; + if (!policy_ctxt) + return NULL; + + for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) { + if (policy_ctxt->policies[i].policy_type == p_type) + return &policy_ctxt->policies[i]; + } + + return NULL; +}
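+/*
+ * Backend sketch (illustrative only): an ASIC backend publishes a policy
+ * by filling smu_dpm_context->dpm_policies during setup, e.g. for the
+ * soc_pstate policy; my_set_soc_pstate_policy and my_soc_pstate_desc are
+ * hypothetical backend helpers:
+ *
+ *	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
+ *	struct smu_dpm_policy *policy = &dpm_ctxt->dpm_policies->policies[0];
+ *
+ *	policy->policy_type = PP_PM_POLICY_SOC_PSTATE;
+ *	policy->level_mask = BIT(0) | BIT(1) | BIT(2);
+ *	policy->current_level = 0;
+ *	policy->set_policy = my_set_soc_pstate_policy;
+ *	policy->desc = &my_soc_pstate_desc;
+ *	dpm_ctxt->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_SOC_PSTATE);
+ *
+ * smu_set_pm_policy() below routes level changes into ->set_policy() and
+ * caches the accepted level in ->current_level.
+ */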
+ +int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, + int level) { + struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; + struct smu_dpm_policy *dpm_policy = NULL; + struct smu_dpm_policy_ctxt *policy_ctxt; int ret = -EOPNOTSUPP; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + policy_ctxt = dpm_ctxt->dpm_policies; + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt || + !policy_ctxt->policy_mask) return ret; - /* PLPD policy is not supported if it's NONE */ - if (smu->plpd_mode == XGMI_PLPD_NONE) + if (level < 0 || level >= PP_POLICY_MAX_LEVELS) + return -EINVAL; + + dpm_policy = smu_get_pm_policy(smu, p_type); + + if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy) return ret; - if (smu->plpd_mode == mode) + if (dpm_policy->current_level == level) return 0; - if (smu->ppt_funcs && smu->ppt_funcs->select_xgmi_plpd_policy) - ret = smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode); + ret = dpm_policy->set_policy(smu, level); if (!ret) - smu->plpd_mode = mode; + dpm_policy->current_level = level; return ret; } +static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table) +{ + struct smu_context *smu = handle; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + + if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics) + return -EOPNOTSUPP; + + return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table); +} + static const struct amd_pm_funcs swsmu_pm_funcs = { /* export for sysfs */ .set_fan_control_mode = smu_set_fan_control_mode, @@ -3533,6 +3792,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .get_pp_table = smu_sys_get_pp_table, .set_pp_table = smu_sys_set_pp_table, .switch_power_profile = smu_switch_power_profile, + .pause_power_profile = smu_pause_power_profile, /* export to amdgpu */ .dispatch_tasks = smu_handle_dpm_task, .load_firmware = smu_load_microcode, @@ -3569,6 +3829,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .get_uclk_dpm_states = smu_get_uclk_dpm_states, .get_dpm_clock_table = smu_get_dpm_clock_table, .get_smu_prv_buf_details = smu_get_prv_buffer_details, + .get_xcp_metrics = smu_sys_get_xcp_metrics, }; int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, @@ -3714,3 +3975,38 @@ int smu_send_rma_reason(struct smu_context *smu) return ret; } + +/** + * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU + * @smu: smu_context pointer + * + * This function checks if the SMU supports resetting the SDMA engine. + * It returns true if supported, false otherwise.
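+ *
+ * Typical usage (illustrative), with inst_mask bits selecting the SDMA
+ * instances to reset:
+ *
+ *	if (smu_reset_sdma_is_supported(smu))
+ *		ret = smu_reset_sdma(smu, BIT(0));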
+ */ +bool smu_reset_sdma_is_supported(struct smu_context *smu) +{ + bool ret = false; + + if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported) + ret = smu->ppt_funcs->reset_sdma_is_supported(smu); + + return ret; +} + +int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) +{ + int ret = 0; + + if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma) + ret = smu->ppt_funcs->reset_sdma(smu, inst_mask); + + return ret; +} + +int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask) +{ + if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn) + smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask); + + return 0; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 1fa81575788c..9aacc7bc1c69 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -362,6 +362,27 @@ struct smu_table_context { void *gpu_metrics_table; }; +struct smu_context; +struct smu_dpm_policy; + +struct smu_dpm_policy_desc { + const char *name; + char *(*get_desc)(struct smu_dpm_policy *dpm_policy, int level); +}; + +struct smu_dpm_policy { + struct smu_dpm_policy_desc *desc; + enum pp_pm_policy policy_type; + unsigned long level_mask; + int current_level; + int (*set_policy)(struct smu_context *ctxt, int level); +}; + +struct smu_dpm_policy_ctxt { + struct smu_dpm_policy policies[PP_PM_POLICY_NUM]; + unsigned long policy_mask; +}; + struct smu_dpm_context { uint32_t dpm_context_size; void *dpm_context; @@ -372,12 +393,13 @@ struct smu_dpm_context { struct smu_power_state *dpm_request_power_state; struct smu_power_state *dpm_current_power_state; struct mclock_latency_table *mclk_latency_table; + struct smu_dpm_policy_ctxt *dpm_policies; }; struct smu_power_gate { bool uvd_gated; bool vce_gated; - atomic_t vcn_gated; + atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES]; atomic_t jpeg_gated; atomic_t vpe_gated; atomic_t umsch_mm_gated; @@ -416,9 +438,11 @@ struct mclock_latency_table { }; enum smu_reset_mode { - SMU_RESET_MODE_0, - SMU_RESET_MODE_1, - SMU_RESET_MODE_2, + SMU_RESET_MODE_0, + SMU_RESET_MODE_1, + SMU_RESET_MODE_2, + SMU_RESET_MODE_3, + SMU_RESET_MODE_4, }; enum smu_baco_state { @@ -459,7 +483,7 @@ struct smu_umd_pstate_table { struct cmn2asic_msg_mapping { int valid_mapping; int map_to; - int valid_in_vf; + uint32_t flags; }; struct cmn2asic_mapping { @@ -473,6 +497,12 @@ struct stb_context { spinlock_t lock; }; +enum smu_fw_status { + SMU_FW_INIT = 0, + SMU_FW_RUNTIME, + SMU_FW_HANG, +}; + #define WORKLOAD_POLICY_MAX 7 /* @@ -528,17 +558,22 @@ struct smu_context { uint32_t hard_min_uclk_req_from_dal; bool disable_uclk_switch; + /* asic agnostic workload mask */ uint32_t workload_mask; - uint32_t workload_prority[WORKLOAD_POLICY_MAX]; - uint32_t workload_setting[WORKLOAD_POLICY_MAX]; + bool pause_workload; + /* default/user workload preference */ uint32_t power_profile_mode; - uint32_t default_power_profile_mode; + uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT]; + /* backend specific custom workload settings */ + long *custom_profile_params; bool pm_enabled; bool is_apu; uint32_t smc_driver_if_version; uint32_t smc_fw_if_version; uint32_t smc_fw_version; + uint32_t smc_fw_caps; + uint8_t smc_fw_state; bool uploading_custom_pp_table; bool dc_controlled_by_gpio; @@ -579,8 +614,6 @@ struct smu_context { struct delayed_work swctf_delayed_work; - enum pp_xgmi_plpd_mode plpd_mode; - /* data structures for wbrf feature support */ bool wbrf_supported; struct notifier_block wbrf_notifier; @@ 
-703,15 +736,18 @@ struct pptable_funcs { * @set_power_profile_mode: Set a power profile mode. Also used to * create/set custom power profile modes. * &input: Power profile mode parameters. - * &size: Size of &input. + * &workload_mask: mask of workloads to enable + * &custom_params: custom profile parameters + * &custom_params_max_idx: max valid idx into custom_params */ - int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); + int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask, + long *custom_params, u32 custom_params_max_idx); /** * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power * management. */ - int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable); + int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst); /** * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power @@ -831,11 +867,6 @@ struct pptable_funcs { int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); /** - * @dump_pptable: Print the power play table to the system log. - */ - void (*dump_pptable)(struct smu_context *smu); - - /** * @get_power_limit: Get the device's power limits. */ int (*get_power_limit)(struct smu_context *smu, @@ -856,12 +887,6 @@ struct pptable_funcs { int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state); /** - * @select_xgmi_plpd_policy: Select xgmi per-link power down policy. - */ - int (*select_xgmi_plpd_policy)(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode); - - /** * @update_pcie_parameters: Update and upload the system's PCIe * capabilities to the SMU. * &pcie_gen_cap: Maximum allowed PCIe generation. @@ -1174,9 +1199,11 @@ struct pptable_funcs { int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); /** - * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off). + * @get_bamaco_support: Check if GPU supports BACO/MACO + * BACO: Bus Active, Chip Off + * MACO: Memory Active, Chip Off */ - bool (*baco_is_support)(struct smu_context *smu); + int (*get_bamaco_support)(struct smu_context *smu); /** * @baco_get_state: Get the current BACO state. @@ -1204,10 +1231,11 @@ struct pptable_funcs { * @mode1_reset_is_support: Check if GPU supports mode1 reset. */ bool (*mode1_reset_is_support)(struct smu_context *smu); + /** - * @mode2_reset_is_support: Check if GPU supports mode2 reset. + * @link_reset_is_support: Check if GPU supports link reset. */ - bool (*mode2_reset_is_support)(struct smu_context *smu); + bool (*link_reset_is_support)(struct smu_context *smu); /** * @mode1_reset: Perform mode1 reset. @@ -1227,6 +1255,13 @@ struct pptable_funcs { int (*enable_gfx_features)(struct smu_context *smu); /** + * @link_reset: Perform link reset. + * + * Link reset performed through the gfx device driver. + */ + int (*link_reset)(struct smu_context *smu); + + /** * @get_dpm_ultimate_freq: Get the hard frequency range of a clock * domain in MHz. */ @@ -1236,7 +1271,8 @@ struct pptable_funcs { * @set_soft_freq_limited_range: Set the soft frequency range of a clock * domain in MHz. */ - int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); + int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max, + bool automatic); /** * @set_power_source: Notify the SMU of the current power source.
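The reworked @set_power_profile_mode contract above pairs with a reference-counting frontend in amdgpu_smu.c: callers take or drop a reference on a profile, and smu_bump_power_profile_mode() collapses the per-profile refcounts into the workload_mask handed to the backend, skipping the message when the mask is unchanged. A minimal caller sketch under those assumptions (mirroring smu_switch_power_profile(); the COMPUTE profile is only an example):

	int ret;

	/* express interest in the compute workload profile */
	smu_power_profile_mode_get(smu, PP_SMC_POWER_PROFILE_COMPUTE);
	/* recompute workload_mask and notify the backend if it changed */
	ret = smu_bump_power_profile_mode(smu, NULL, 0);
	if (ret)
		/* drop the reference again if the backend rejected the mask */
		smu_power_profile_mode_put(smu, PP_SMC_POWER_PROFILE_COMPUTE);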
@@ -1348,6 +1384,20 @@ struct pptable_funcs { int (*send_rma_reason)(struct smu_context *smu); /** + * @reset_sdma: message SMU to soft reset the sdma instances selected by inst_mask. + */ + int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask); + /** + * @reset_sdma_is_supported: Check if SDMA reset is supported by the SMU. + */ + bool (*reset_sdma_is_supported)(struct smu_context *smu); + + /** + * @dpm_reset_vcn: message SMU to soft reset the vcn instances selected by inst_mask. + */ + int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask); + + /** * @get_ecc_table: message SMU to get ECC INFO table. */ ssize_t (*get_ecc_info)(struct smu_context *smu, void *table); @@ -1392,6 +1442,11 @@ struct pptable_funcs { int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable); /** + * @set_mall_enable: Init MALL power gating control. + */ + int (*set_mall_enable)(struct smu_context *smu); + + /** * @notify_rlc_state: Notify RLC power state to SMU. */ int (*notify_rlc_state)(struct smu_context *smu, bool en); @@ -1411,6 +1466,12 @@ */ int (*set_wbrf_exclusion_ranges)(struct smu_context *smu, struct freq_band_range *exclusion_ranges); + /** + * @get_xcp_metrics: Get a copy of the partition metrics table from SMU. + * Return: Size of table + */ + ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id, + void *table); }; typedef enum { @@ -1483,8 +1544,8 @@ enum smu_baco_seq { BACO_SEQ_COUNT, }; -#define MSG_MAP(msg, index, valid_in_vf) \ - [SMU_MSG_##msg] = {1, (index), (valid_in_vf)} +#define MSG_MAP(msg, index, flags) \ + [SMU_MSG_##msg] = {1, (index), (flags)} #define CLK_MAP(clk, index) \ [SMU_##clk] = {1, (index)} @@ -1548,6 +1609,12 @@ typedef struct { uint32_t MmHubPadding[8]; } WifiBandEntryTable_t; +#define STR_SOC_PSTATE_POLICY "soc_pstate" +#define STR_XGMI_PLPD_POLICY "xgmi_plpd" + +struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu, + enum pp_pm_policy p_type); + #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4) int smu_get_power_limit(void *handle, uint32_t *limit, @@ -1555,8 +1622,9 @@ int smu_get_power_limit(void *handle, enum pp_power_type pp_power_type); bool smu_mode1_reset_is_support(struct smu_context *smu); -bool smu_mode2_reset_is_support(struct smu_context *smu); +bool smu_link_reset_is_support(struct smu_context *smu); int smu_mode1_reset(struct smu_context *smu); +int smu_link_reset(struct smu_context *smu); extern const struct amd_ip_funcs smu_ip_funcs; @@ -1595,5 +1663,13 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev); int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size); int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size); int smu_send_rma_reason(struct smu_context *smu); +int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask); +bool smu_reset_sdma_is_supported(struct smu_context *smu); +int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask); +int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, + int level); +ssize_t smu_get_pm_policy_info(struct smu_context *smu, + enum pp_pm_policy p_type, char *sysbuf); + #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h new file mode 100644 index 000000000000..c2fd0a4a13e5 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h @@ -0,0 +1,1889 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU14_DRIVER_IF_V14_0_H +#define SMU14_DRIVER_IF_V14_0_H + +//Increment this version if SkuTable_t or BoardTable_t change +#define PPTABLE_VERSION 0x1B + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_DPPCLK_DPM_LEVELS 8 +#define NUM_DPREFCLK_DPM_LEVELS 8 +#define NUM_DCFCLK_DPM_LEVELS 8 +#define NUM_DTBCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 6 +#define NUM_LINK_LEVELS 3 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_OD_FAN_MAX_POINTS 6 + +// Feature Control Defines +#define FEATURE_FW_DATA_READ_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT 2 +#define FEATURE_DPM_UCLK_BIT 3 +#define FEATURE_DPM_FCLK_BIT 4 +#define FEATURE_DPM_SOCCLK_BIT 5 +#define FEATURE_DPM_LINK_BIT 6 +#define FEATURE_DPM_DCN_BIT 7 +#define FEATURE_VMEMP_SCALING_BIT 8 +#define FEATURE_VDDIO_MEM_SCALING_BIT 9 +#define FEATURE_DS_GFXCLK_BIT 10 +#define FEATURE_DS_SOCCLK_BIT 11 +#define FEATURE_DS_FCLK_BIT 12 +#define FEATURE_DS_LCLK_BIT 13 +#define FEATURE_DS_DCFCLK_BIT 14 +#define FEATURE_DS_UCLK_BIT 15 +#define FEATURE_GFX_ULV_BIT 16 +#define FEATURE_FW_DSTATE_BIT 17 +#define FEATURE_GFXOFF_BIT 18 +#define FEATURE_BACO_BIT 19 +#define FEATURE_MM_DPM_BIT 20 +#define FEATURE_SOC_MPCLK_DS_BIT 21 +#define FEATURE_BACO_MPCLK_DS_BIT 22 +#define FEATURE_THROTTLERS_BIT 23 +#define FEATURE_SMARTSHIFT_BIT 24 +#define FEATURE_GTHR_BIT 25 +#define FEATURE_ACDC_BIT 26 +#define FEATURE_VR0HOT_BIT 27 +#define FEATURE_FW_CTF_BIT 28 +#define FEATURE_FAN_CONTROL_BIT 29 +#define FEATURE_GFX_DCS_BIT 30 +#define FEATURE_GFX_READ_MARGIN_BIT 31 +#define FEATURE_LED_DISPLAY_BIT 32 +#define FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT 33 +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 34 +#define FEATURE_OPTIMIZED_VMIN_BIT 35 +#define FEATURE_GFX_IMU_BIT 36 +#define FEATURE_BOOT_TIME_CAL_BIT 37 +#define FEATURE_GFX_PCC_DFLL_BIT 38 +#define FEATURE_SOC_CG_BIT 39 +#define FEATURE_DF_CSTATE_BIT 40 +#define FEATURE_GFX_EDC_BIT 41 +#define FEATURE_BOOT_POWER_OPT_BIT 42 +#define FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT 43 +#define FEATURE_DS_VCN_BIT 44 +#define FEATURE_BACO_CG_BIT 45 +#define FEATURE_MEM_TEMP_READ_BIT 46 +#define FEATURE_ATHUB_MMHUB_PG_BIT 47 +#define FEATURE_SOC_PCC_BIT 48 +#define FEATURE_EDC_PWRBRK_BIT 49 +#define FEATURE_SOC_EDC_XVMIN_BIT 50 +#define 
FEATURE_GFX_PSM_DIDT_BIT 51 +#define FEATURE_APT_ALL_ENABLE_BIT 52 +#define FEATURE_APT_SQ_THROTTLE_BIT 53 +#define FEATURE_APT_PF_DCS_BIT 54 +#define FEATURE_GFX_EDC_XVMIN_BIT 55 +#define FEATURE_GFX_DIDT_XVMIN_BIT 56 +#define FEATURE_FAN_ABNORMAL_BIT 57 +#define FEATURE_CLOCK_STRETCH_COMPENSATOR 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 +#define NUM_FEATURES 64 + +#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL +#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \ + (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \ + (1 << FEATURE_DPM_UCLK_BIT) | \ + (1 << FEATURE_DPM_FCLK_BIT) | \ + (1 << FEATURE_DPM_SOCCLK_BIT) | \ + (1 << FEATURE_DPM_LINK_BIT) | \ + (1 << FEATURE_DPM_DCN_BIT) | \ + (1 << FEATURE_DS_GFXCLK_BIT) | \ + (1 << FEATURE_DS_SOCCLK_BIT) | \ + (1 << FEATURE_DS_FCLK_BIT) | \ + (1 << FEATURE_DS_LCLK_BIT) | \ + (1 << FEATURE_DS_DCFCLK_BIT) | \ + (1 << FEATURE_DS_UCLK_BIT) | \ + (1ULL << FEATURE_DS_VCN_BIT) + + +//For use with feature control messages +typedef enum { + FEATURE_PWR_ALL, + FEATURE_PWR_S5, + FEATURE_PWR_BACO, + FEATURE_PWR_SOC, + FEATURE_PWR_GFX, + FEATURE_PWR_DOMAIN_COUNT, +} FEATURE_PWR_DOMAIN_e; + +//For use with feature control + BTC save restore +typedef enum { + FEATURE_BTC_NOP, + FEATURE_BTC_SAVE, + FEATURE_BTC_RESTORE, + FEATURE_BTC_COUNT, +} FEATURE_BTC_e; + +// Debug Overrides Bitmask +#define DEBUG_OVERRIDE_NOT_USE 0x00000001 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008 +#define DEBUG_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00000010 +#define DEBUG_OVERRIDE_DISABLE_VCN_PG 0x00000020 +#define DEBUG_OVERRIDE_DISABLE_FMAX_VMAX 0x00000040 +#define DEBUG_OVERRIDE_DISABLE_IMU_FW_CHECKS 0x00000080 +#define DEBUG_OVERRIDE_DISABLE_D0i2_REENTRY_HSR_TIMER_CHECK 0x00000100 +#define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200 +#define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400 +#define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800 +#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000 +#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000 +#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000 +#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000 +#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000 + +// VR Mapping Bit Defines +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + +// PSI Bit Defines +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + +typedef enum { + SVI_PSI_0, // Full phase count (default) + SVI_PSI_1, // Phase count 1st level + SVI_PSI_2, // Phase count 2nd level + SVI_PSI_3, // Single phase operation + active diode emulation + SVI_PSI_4, // Single phase operation + passive diode emulation *optional* + SVI_PSI_5, // Reserved + SVI_PSI_6, // Power down to 0V (voltage regulation disabled) + SVI_PSI_7, // Automated phase shedding and diode emulation +} SVI_PSI_e; + +// Throttler Control/Status Bits +#define THROTTLER_TEMP_EDGE_BIT 0 +#define THROTTLER_TEMP_HOTSPOT_BIT 1 +#define 
THROTTLER_TEMP_HOTSPOT_GFX_BIT 2 +#define THROTTLER_TEMP_HOTSPOT_SOC_BIT 3 +#define THROTTLER_TEMP_MEM_BIT 4 +#define THROTTLER_TEMP_VR_GFX_BIT 5 +#define THROTTLER_TEMP_VR_SOC_BIT 6 +#define THROTTLER_TEMP_VR_MEM0_BIT 7 +#define THROTTLER_TEMP_VR_MEM1_BIT 8 +#define THROTTLER_TEMP_LIQUID0_BIT 9 +#define THROTTLER_TEMP_LIQUID1_BIT 10 +#define THROTTLER_TEMP_PLX_BIT 11 +#define THROTTLER_TDC_GFX_BIT 12 +#define THROTTLER_TDC_SOC_BIT 13 +#define THROTTLER_PPT0_BIT 14 +#define THROTTLER_PPT1_BIT 15 +#define THROTTLER_PPT2_BIT 16 +#define THROTTLER_PPT3_BIT 17 +#define THROTTLER_FIT_BIT 18 +#define THROTTLER_GFX_APCC_PLUS_BIT 19 +#define THROTTLER_GFX_DVO_BIT 20 +#define THROTTLER_COUNT 21 + +// FW DState Features Control Bits +#define FW_DSTATE_SOC_ULV_BIT 0 +#define FW_DSTATE_G6_HSR_BIT 1 +#define FW_DSTATE_G6_PHY_VMEMP_OFF_BIT 2 +#define FW_DSTATE_SMN_DS_BIT 3 +#define FW_DSTATE_MP1_WHISPER_MODE_BIT 4 +#define FW_DSTATE_SOC_LIV_MIN_BIT 5 +#define FW_DSTATE_SOC_PLL_PWRDN_BIT 6 +#define FW_DSTATE_MEM_PLL_PWRDN_BIT 7 +#define FW_DSTATE_MALL_ALLOC_BIT 8 +#define FW_DSTATE_MEM_PSI_BIT 9 +#define FW_DSTATE_HSR_NON_STROBE_BIT 10 +#define FW_DSTATE_MP0_ENTER_WFI_BIT 11 +#define FW_DSTATE_MALL_FLUSH_BIT 12 +#define FW_DSTATE_SOC_PSI_BIT 13 +#define FW_DSTATE_MMHUB_INTERLOCK_BIT 14 +#define FW_DSTATE_D0i3_2_QUIET_FW_BIT 15 +#define FW_DSTATE_CLDO_PRG_BIT 16 +#define FW_DSTATE_DF_PLL_PWRDN_BIT 17 + +//LED Display Mask & Control Bits +#define LED_DISPLAY_GFX_DPM_BIT 0 +#define LED_DISPLAY_PCIE_BIT 1 +#define LED_DISPLAY_ERROR_BIT 2 + + +#define MEM_TEMP_READ_OUT_OF_BAND_BIT 0 +#define MEM_TEMP_READ_IN_BAND_REFRESH_BIT 1 +#define MEM_TEMP_READ_IN_BAND_DUMMY_PSTATE_BIT 2 + +typedef enum { + SMARTSHIFT_VERSION_1, + SMARTSHIFT_VERSION_2, + SMARTSHIFT_VERSION_3, +} SMARTSHIFT_VERSION_e; + +typedef enum { + FOPT_CALC_AC_CALC_DC, + FOPT_PPTABLE_AC_CALC_DC, + FOPT_CALC_AC_PPTABLE_DC, + FOPT_PPTABLE_AC_PPTABLE_DC, +} FOPT_CALC_e; + +typedef enum { + DRAM_BIT_WIDTH_DISABLED = 0, + DRAM_BIT_WIDTH_X_8 = 8, + DRAM_BIT_WIDTH_X_16 = 16, + DRAM_BIT_WIDTH_X_32 = 32, + DRAM_BIT_WIDTH_X_64 = 64, + DRAM_BIT_WIDTH_X_128 = 128, + DRAM_BIT_WIDTH_COUNT, +} DRAM_BIT_WIDTH_TYPE_e; + +//I2C Interface +#define NUM_I2C_CONTROLLERS 8 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 24 + +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VMEMP, + I2C_CONTROLLER_NAME_VR_VDDIO, + I2C_CONTROLLER_NAME_LIQUID0, + I2C_CONTROLLER_NAME_LIQUID1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_FAN_INTAKE, + I2C_CONTROLLER_NAME_COUNT, +} I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_VMEMP, + I2C_CONTROLLER_THROTTLER_VR_VDDIO, + I2C_CONTROLLER_THROTTLER_LIQUID0, + I2C_CONTROLLER_THROTTLER_LIQUID1, + I2C_CONTROLLER_THROTTLER_PLX, + I2C_CONTROLLER_THROTTLER_FAN_INTAKE, + I2C_CONTROLLER_THROTTLER_INA3221, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, + I2C_CONTROLLER_PROTOCOL_VR_IR35217, + I2C_CONTROLLER_PROTOCOL_TMP_MAX31875, + I2C_CONTROLLER_PROTOCOL_INA3221, + I2C_CONTROLLER_PROTOCOL_TMP_MAX6604, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + 
uint8_t Enabled; + uint8_t Speed; + uint8_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ControllerName; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; + uint8_t PaddingConfig; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL = 0, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K = 0, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ = 0, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 +#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) +#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) + +typedef struct { + uint8_t ReadWriteData; //Return data for read. Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select + uint8_t SlaveAddress; //Slave address of device + uint8_t NumCmds; //Number of commands + + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; +} SwI2cRequest_t; // SW I2C Request Table + +typedef struct { + SwI2cRequest_t SwI2cRequest; + + uint32_t Spare[8]; + uint32_t MmHubPadding[8]; // SMU internal use +} SwI2cRequestExternal_t; + +typedef struct { + uint64_t mca_umc_status; + uint64_t mca_umc_addr; + + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + + uint32_t eccPadding; +} EccInfo_t; + +typedef struct { + EccInfo_t EccInfo[24]; +} EccInfoTable_t; + +#define EPCS_HIGH_POWER 600 +#define EPCS_NORMAL_POWER 450 +#define EPCS_LOW_POWER 300 +#define EPCS_SHORTED_POWER 150 +#define EPCS_NO_BOOTUP 0 + +typedef enum{ + EPCS_SHORTED_LIMIT, + EPCS_LOW_POWER_LIMIT, + EPCS_NORMAL_POWER_LIMIT, + EPCS_HIGH_POWER_LIMIT, + EPCS_NOT_CONFIGURED, + EPCS_STATUS_COUNT, +} EPCS_STATUS_e; + +//D3HOT sequences +typedef enum { + BACO_SEQUENCE, + MSR_SEQUENCE, + BAMACO_SEQUENCE, + ULPS_SEQUENCE, + D3HOT_SEQUENCE_COUNT, +} D3HOTSequence_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_DYNAMIC_MODE = 0, + PG_STATIC_MODE, +} PowerGatingMode_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_POWER_DOWN = 0, + PG_POWER_UP, +} PowerGatingSettings_e; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} DroopInt_t; + +typedef enum { + DCS_ARCH_DISABLED, + DCS_ARCH_FADCS, + DCS_ARCH_ASYNC, +} DCS_ARCH_e; + +//Only Clks that have DPM descriptors are listed here +typedef enum { + PPCLK_GFXCLK = 0, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_DCLK_0, + 
PPCLK_VCLK_0, + PPCLK_DISPCLK, + PPCLK_DPPCLK, + PPCLK_DPREFCLK, + PPCLK_DCFCLK, + PPCLK_DTBCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + VOLTAGE_MODE_PPTABLE = 0, + VOLTAGE_MODE_FUSES, + VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + +typedef enum { + AVFS_TEMP_COLD = 0, + AVFS_TEMP_HOT, + AVFS_TEMP_COUNT, +} AVFS_TEMP_e; + +typedef enum { + AVFS_D_G, + AVFS_D_COUNT, +} AVFS_D_e; + + +typedef enum { + UCLK_DIV_BY_1 = 0, + UCLK_DIV_BY_2, + UCLK_DIV_BY_4, + UCLK_DIV_BY_8, +} UCLK_DIV_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW = 0, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +typedef enum { + PWR_CONFIG_TDP = 0, + PWR_CONFIG_TGP, + PWR_CONFIG_TCP_ESTIMATED, + PWR_CONFIG_TCP_MEASURED, + PWR_CONFIG_TBP_DESKTOP, + PWR_CONFIG_TBP_MOBILE, +} PwrConfig_e; + +typedef struct { + uint8_t Padding; + uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint8_t CalculateFopt; // Indication whether FW should calculate Fopt or use values below. Reference FOPT_CALC_e + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + uint32_t Padding3[3]; + uint16_t Padding4; + uint16_t FoptimalDc; //Foptimal frequency in DC power mode. + uint16_t FoptimalAc; //Foptimal frequency in AC power mode. + uint16_t Padding2; +} DpmDescriptor_t; + +typedef enum { + PPT_THROTTLER_PPT0, + PPT_THROTTLER_PPT1, + PPT_THROTTLER_PPT2, + PPT_THROTTLER_PPT3, + PPT_THROTTLER_COUNT +} PPT_THROTTLER_e; + +typedef enum { + TEMP_EDGE, + TEMP_HOTSPOT, + TEMP_HOTSPOT_GFX, + TEMP_HOTSPOT_SOC, + TEMP_MEM, + TEMP_VR_GFX, + TEMP_VR_SOC, + TEMP_VR_MEM0, + TEMP_VR_MEM1, + TEMP_LIQUID0, + TEMP_LIQUID1, + TEMP_PLX, + TEMP_COUNT, +} TEMP_e; + +typedef enum { + TDC_THROTTLER_GFX, + TDC_THROTTLER_SOC, + TDC_THROTTLER_COUNT +} TDC_THROTTLER_e; + +typedef enum { + SVI_PLANE_VDD_GFX, + SVI_PLANE_VDD_SOC, + SVI_PLANE_VDDCI_MEM, + SVI_PLANE_VDDIO_MEM, + SVI_PLANE_COUNT, +} SVI_PLANE_e; + +typedef enum { + PMFW_VOLT_PLANE_GFX, + PMFW_VOLT_PLANE_SOC, + PMFW_VOLT_PLANE_COUNT +} PMFW_VOLT_PLANE_e; + +typedef enum { + CUSTOMER_VARIANT_ROW, + CUSTOMER_VARIANT_FALCON, + CUSTOMER_VARIANT_COUNT, +} CUSTOMER_VARIANT_e; + +typedef enum { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_COUNT, +} POWER_SOURCE_e; + +typedef enum { + MEM_VENDOR_PLACEHOLDER0, // 0 + MEM_VENDOR_SAMSUNG, // 1 + MEM_VENDOR_INFINEON, // 2 + MEM_VENDOR_ELPIDA, // 3 + MEM_VENDOR_ETRON, // 4 + MEM_VENDOR_NANYA, // 5 + MEM_VENDOR_HYNIX, // 6 + MEM_VENDOR_MOSEL, // 7 + MEM_VENDOR_WINBOND, // 8 + MEM_VENDOR_ESMT, // 9 + MEM_VENDOR_PLACEHOLDER1, // 10 + MEM_VENDOR_PLACEHOLDER2, // 11 + MEM_VENDOR_PLACEHOLDER3, // 12 + MEM_VENDOR_PLACEHOLDER4, // 13 + MEM_VENDOR_PLACEHOLDER5, // 14 + MEM_VENDOR_MICRON, // 15 + MEM_VENDOR_COUNT, +} MEM_VENDOR_e; + +typedef enum { + PP_GRTAVFS_HW_CPO_CTL_ZONE0, + PP_GRTAVFS_HW_CPO_CTL_ZONE1, + PP_GRTAVFS_HW_CPO_CTL_ZONE2, + PP_GRTAVFS_HW_CPO_CTL_ZONE3, + PP_GRTAVFS_HW_CPO_CTL_ZONE4, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE0, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE0, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE1, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE1, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE2, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE2, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE3, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE3, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE4, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE4, + PP_GRTAVFS_HW_ZONE0_VF, + 
PP_GRTAVFS_HW_ZONE1_VF1, + PP_GRTAVFS_HW_ZONE2_VF2, + PP_GRTAVFS_HW_ZONE3_VF3, + PP_GRTAVFS_HW_VOLTAGE_GB, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE0, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE1, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE2, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE3, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE4, + PP_GRTAVFS_HW_RESERVED_0, + PP_GRTAVFS_HW_RESERVED_1, + PP_GRTAVFS_HW_RESERVED_2, + PP_GRTAVFS_HW_RESERVED_3, + PP_GRTAVFS_HW_RESERVED_4, + PP_GRTAVFS_HW_RESERVED_5, + PP_GRTAVFS_HW_RESERVED_6, + PP_GRTAVFS_HW_FUSE_COUNT, +} PP_GRTAVFS_HW_FUSE_e; + +typedef enum { + PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_COLD_T0, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z0, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z1, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z2, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z3, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z4, + PP_GRTAVFS_FW_COMMON_FUSE_COUNT, +} PP_GRTAVFS_FW_COMMON_FUSE_e; + +typedef enum { + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_NEG_1, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_0, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_1, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_2, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_3, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_4, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_NEG_1, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_0, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_1, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_2, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_3, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_4, + PP_GRTAVFS_FW_SEP_FUSE_VF_NEG_1_FREQUENCY, + PP_GRTAVFS_FW_SEP_FUSE_VF4_FREQUENCY, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_0, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_1, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_2, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_3, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_4, + PP_GRTAVFS_FW_SEP_FUSE_COUNT, +} PP_GRTAVFS_FW_SEP_FUSE_e; + +#define PP_NUM_RTAVFS_PWL_ZONES 5 +#define PP_NUM_PSM_DIDT_PWL_ZONES 3 + +// VBIOS or PPLIB configures telemetry slope and offset. 
Only slope expected to be set for SVI3 // Slope Q1.7, Offset Q1.2 +typedef struct { + int8_t Offset; // in Amps + uint8_t Padding; + uint16_t MaxCurrent; // in Amps +} SviTelemetryScale_t; + +#define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1 + +#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0 +#define PP_OD_FEATURE_GFX_VMAX_BIT 1 +#define PP_OD_FEATURE_SOC_VMAX_BIT 2 +#define PP_OD_FEATURE_PPT_BIT 3 +#define PP_OD_FEATURE_FAN_CURVE_BIT 4 +#define PP_OD_FEATURE_FAN_LEGACY_BIT 5 +#define PP_OD_FEATURE_FULL_CTRL_BIT 6 +#define PP_OD_FEATURE_TDC_BIT 7 +#define PP_OD_FEATURE_GFXCLK_BIT 8 +#define PP_OD_FEATURE_UCLK_BIT 9 +#define PP_OD_FEATURE_FCLK_BIT 10 +#define PP_OD_FEATURE_ZERO_FAN_BIT 11 +#define PP_OD_FEATURE_TEMPERATURE_BIT 12 +#define PP_OD_FEATURE_EDC_BIT 13 +#define PP_OD_FEATURE_COUNT 14 + +typedef enum { + PP_OD_POWER_FEATURE_ALWAYS_ENABLED, + PP_OD_POWER_FEATURE_DISABLED_WHILE_GAMING, + PP_OD_POWER_FEATURE_ALWAYS_DISABLED, +} PP_OD_POWER_FEATURE_e; + +typedef enum { + FAN_MODE_AUTO = 0, + FAN_MODE_MANUAL_LINEAR, +} FanMode_e; + +typedef enum { + OD_NO_ERROR, + OD_REQUEST_ADVANCED_NOT_SUPPORTED, + OD_UNSUPPORTED_FEATURE, + OD_INVALID_FEATURE_COMBO_ERROR, + OD_GFXCLK_VF_CURVE_OFFSET_ERROR, + OD_VDD_GFX_VMAX_ERROR, + OD_VDD_SOC_VMAX_ERROR, + OD_PPT_ERROR, + OD_FAN_MIN_PWM_ERROR, + OD_FAN_ACOUSTIC_TARGET_ERROR, + OD_FAN_ACOUSTIC_LIMIT_ERROR, + OD_FAN_TARGET_TEMP_ERROR, + OD_FAN_ZERO_RPM_STOP_TEMP_ERROR, + OD_FAN_CURVE_PWM_ERROR, + OD_FAN_CURVE_TEMP_ERROR, + OD_FULL_CTRL_GFXCLK_ERROR, + OD_FULL_CTRL_UCLK_ERROR, + OD_FULL_CTRL_FCLK_ERROR, + OD_FULL_CTRL_VDD_GFX_ERROR, + OD_FULL_CTRL_VDD_SOC_ERROR, + OD_TDC_ERROR, + OD_GFXCLK_ERROR, + OD_UCLK_ERROR, + OD_FCLK_ERROR, + OD_OP_TEMP_ERROR, + OD_OP_GFX_EDC_ERROR, + OD_OP_GFX_PCC_ERROR, + OD_POWER_FEATURE_CTRL_ERROR, +} OD_FAIL_e; + +typedef struct { + uint32_t FeatureCtrlMask; + + //Voltage control + int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; + + uint16_t VddGfxVmax; // in mV + uint16_t VddSocVmax; + + uint8_t IdlePwrSavingFeaturesCtrl; + uint8_t RuntimePwrSavingFeaturesCtrl; + uint16_t Padding; + + //Frequency changes + int16_t GfxclkFoffset; + uint16_t Padding1; + uint16_t UclkFmin; + uint16_t UclkFmax; + uint16_t FclkFmin; + uint16_t FclkFmax; + + //PPT + int16_t Ppt; // % + int16_t Tdc; + + //Fan control + uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; + uint16_t FanMinimumPwm; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanTargetTemperature; // Degrees Celsius + uint8_t FanZeroRpmEnable; + uint8_t FanZeroRpmStopTemp; + uint8_t FanMode; + uint8_t MaxOpTemp; + + uint8_t AdvancedOdModeEnabled; + uint8_t Padding2[3]; + + uint16_t GfxVoltageFullCtrlMode; + uint16_t SocVoltageFullCtrlMode; + uint16_t GfxclkFullCtrlMode; + uint16_t UclkFullCtrlMode; + uint16_t FclkFullCtrlMode; + uint16_t Padding3; + + int16_t GfxEdc; + int16_t GfxPccLimitControl; + + uint16_t GfxclkFmaxVmax; + uint8_t GfxclkFmaxVmaxTemperature; + uint8_t Padding4[1]; + + uint32_t Spare[9]; + uint32_t MmHubPadding[8]; // SMU internal use. Added here instead of in the external struct as a workaround +} OverDriveTable_t;
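+/*
+ * Illustrative note: FanZeroRpmEnable and FanZeroRpmStopTemp above back the
+ * new OD_FAN_ZERO_RPM_ENABLE / OD_FAN_ZERO_RPM_STOP_TEMP clock types wired
+ * up in smu_convert_to_smuclk(); e.g. FanZeroRpmEnable = 1 keeps the fan
+ * parked at 0 RPM until the temperature rises past FanZeroRpmStopTemp.
+ */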
+ +typedef struct { + OverDriveTable_t OverDriveTable; + +} OverDriveTableExternal_t; + +typedef struct { + uint32_t FeatureCtrlMask; + + //Gfx Vf Curve + int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; + //gfx Vmax + uint16_t VddGfxVmax; // in mV + //soc Vmax + uint16_t VddSocVmax; + + //gfxclk + int16_t GfxclkFoffset; + uint16_t Padding; + //uclk + uint16_t UclkFmin; // MHz + uint16_t UclkFmax; // MHz + //fclk + uint16_t FclkFmin; + uint16_t FclkFmax; + + //PPT + int16_t Ppt; // % + //TDC + int16_t Tdc; + + //Fan Curve + uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; + //Fan Legacy + uint16_t FanMinimumPwm; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanTargetTemperature; // Degrees Celsius + //zero fan + uint8_t FanZeroRpmEnable; + //temperature + uint8_t MaxOpTemp; + uint8_t Padding1[2]; + + //Full Ctrl + uint16_t GfxVoltageFullCtrlMode; + uint16_t SocVoltageFullCtrlMode; + uint16_t GfxclkFullCtrlMode; + uint16_t UclkFullCtrlMode; + uint16_t FclkFullCtrlMode; + //EDC + int16_t GfxEdc; + int16_t GfxPccLimitControl; + int16_t Padding2; + + uint32_t Spare[5]; +} OverDriveLimits_t; + +typedef enum { + BOARD_GPIO_SMUIO_0, + BOARD_GPIO_SMUIO_1, + BOARD_GPIO_SMUIO_2, + BOARD_GPIO_SMUIO_3, + BOARD_GPIO_SMUIO_4, + BOARD_GPIO_SMUIO_5, + BOARD_GPIO_SMUIO_6, + BOARD_GPIO_SMUIO_7, + BOARD_GPIO_SMUIO_8, + BOARD_GPIO_SMUIO_9, + BOARD_GPIO_SMUIO_10, + BOARD_GPIO_SMUIO_11, + BOARD_GPIO_SMUIO_12, + BOARD_GPIO_SMUIO_13, + BOARD_GPIO_SMUIO_14, + BOARD_GPIO_SMUIO_15, + BOARD_GPIO_SMUIO_16, + BOARD_GPIO_SMUIO_17, + BOARD_GPIO_SMUIO_18, + BOARD_GPIO_SMUIO_19, + BOARD_GPIO_SMUIO_20, + BOARD_GPIO_SMUIO_21, + BOARD_GPIO_SMUIO_22, + BOARD_GPIO_SMUIO_23, + BOARD_GPIO_SMUIO_24, + BOARD_GPIO_SMUIO_25, + BOARD_GPIO_SMUIO_26, + BOARD_GPIO_SMUIO_27, + BOARD_GPIO_SMUIO_28, + BOARD_GPIO_SMUIO_29, + BOARD_GPIO_SMUIO_30, + BOARD_GPIO_SMUIO_31, + MAX_BOARD_GPIO_SMUIO_NUM, + BOARD_GPIO_DC_GEN_A, + BOARD_GPIO_DC_GEN_B, + BOARD_GPIO_DC_GEN_C, + BOARD_GPIO_DC_GEN_D, + BOARD_GPIO_DC_GEN_E, + BOARD_GPIO_DC_GEN_F, + BOARD_GPIO_DC_GEN_G, + BOARD_GPIO_DC_GENLK_CLK, + BOARD_GPIO_DC_GENLK_VSYNC, + BOARD_GPIO_DC_SWAPLOCK_A, + BOARD_GPIO_DC_SWAPLOCK_B, + MAX_BOARD_DC_GPIO_NUM, + BOARD_GPIO_LV_EN, +} BOARD_GPIO_TYPE_e; + +#define INVALID_BOARD_GPIO 0xFF + + +typedef struct { + //PLL 0 + uint16_t InitImuClk; + uint16_t InitSocclk; + uint16_t InitMpioclk; + uint16_t InitSmnclk; + //PLL 1 + uint16_t InitDispClk; + uint16_t InitDppClk; + uint16_t InitDprefclk; + uint16_t InitDcfclk; + uint16_t InitDtbclk; + uint16_t InitDbguSocClk; + //PLL 2 + uint16_t InitGfxclk_bypass; + uint16_t InitMp1clk; + uint16_t InitLclk; + uint16_t InitDbguBacoClk; + uint16_t InitBaco400clk; + uint16_t InitBaco1200clk_bypass; + uint16_t InitBaco700clk_bypass; + uint16_t InitBaco500clk; + // PLL 3 + uint16_t InitDclk0; + uint16_t InitVclk0; + // PLL 4 + uint16_t InitFclk; + uint16_t Padding1; + // PLL 5 + //UCLK clocks, assumed all UCLK instances will be the same.
+ uint8_t InitUclkLevel; // =0,1,2,3,4,5 frequency from FreqTableUclk + + uint8_t Padding[3]; + + uint32_t InitVcoFreqPll0; //smu_socclk_t + uint32_t InitVcoFreqPll1; //smu_displayclk_t + uint32_t InitVcoFreqPll2; //smu_nbioclk_t + uint32_t InitVcoFreqPll3; //smu_vcnclk_t + uint32_t InitVcoFreqPll4; //smu_fclk_t + uint32_t InitVcoFreqPll5; //smu_uclk_01_t + uint32_t InitVcoFreqPll6; //smu_uclk_23_t + uint32_t InitVcoFreqPll7; //smu_uclk_45_t + uint32_t InitVcoFreqPll8; //smu_uclk_67_t + + //encoding will be SVI3 + uint16_t InitGfx; // In mV(Q2) , should be 0? + uint16_t InitSoc; // In mV(Q2) + uint16_t InitVddIoMem; // In mV(Q2) MemVdd + uint16_t InitVddCiMem; // In mV(Q2) VMemP + + //uint16_t Padding2; + + uint32_t Spare[8]; +} BootValues_t; + +typedef struct { + uint16_t Power[PPT_THROTTLER_COUNT][POWER_SOURCE_COUNT]; // Watts + uint16_t Tdc[TDC_THROTTLER_COUNT]; // Amps + + uint16_t Temperature[TEMP_COUNT]; // Celsius + + uint8_t PwmLimitMin; + uint8_t PwmLimitMax; + uint8_t FanTargetTemperature; + uint8_t Spare1[1]; + + uint16_t AcousticTargetRpmThresholdMin; + uint16_t AcousticTargetRpmThresholdMax; + + uint16_t AcousticLimitRpmThresholdMin; + uint16_t AcousticLimitRpmThresholdMax; + + uint16_t PccLimitMin; + uint16_t PccLimitMax; + + uint16_t FanStopTempMin; + uint16_t FanStopTempMax; + uint16_t FanStartTempMin; + uint16_t FanStartTempMax; + + uint16_t PowerMinPpt0[POWER_SOURCE_COUNT]; + uint32_t Spare[11]; +} MsgLimits_t; + +typedef struct { + uint16_t BaseClockAc; + uint16_t GameClockAc; + uint16_t BoostClockAc; + uint16_t BaseClockDc; + uint16_t GameClockDc; + uint16_t BoostClockDc; + uint16_t MaxReportedClock; + uint16_t Padding; + uint32_t Reserved[3]; +} DriverReportedClocks_t; + +typedef struct { + uint8_t DcBtcEnabled; + uint8_t Padding[3]; + + uint16_t DcTol; // mV Q2 + uint16_t DcBtcGb; // mV Q2 + + uint16_t DcBtcMin; // mV Q2 + uint16_t DcBtcMax; // mV Q2 + + LinearInt_t DcBtcGbScalar; +} AvfsDcBtcParams_t; + +typedef struct { + uint16_t AvfsTemp[AVFS_TEMP_COUNT]; //in degrees C + uint16_t VftFMin; // in MHz + uint16_t VInversion; // in mV Q2 + QuadraticInt_t qVft[AVFS_TEMP_COUNT]; + QuadraticInt_t qAvfsGb; + QuadraticInt_t qAvfsGb2; +} AvfsFuseOverride_t; + +//all settings maintained by PFE team +typedef struct { + uint8_t Version; + uint8_t Spare8[3]; + // SECTION: Feature Control + uint32_t FeaturesToRun[NUM_FEATURES / 32]; // Features that PMFW will attempt to enable. Use FEATURE_*_BIT as mapping + // SECTION: FW DSTATE Settings + uint32_t FwDStateMask; // See FW_DSTATE_*_BIT for mapping + // SECTION: Advanced Options + uint32_t DebugOverrides; + + uint32_t Spare[2]; +} PFE_Settings_t; + +typedef struct { + // SECTION: Version + uint32_t Version; // should be unique to each SKU(i.e if any value changes in below structure then this value must be different) + + // SECTION: Miscellaneous Configuration + uint8_t TotalPowerConfig; // Determines how PMFW calculates the power. Use defines from PwrConfig_e + uint8_t CustomerVariant; //To specify if this PPTable is intended for a particular customer. Use defines from CUSTOMER_VARIANT_e + uint8_t MemoryTemperatureTypeMask; // Bit mapping indicating which methods of memory temperature reading are enabled. 
Use defines from MEM_TEMP_*BIT + uint8_t SmartShiftVersion; // Determine what SmartShift feature version is supported. Use defines from SMARTSHIFT_VERSION_e + + // SECTION: Infrastructure Limits + uint8_t SocketPowerLimitSpare[10]; + + //if set to 1, SocketPowerLimitAc and SocketPowerLimitDc will be interpreted as legacy programs (i.e. absolute power). If 0, all except index 0 will be scalars + //relative to index 0 + uint8_t EnableLegacyPptLimit; + uint8_t UseInputTelemetry; //applicable to SVI3 only and only to be set if the VRs support it + + uint8_t SmartShiftMinReportedPptinDcs; //minimum possible active power consumption for this SKU. Used for SmartShift power reporting + + uint8_t PaddingPpt[7]; + + uint16_t HwCtfTempLimit; // In degrees Celsius. Temperature above which HW will trigger CTF. Consumed by VBIOS only + + uint16_t PaddingInfra; + + // Per year normalized Vmax state failure rates (sum of the two domains divided by life time in years) + uint32_t FitControllerFailureRateLimit; //in IEEE float + //Expected GFX Duty Cycle at Vmax. + uint32_t FitControllerGfxDutyCycle; // in IEEE float + //Expected SOC Duty Cycle at Vmax. + uint32_t FitControllerSocDutyCycle; // in IEEE float + + //This offset will be deducted from the controller output before it goes through the SOC Vset limiter block. + uint32_t FitControllerSocOffset; //in IEEE float + + uint32_t GfxApccPlusResidencyLimit; // Percentage value. Used by APCC+ controller to control PCC residency to some value + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See THROTTLER_*_BIT for mapping + + + // SECTION: Voltage Control Parameters + uint16_t UlvVoltageOffset[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). ULV offset used in either GFX_ULV or SOC_ULV(part of FW_DSTATE) + + uint8_t Padding[2]; + uint16_t DeepUlvVoltageOffsetSoc; // In mV(Q2) Long Idle Vmin (deep ULV), for VDD_SOC as part of FW_DSTATE + + // Voltage Limits + uint16_t DefaultMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage without FIT controller enabled + uint16_t BoostMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage with FIT controller enabled + + //Vmin Optimizations + int16_t VminTempHystersis[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature hysteresis for switching between low/high temperature values for Vmin + int16_t VminTempThreshold[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature threshold for switching between low/high temperature values for Vmin + uint16_t Vmin_Hot_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at hot. + uint16_t Vmin_Cold_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at cold. + uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at hot. + uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at cold. + uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Worst-case aging margin + uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset applied to T0 Hot + uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset applied to T0 Cold + + //This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for. + uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT]; + //Linear offset or GB term to account for mis-correlation between PSM and Vmin shift trends across parts.
+ uint16_t VcBtcVmin2PsmDegrationGb[PMFW_VOLT_PLANE_COUNT]; + //Scalar coefficient of the PSM aging degradation function + uint32_t VcBtcPsmA[PMFW_VOLT_PLANE_COUNT]; // A_PSM + //Exponential coefficient of the PSM aging degradation function + uint32_t VcBtcPsmB[PMFW_VOLT_PLANE_COUNT]; // B_PSM + //Scalar coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold. + uint32_t VcBtcVminA[PMFW_VOLT_PLANE_COUNT]; // A_VMIN + //Exponential coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold. + uint32_t VcBtcVminB[PMFW_VOLT_PLANE_COUNT]; // B_VMIN + + uint8_t PerPartVminEnabled[PMFW_VOLT_PLANE_COUNT]; + uint8_t VcBtcEnabled[PMFW_VOLT_PLANE_COUNT]; + + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + + QuadraticInt_t Gfx_Vmin_droop; + QuadraticInt_t Soc_Vmin_droop; + uint32_t SpareVmin[6]; + + //SECTION: DPM Configuration 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableShadowUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz + uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + + uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint16_t GfxclkAibFmax; + uint16_t GfxDpmPadding; + + //GFX Idle Power Settings + uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz + uint16_t GfxclkFgfxoffExitImu; // Exit/Entry in IMU stage (BYPASS), in Mhz + uint16_t GfxclkFgfxoffExitRlc; // Exit in RLC stage (PLL), in Mhz + uint16_t GfxclkThrottleClock; //Used primarily in DCS + uint8_t EnableGfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages + uint8_t GfxIdlePadding; + + uint8_t SmsRepairWRCKClkDivEn; + uint8_t SmsRepairWRCKClkDivVal; + uint8_t GfxOffEntryEarlyMGCGEn; + uint8_t GfxOffEntryForceCGCGEn; + uint8_t GfxOffEntryForceCGCGDelayEn; + uint8_t GfxOffEntryForceCGCGDelayVal; // in microseconds + + uint16_t GfxclkFreqGfxUlv; // in MHz + uint8_t GfxIdlePadding2[2]; + uint32_t GfxOffEntryHysteresis; //For RLC to count after it enters CGCG, and before triggers GFXOFF entry + uint32_t GfxoffSpare[15]; + + // DFLL + uint16_t DfllMstrOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias + uint16_t DfllSlvOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias + uint32_t DfllBtcMasterScalerM; + int32_t DfllBtcMasterScalerB; + uint32_t DfllBtcSlaveScalerM; + int32_t DfllBtcSlaveScalerB; + + uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg + uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg + uint32_t GfxDfllSpare[9]; + + // DVO + uint32_t 
DvoPsmDownThresholdVoltage; //Voltage float
+ uint32_t DvoPsmUpThresholdVoltage; //Voltage float
+ uint32_t DvoFmaxLowScaler; //Unitless float
+
+ // GFX DCS
+ uint32_t PaddingDcs;
+
+ uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
+ uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
+
+ uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS.
+
+ uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase.
+ uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin.
+
+ uint32_t DcsPfGfxFopt; //Default to GFX FMIN
+ uint32_t DcsPfUclkFopt; //Default to UCLK FMIN
+
+ uint8_t FoptEnabled;
+ uint8_t DcsSpare2[3];
+ uint32_t DcsFoptM; //Tuning parameters to shift Fopt calculation, IEEE754 float
+ uint32_t DcsFoptB; //Tuning parameters to shift Fopt calculation, IEEE754 float
+ uint32_t DcsSpare[9];
+
+ // UCLK section
+ uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations
+ uint8_t PaddingMem[3];
+
+ uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 6 Primary SW DPM states (6 + 6 Shadow)
+ uint8_t UclkDpmShadowPstates [NUM_UCLK_DPM_LEVELS]; // 6 Shadow SW DPM states (6 + 6 Shadow)
+ uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
+ uint8_t FreqTableShadowUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
+ uint16_t MemVmempVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
+ uint16_t MemVddioVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
+ uint16_t DalDcModeMaxUclkFreq;
+ uint8_t PaddingsMem[2];
+ //FCLK Section
+ uint32_t PaddingFclk;
+
+ // Link DPM Settings
+ uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
+ uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
+ uint16_t LclkFreq[NUM_LINK_LEVELS];
+
+ // SECTION: VDD_GFX AVFS
+ uint8_t OverrideGfxAvfsFuses;
+ uint8_t GfxAvfsPadding[1];
+ uint16_t DroopGBStDev;
+
+ uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain
+ uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
+ //uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
+
+ uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1];
+ uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t spare_HwRtAvfsFuses[19];
+
+ uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
+ uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
+
+ uint32_t SocFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+ uint32_t GfxL2FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+ //uint32_t GfxSeFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+ uint32_t spare_FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
+
+ uint32_t Soc_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Soc_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES];
+
+ uint32_t Gfx_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t Gfx_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
+ uint32_t
Gfx_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Gfx_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t Gfx_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t dGbV_dT_vmin; + uint32_t dGbV_dT_vmax; + + uint32_t PaddingV2F[4]; + + AvfsDcBtcParams_t DcBtcGfxParams; + QuadraticInt_t SSCurve_GFX; + uint32_t GfxAvfsSpare[29]; + + //SECTION: VDD_SOC AVFS + uint8_t OverrideSocAvfsFuses; + uint8_t MinSocAvfsRevision; + uint8_t SocAvfsPadding[2]; + + AvfsFuseOverride_t SocAvfsFuseOverride[AVFS_D_COUNT]; + + DroopInt_t dBtcGbSoc[AVFS_D_COUNT]; // GHz->V BtcGb + + LinearInt_t qAgingGb[AVFS_D_COUNT]; // GHz->V + + QuadraticInt_t qStaticVoltageOffset[AVFS_D_COUNT]; // GHz->V + + AvfsDcBtcParams_t DcBtcSocParams[AVFS_D_COUNT]; + + QuadraticInt_t SSCurve_SOC; + uint32_t SocAvfsSpare[29]; + + //SECTION: Boot clock and voltage values + BootValues_t BootValues; + + //SECTION: Driver Reported Clocks + DriverReportedClocks_t DriverReportedClocks; + + //SECTION: Message Limits + MsgLimits_t MsgLimits; + + //SECTION: OverDrive Limits + OverDriveLimits_t OverDriveLimitsBasicMin; + OverDriveLimits_t OverDriveLimitsBasicMax; + OverDriveLimits_t OverDriveLimitsAdvancedMin; + OverDriveLimits_t OverDriveLimitsAdvancedMax; + + // Section: Total Board Power idle vs active coefficients + uint8_t TotalBoardPowerSupport; + uint8_t TotalBoardPowerPadding[1]; + uint16_t TotalBoardPowerRoc; + + //PMFW-11158 + QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT]; + + // APT GFX to UCLK mapping + int32_t AptUclkGfxclkLookup[POWER_SOURCE_COUNT][6]; + uint32_t AptUclkGfxclkLookupHyst[POWER_SOURCE_COUNT][6]; + uint32_t AptPadding; + + // Xvmin didt + QuadraticInt_t GfxXvminDidtDroopThresh; + uint32_t GfxXvminDidtResetDDWait; + uint32_t GfxXvminDidtClkStopWait; + uint32_t GfxXvminDidtFcsStepCtrl; + uint32_t GfxXvminDidtFcsWaitCtrl; + + // PSM based didt controller + uint32_t PsmModeEnabled; //0: all disabled 1: static mode only 2: dynamic mode only 3:static + dynamic mode + uint32_t P2v_a; // floating point in U32 format + uint32_t P2v_b; + uint32_t P2v_c; + uint32_t T2p_a; + uint32_t T2p_b; + uint32_t T2p_c; + uint32_t P2vTemp; + QuadraticInt_t PsmDidtStaticSettings; + QuadraticInt_t PsmDidtDynamicSettings; + uint8_t PsmDidtAvgDiv; + uint8_t PsmDidtForceStall; + uint16_t PsmDidtReleaseTimer; + uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog + // CAC EDC + uint32_t CacEdcCacLeakageC0; + uint32_t CacEdcCacLeakageC1; + uint32_t CacEdcCacLeakageC2; + uint32_t CacEdcCacLeakageC3; + uint32_t CacEdcCacLeakageC4; + uint32_t CacEdcCacLeakageC5; + uint32_t CacEdcGfxClkScalar; + uint32_t CacEdcGfxClkIntercept; + uint32_t CacEdcCac_m; + uint32_t CacEdcCac_b; + uint32_t CacEdcCurrLimitGuardband; + uint32_t CacEdcDynToTotalCacRatio; + // GFX EDC XVMIN + uint32_t XVmin_Gfx_EdcThreshScalar; + uint32_t XVmin_Gfx_EdcEnableFreq; + uint32_t XVmin_Gfx_EdcPccAsStepCtrl; + uint32_t XVmin_Gfx_EdcPccAsWaitCtrl; + uint16_t XVmin_Gfx_EdcThreshold; + uint16_t XVmin_Gfx_EdcFiltHysWaitCtrl; + // SOC EDC XVMIN + uint32_t XVmin_Soc_EdcThreshScalar; + uint32_t XVmin_Soc_EdcEnableFreq; + uint32_t XVmin_Soc_EdcThreshold; // LPF: number of cycles Xvmin_trig_filt will react. + uint16_t XVmin_Soc_EdcStepUpTime; // 10 bit, refclk count to step up throttle when PCC remains asserted. 
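Coefficients marked "in IEEE float" or "floating point in U32 format" above (the FitController* terms, DcsFoptM/B, and the P2v_*/T2p_* values) carry an IEEE-754 binary32 value inside a uint32_t so the table layout stays fixed-width. A hedged sketch of how a consumer might decode such a field; smu_u32_to_float is an illustrative name, not an existing helper:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static float smu_u32_to_float(uint32_t bits)
{
	float f;

	/* memcpy reinterprets the bit pattern without strict-aliasing issues */
	memcpy(&f, &bits, sizeof(f));
	return f;
}

int main(void)
{
	uint32_t raw = 0x3f800000; /* bit pattern of 1.0f */

	printf("0x%08x -> %f\n", raw, smu_u32_to_float(raw));
	return 0;
}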
+ uint16_t XVmin_Soc_EdcStepDownTime;// 10 bit, refclk count to step down throttle when PCC remains asserted. + uint8_t XVmin_Soc_EdcInitPccStep; // 3 bit, First Pcc Step number that will applied when PCC asserts. + uint8_t PaddingSocEdc[3]; + + // Fuse Override for SOC and GFX XVMIN + uint8_t GfxXvminFuseOverride; + uint8_t SocXvminFuseOverride; + uint8_t PaddingXvminFuseOverride[2]; + uint8_t GfxXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value + uint8_t GfxXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value + uint8_t SocXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value + uint8_t SocXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value + + + uint16_t GfxXvminFddVolt0; // low voltage, in VID + uint16_t GfxXvminFddVolt1; // mid voltage, in VID + uint16_t GfxXvminFddVolt2; // high voltage, in VID + uint16_t SocXvminFddVolt0; // low voltage, in VID + uint16_t SocXvminFddVolt1; // mid voltage, in VID + uint16_t SocXvminFddVolt2; // high voltage, in VID + uint16_t GfxXvminDsFddDsm[6]; // XVMIN DS, same organization with fuse + uint16_t GfxXvminEdcFddDsm[6];// XVMIN GFX EDC, same organization with fuse + uint16_t SocXvminEdcFddDsm[6];// XVMIN SOC EDC, same organization with fuse + + // SECTION: Sku Reserved + uint32_t Spare; + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} SkuTable_t; + +typedef struct { + uint8_t SlewRateConditions; + uint8_t LoadLineAdjust; + uint8_t VoutOffset; + uint8_t VidMax; + uint8_t VidMin; + uint8_t TenBitTelEn; + uint8_t SixteenBitTelEn; + uint8_t OcpThresh; + uint8_t OcpWarnThresh; + uint8_t OcpSettings; + uint8_t VrhotThresh; + uint8_t OtpThresh; + uint8_t UvpOvpDeltaRef; + uint8_t PhaseShed; + uint8_t Padding[10]; + uint32_t SettingOverrideMask; +} Svi3RegulatorSettings_t; + +typedef struct { + // SECTION: Version + uint32_t Version; //should be unique to each board type + + // SECTION: I2C Control + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + //SECTION SVI3 Board Parameters + uint8_t SlaveAddrMapping[SVI_PLANE_COUNT]; + uint8_t VrPsiSupport[SVI_PLANE_COUNT]; + + uint32_t Svi3SvcSpeed; + uint8_t EnablePsi6[SVI_PLANE_COUNT]; // only applicable in SVI3 + + // SECTION: Voltage Regulator Settings + Svi3RegulatorSettings_t Svi3RegSettings[SVI_PLANE_COUNT]; + + // SECTION: GPIO Settings + uint8_t LedOffGpio; + uint8_t FanOffGpio; + uint8_t GfxVrPowerStageOffGpio; + + uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching + uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + + uint8_t GthrGpio; // GPIO pin configured for GTHR Event + uint8_t GthrPolarity; // replace GPIO polarity for GTHR + + // LED Display Settings + uint8_t LedPin0; // GPIO number for LedPin[0] + uint8_t LedPin1; // GPIO number for LedPin[1] + uint8_t LedPin2; // GPIO number for LedPin[2] + uint8_t LedEnableMask; + + uint8_t LedPcie; // GPIO number for PCIE results + uint8_t LedError; // GPIO number for Error Cases + uint8_t PaddingLed; + + // SECTION: Clock Spread Spectrum + + // UCLK Spread Spectrum + uint8_t UclkTrainingModeSpreadPercent; // Q4.4 + uint8_t UclkSpreadPadding; + uint16_t UclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT]; + + // DFLL Spread Spectrum + uint8_t GfxclkSpreadEnable; + + // FCLK Spread Spectrum + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // Section: Memory Config + uint8_t DramWidth; // Width of interface to 
the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e + uint8_t PaddingMem1[7]; + + // SECTION: UMC feature flags + uint8_t HsrEnabled; + uint8_t VddqOffEnabled; + uint8_t PaddingUmcFlags[2]; + + uint32_t Paddign1; + uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS + + uint8_t FuseWritePowerMuxPresent; + uint8_t FuseWritePadding[3]; + + // SECTION: EDC Params + uint32_t LoadlineGfx; + uint32_t LoadlineSoc; + uint32_t GfxEdcLimit; + uint32_t SocEdcLimit; + + uint32_t RestBoardPower; //power consumed by board that is not captured by the SVI3 input telemetry + uint32_t ConnectorsImpedance; // impedance of the input ATX power connectors + + uint8_t EpcsSens0; //GPIO number for External Power Connector Support Sense0 + uint8_t EpcsSens1; //GPIO Number for External Power Connector Support Sense1 + uint8_t PaddingEpcs[2]; + + // SECTION: Board Reserved + uint32_t BoardSpare[52]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} BoardTable_t; + +typedef struct { + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in AC mode. Multiple limits supported + + uint16_t VrTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with VR regulator maximum temperature + + int16_t TotalIdleBoardPowerM; + int16_t TotalIdleBoardPowerB; + int16_t TotalBoardPowerM; + int16_t TotalBoardPowerB; + + uint16_t TemperatureLimit[TEMP_COUNT]; // In degrees Celsius. Temperature limit associated with each input + + // SECTION: Fan Control + uint16_t FanStopTemp[TEMP_COUNT]; //Celsius + uint16_t FanStartTemp[TEMP_COUNT]; //Celsius + + uint16_t FanGain[TEMP_COUNT]; + + uint16_t FanPwmMin; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanMaximumRpm; + uint16_t MGpuAcousticLimitRpmThreshold; + uint16_t FanTargetGfxclk; + uint32_t TempInputSelectMask; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + uint16_t FanPadding; + uint16_t FanTargetTemperature[TEMP_COUNT]; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FanPadding2; + + uint16_t FwCtfLimit[TEMP_COUNT]; + + uint16_t IntakeTempEnableRPM; + int16_t IntakeTempOffsetTemp; + uint16_t IntakeTempReleaseTemp; + uint16_t IntakeTempHighIntakeAcousticLimit; + + uint16_t IntakeTempAcouticLimitReleaseRate; + int16_t FanAbnormalTempLimitOffset; // FanStalledTempLimitOffset + uint16_t FanStalledTriggerRpm; // + uint16_t FanAbnormalTriggerRpmCoeff; // FanAbnormalTriggerRpm + + uint16_t FanSpare[1]; + uint8_t FanIntakeSensorSupport; + uint8_t FanIntakePadding; + uint32_t FanSpare2[12]; + + uint32_t ODFeatureCtrlMask; + + uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix + uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron + uint16_t TemperatureFwCtfLimit_Hynix; + uint16_t TemperatureFwCtfLimit_Micron; + + // SECTION: Board Reserved + uint16_t PlatformTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with platform maximum temperature per VR current rail + uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in DC mode. 
Multiple limits supported + uint16_t SocketPowerLimitSmartShift2; // In Watts. Power limit used SmartShift + uint16_t CustomSkuSpare16b; + uint32_t CustomSkuSpare32b[10]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} CustomSkuTable_t; + +typedef struct { + PFE_Settings_t PFE_Settings; + SkuTable_t SkuTable; + CustomSkuTable_t CustomSkuTable; + BoardTable_t BoardTable; +} PPTable_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t FclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + uint16_t UclkMaxActivityLpfTau; + uint16_t SocketPowerLpfTau; + uint16_t VcnClkAverageLpfTau; + uint16_t VcnUsageAverageLpfTau; + uint16_t PcieActivityLpTau; +} DriverSmuConfig_t; + +typedef struct { + DriverSmuConfig_t DriverSmuConfig; + + uint32_t Spare[8]; + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfigExternal_t; + + +typedef struct { + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz + uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + + uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint16_t Padding; + + uint32_t Spare[32]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use + +} DriverInfoTable_t; + +typedef struct { + uint32_t CurrClock[PPCLK_COUNT]; + + uint16_t AverageGfxclkFrequencyTarget; + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageFclkFrequencyPreDs; + uint16_t AverageFclkFrequencyPostDs; + uint16_t AverageMemclkFrequencyPreDs ; // this is scaled to actual memory clock + uint16_t AverageMemclkFrequencyPostDs ; // this is scaled to actual memory clock + uint16_t AverageVclk0Frequency ; + uint16_t AverageDclk0Frequency ; + uint16_t AverageVclk1Frequency ; + uint16_t AverageDclk1Frequency ; + uint16_t AveragePCIeBusy ; + uint16_t dGPU_W_MAX ; + uint16_t padding ; + + uint16_t MovingAverageGfxclkFrequencyTarget; + uint16_t MovingAverageGfxclkFrequencyPreDs; + uint16_t MovingAverageGfxclkFrequencyPostDs; + uint16_t MovingAverageFclkFrequencyPreDs; + uint16_t MovingAverageFclkFrequencyPostDs; + uint16_t MovingAverageMemclkFrequencyPreDs; + uint16_t MovingAverageMemclkFrequencyPostDs; + uint16_t MovingAverageVclk0Frequency; + uint16_t MovingAverageDclk0Frequency; + uint16_t MovingAverageGfxActivity; + uint16_t MovingAverageUclkActivity; + uint16_t MovingAverageVcn0ActivityPercentage; + uint16_t MovingAveragePCIeBusy; + uint16_t MovingAverageUclkActivity_MAX; + uint16_t MovingAverageSocketPower; + uint16_t MovingAveragePadding; + + uint32_t MetricsCounter ; + + uint16_t AvgVoltage[SVI_PLANE_COUNT]; + uint16_t AvgCurrent[SVI_PLANE_COUNT]; + + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint16_t AverageVcn0ActivityPercentage; + uint16_t Vcn1ActivityPercentage ; + + uint32_t EnergyAccumulator; + uint16_t 
AverageSocketPower; + uint16_t AverageTotalBoardPower; + + uint16_t AvgTemperature[TEMP_COUNT]; + uint16_t AvgTemperatureFanIntake; + + uint8_t PcieRate ; + uint8_t PcieWidth ; + + uint8_t AvgFanPwm; + uint8_t Padding[1]; + uint16_t AvgFanRpm; + + + uint8_t ThrottlingPercentage[THROTTLER_COUNT]; + uint8_t VmaxThrottlingPercentage; + uint8_t padding1[2]; + + //metrics for D3hot entry/exit and driver ARM msgs + uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; + uint32_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT]; + uint32_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT]; + + uint16_t ApuSTAPMSmartShiftLimit; + uint16_t ApuSTAPMLimit; + uint16_t AvgApuSocketPower; + + uint16_t AverageUclkActivity_MAX; + + uint32_t PublicSerialNumberLower; + uint32_t PublicSerialNumberUpper; + +} SmuMetrics_t; + +typedef struct { + SmuMetrics_t SmuMetrics; + uint32_t Spare[30]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetricsExternal_t; + +typedef struct { + uint8_t WmSetting; + uint8_t Flags; + uint8_t Padding[2]; + +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WATERMARKS_CLOCK_RANGE = 0, + WATERMARKS_DUMMY_PSTATE, + WATERMARKS_MALL, + WATERMARKS_COUNT, +} WATERMARKS_FLAGS_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES]; +} Watermarks_t; + +typedef struct { + Watermarks_t Watermarks; + uint32_t Spare[16]; + + uint32_t MmHubPadding[8]; // SMU internal use +} WatermarksExternal_t; + +typedef struct { + uint16_t avgPsmCount[76]; + uint16_t minPsmCount[76]; + uint16_t maxPsmCount[76]; + float avgPsmVoltage[76]; + float minPsmVoltage[76]; + float maxPsmVoltage[76]; +} AvfsDebugTable_t; + +typedef struct { + AvfsDebugTable_t AvfsDebugTable; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsDebugTableExternal_t; + + +typedef struct { + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t PaddingGfx; + uint16_t Gfx_MinActiveFreq; // MHz + uint16_t Gfx_BoosterFreq; // MHz + uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Gfx_PD_Data_limit_a; // Q16 + uint32_t Gfx_PD_Data_limit_b; // Q16 + uint32_t Gfx_PD_Data_limit_c; // Q16 + uint32_t Gfx_PD_Data_error_coeff; // Q16 + uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 + + uint8_t Fclk_ActiveHystLimit; + uint8_t Fclk_IdleHystLimit; + uint8_t Fclk_FPS; + uint8_t Fclk_MinActiveFreqType; + uint8_t Fclk_BoosterFreqType; + uint8_t PaddingFclk; + uint16_t Fclk_MinActiveFreq; // MHz + uint16_t Fclk_BoosterFreq; // MHz + uint16_t Fclk_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Fclk_PD_Data_limit_a; // Q16 + uint32_t Fclk_PD_Data_limit_b; // Q16 + uint32_t Fclk_PD_Data_limit_c; // Q16 + uint32_t Fclk_PD_Data_error_coeff; // Q16 + uint32_t Fclk_PD_Data_error_rate_coeff; // Q16 + + uint32_t Mem_UpThreshold_Limit[NUM_UCLK_DPM_LEVELS]; // Q16 + uint8_t Mem_UpHystLimit[NUM_UCLK_DPM_LEVELS]; + uint16_t Mem_DownHystLimit[NUM_UCLK_DPM_LEVELS]; + uint16_t Mem_Fps; + +} DpmActivityMonitorCoeffInt_t; + + +typedef struct { + DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt; + uint32_t MmHubPadding[8]; // SMU internal use +} DpmActivityMonitorCoeffIntExternal_t; + + + +// Workload bits +#define WORKLOAD_PPLIB_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define 
WORKLOAD_PPLIB_COMPUTE_BIT 5
+#define WORKLOAD_PPLIB_CUSTOM_BIT 6
+#define WORKLOAD_PPLIB_WINDOW_3D_BIT 7
+#define WORKLOAD_PPLIB_DIRECT_ML_BIT 8
+#define WORKLOAD_PPLIB_CGVDI_BIT 9
+#define WORKLOAD_PPLIB_COUNT 10
+
+
+// These defines are used with the following messages:
+// SMC_MSG_TransferTableDram2Smu
+// SMC_MSG_TransferTableSmu2Dram
+
+// Table transfer status
+#define TABLE_TRANSFER_OK 0x0
+#define TABLE_TRANSFER_FAILED 0xFF
+#define TABLE_TRANSFER_PENDING 0xAB
+
+#define TABLE_PPT_FAILED 0x100
+#define TABLE_TDC_FAILED 0x200
+#define TABLE_TEMP_FAILED 0x400
+#define TABLE_FAN_TARGET_TEMP_FAILED 0x800
+#define TABLE_FAN_STOP_TEMP_FAILED 0x1000
+#define TABLE_FAN_START_TEMP_FAILED 0x2000
+#define TABLE_FAN_PWM_MIN_FAILED 0x4000
+#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000
+#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000
+#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000
+
+// Table types
+#define TABLE_PPTABLE 0
+#define TABLE_COMBO_PPTABLE 1
+#define TABLE_WATERMARKS 2
+#define TABLE_AVFS_PSM_DEBUG 3
+#define TABLE_PMSTATUSLOG 4
+#define TABLE_SMU_METRICS 5
+#define TABLE_DRIVER_SMU_CONFIG 6
+#define TABLE_ACTIVITY_MONITOR_COEFF 7
+#define TABLE_OVERDRIVE 8
+#define TABLE_I2C_COMMANDS 9
+#define TABLE_DRIVER_INFO 10
+#define TABLE_ECCINFO 11
+#define TABLE_CUSTOM_SKUTABLE 12
+#define TABLE_COUNT 13
+
+//IH Interrupt ID
+#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
+#define IH_INTERRUPT_CONTEXT_ID_BACO 0x2
+#define IH_INTERRUPT_CONTEXT_ID_AC 0x3
+#define IH_INTERRUPT_CONTEXT_ID_DC 0x4
+#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
+#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
+#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
+#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 97522c085258..1bc30db22f9c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -169,7 +169,6 @@ typedef struct {
 uint8_t VpeClkLevelsEnabled;
 uint8_t NumMemPstatesEnabled;
 uint8_t NumFclkLevelsEnabled;
- uint8_t spare;
 uint32_t MinGfxClk;
 uint32_t MaxGfxClk;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
new file mode 100644
index 000000000000..d7505cfc433a
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU_13_0_12_PMFW_H +#define SMU_13_0_12_PMFW_H + +#define NUM_VCLK_DPM_LEVELS 4 +#define NUM_DCLK_DPM_LEVELS 4 +#define NUM_SOCCLK_DPM_LEVELS 4 +#define NUM_LCLK_DPM_LEVELS 4 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_FCLK_DPM_LEVELS 4 +#define NUM_XGMI_DPM_LEVELS 2 +#define NUM_CXL_BITRATES 4 +#define NUM_PCIE_BITRATES 4 +#define NUM_XGMI_BITRATES 4 +#define NUM_XGMI_WIDTHS 3 +#define NUM_TDP_GROUPS 4 +#define NUM_SOC_P2S_TABLES 6 +#define NUM_GFX_P2S_TABLES 8 +#define NUM_PSM_DIDT_THRESHOLDS 3 +#define NUM_XVMIN_VMIN_THRESHOLDS 3 + +#define PRODUCT_MODEL_NUMBER_LEN 20 +#define PRODUCT_NAME_LEN 64 +#define PRODUCT_SERIAL_LEN 20 +#define PRODUCT_MANUFACTURER_NAME_LEN 32 +#define PRODUCT_FRU_ID_LEN 32 + +typedef enum { +/*0*/ FEATURE_DATA_CALCULATION = 0, +/*1*/ FEATURE_DPM_FCLK = 1, +/*2*/ FEATURE_DPM_GFXCLK = 2, +/*3*/ FEATURE_DPM_LCLK = 3, +/*4*/ FEATURE_DPM_SOCCLK = 4, +/*5*/ FEATURE_DPM_UCLK = 5, +/*6*/ FEATURE_DPM_VCN = 6, +/*7*/ FEATURE_DPM_XGMI = 7, +/*8*/ FEATURE_DS_FCLK = 8, +/*9*/ FEATURE_DS_GFXCLK = 9, +/*10*/ FEATURE_DS_LCLK = 10, +/*11*/ FEATURE_DS_MP0CLK = 11, +/*12*/ FEATURE_DS_MP1CLK = 12, +/*13*/ FEATURE_DS_MPIOCLK = 13, +/*14*/ FEATURE_DS_SOCCLK = 14, +/*15*/ FEATURE_DS_VCN = 15, +/*16*/ FEATURE_APCC_DFLL = 16, +/*17*/ FEATURE_APCC_PLUS = 17, +/*18*/ FEATURE_PPT = 18, +/*19*/ FEATURE_TDC = 19, +/*20*/ FEATURE_THERMAL = 20, +/*21*/ FEATURE_SOC_PCC = 21, +/*22*/ FEATURE_PROCHOT = 22, +/*23*/ FEATURE_FDD_AID_HBM = 23, +/*24*/ FEATURE_FDD_AID_SOC = 24, +/*25*/ FEATURE_FDD_XCD_EDC = 25, +/*26*/ FEATURE_FDD_XCD_XVMIN = 26, +/*27*/ FEATURE_FW_CTF = 27, +/*28*/ FEATURE_SMU_CG = 28, +/*29*/ FEATURE_PSI7 = 29, +/*30*/ FEATURE_XGMI_PER_LINK_PWR_DOWN = 30, +/*31*/ FEATURE_SOC_DC_RTC = 31, +/*32*/ FEATURE_GFX_DC_RTC = 32, +/*33*/ FEATURE_DVM_MIN_PSM = 33, +/*34*/ FEATURE_PRC = 34, +/*35*/ FEATURE_PSM_SQ_THROTTLER = 35, +/*36*/ FEATURE_PIT = 36, +/*37*/ FEATURE_DVO = 37, +/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38, + +/*39*/ NUM_FEATURES = 39 +} FEATURE_LIST_e; + +//enum for MPIO PCIe gen speed msgs +typedef enum { + PCIE_LINK_SPEED_INDEX_TABLE_RESERVED, + PCIE_LINK_SPEED_INDEX_TABLE_GEN1, + PCIE_LINK_SPEED_INDEX_TABLE_GEN2, + PCIE_LINK_SPEED_INDEX_TABLE_GEN3, + PCIE_LINK_SPEED_INDEX_TABLE_GEN4, + PCIE_LINK_SPEED_INDEX_TABLE_GEN5, + PCIE_LINK_SPEED_INDEX_TABLE_COUNT +} PCIE_LINK_SPEED_INDEX_TABLE_e; + +typedef enum { + GFX_GUARDBAND_OFFSET_0, + GFX_GUARDBAND_OFFSET_1, + GFX_GUARDBAND_OFFSET_2, + GFX_GUARDBAND_OFFSET_3, + GFX_GUARDBAND_OFFSET_4, + GFX_GUARDBAND_OFFSET_5, + GFX_GUARDBAND_OFFSET_6, + GFX_GUARDBAND_OFFSET_7, + GFX_GUARDBAND_OFFSET_COUNT +} GFX_GUARDBAND_OFFSET_e; + +typedef enum { + GFX_DVM_MARGINHI_0, + GFX_DVM_MARGINHI_1, + GFX_DVM_MARGINHI_2, + GFX_DVM_MARGINHI_3, + GFX_DVM_MARGINHI_4, + GFX_DVM_MARGINHI_5, + GFX_DVM_MARGINHI_6, + GFX_DVM_MARGINHI_7, + GFX_DVM_MARGINLO_0, + GFX_DVM_MARGINLO_1, + GFX_DVM_MARGINLO_2, + GFX_DVM_MARGINLO_3, + GFX_DVM_MARGINLO_4, + GFX_DVM_MARGINLO_5, + GFX_DVM_MARGINLO_6, + GFX_DVM_MARGINLO_7, + GFX_DVM_MARGIN_COUNT +} GFX_DVM_MARGIN_e; + +#define SMU_METRICS_TABLE_VERSION 0x12 + +typedef struct __attribute__((packed, aligned(4))) { + uint64_t AccumulationCounter; + + //TEMPERATURE + uint32_t MaxSocketTemperature; + uint32_t MaxVrTemperature; + 
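The firmware metrics tables here are declared __attribute__((packed, aligned(4))) so the driver sees exactly the byte layout the PMFW writes to DRAM: uint64_t members land on 4-byte rather than natural 8-byte boundaries, and no ABI padding is inserted. A stand-alone sketch of the effect, with hypothetical struct and field names:

#include <stdint.h>
#include <stdio.h>

struct natural_layout {	/* default ABI: 4 padding bytes before b, size 16 */
	uint32_t a;
	uint64_t b;
};

struct __attribute__((packed, aligned(4))) wire_layout {	/* size 12 */
	uint32_t a;
	uint64_t b;
};

int main(void)
{
	/* prints "natural=16 wire=12" on common LP64 targets */
	printf("natural=%zu wire=%zu\n",
	       sizeof(struct natural_layout), sizeof(struct wire_layout));
	return 0;
}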
uint32_t MaxHbmTemperature; + uint64_t MaxSocketTemperatureAcc; + uint64_t MaxVrTemperatureAcc; + uint64_t MaxHbmTemperatureAcc; + + //POWER + uint32_t SocketPowerLimit; + uint32_t SocketPower; + + //ENERGY + uint64_t Timestamp; + uint64_t SocketEnergyAcc; + uint64_t XcdEnergyAcc; + uint64_t AidEnergyAcc; + uint64_t HbmEnergyAcc; + + //FREQUENCY + uint32_t GfxclkFrequencyLimit; + uint32_t FclkFrequency; + uint32_t UclkFrequency; + uint32_t SocclkFrequency[4]; + uint32_t VclkFrequency[4]; + uint32_t DclkFrequency[4]; + uint32_t LclkFrequency[4]; + uint64_t GfxclkFrequencyAcc[8]; + + //FREQUENCY RANGE + uint32_t MaxLclkDpmRange; + uint32_t MinLclkDpmRange; + + //XGMI + uint32_t XgmiWidth; + uint32_t XgmiBitrate; + uint64_t XgmiReadBandwidthAcc[8]; + uint64_t XgmiWriteBandwidthAcc[8]; + + //ACTIVITY + uint32_t SocketGfxBusy; + uint32_t DramBandwidthUtilization; + uint64_t SocketGfxBusyAcc; + uint64_t DramBandwidthAcc; + uint32_t MaxDramBandwidth; + uint64_t DramBandwidthUtilizationAcc; + uint64_t PcieBandwidthAcc[4]; + + //THROTTLERS + uint32_t ProchotResidencyAcc; + uint32_t PptResidencyAcc; + uint32_t SocketThmResidencyAcc; + uint32_t VrThmResidencyAcc; + uint32_t HbmThmResidencyAcc; + uint32_t GfxLockXCDMak; + + // New Items at end to maintain driver compatibility + uint32_t GfxclkFrequency[8]; + + //XGMI Data tranfser size + uint64_t XgmiReadDataSizeAcc[8];//in KByte + uint64_t XgmiWriteDataSizeAcc[8];//in KByte + + //PCIE BW Data and error count + uint32_t PcieBandwidth[4]; + uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated + uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated + + // VCN/JPEG ACTIVITY + uint32_t VcnBusy[4]; + uint32_t JpegBusy[40]; + + // PCIE LINK Speed and width + uint32_t PCIeLinkSpeed; + uint32_t PCIeLinkWidth; + + // PER XCD ACTIVITY + uint32_t GfxBusy[8]; + uint64_t GfxBusyAcc[8]; + + //PCIE BW Data and error count + uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated + + //Total App Clock Counter + uint64_t GfxclkBelowHostLimitPptAcc[8]; + uint64_t GfxclkBelowHostLimitThmAcc[8]; + uint64_t GfxclkBelowHostLimitTotalAcc[8]; + uint64_t GfxclkLowUtilizationAcc[8]; +} MetricsTable_t; + +#define SMU_VF_METRICS_TABLE_MASK (1 << 31) +#define SMU_VF_METRICS_TABLE_VERSION (0x6 | SMU_VF_METRICS_TABLE_MASK) + +typedef struct __attribute__((packed, aligned(4))) { + uint32_t AccumulationCounter; + uint32_t InstGfxclk_TargFreq; + uint64_t AccGfxclk_TargFreq; + uint64_t AccGfxRsmuDpm_Busy; + uint64_t AccGfxclkBelowHostLimitPpt; + uint64_t AccGfxclkBelowHostLimitThm; + uint64_t AccGfxclkBelowHostLimitTotal; + uint64_t AccGfxclkLowUtilization; +} VfMetricsTable_t; + +/* FRU product information */ +typedef struct __attribute__((packed, aligned(4))) { + uint8_t ModelNumber[PRODUCT_MODEL_NUMBER_LEN]; + uint8_t Name[PRODUCT_NAME_LEN]; + uint8_t Serial[PRODUCT_SERIAL_LEN]; + uint8_t ManufacturerName[PRODUCT_MANUFACTURER_NAME_LEN]; + uint8_t FruId[PRODUCT_FRU_ID_LEN]; +} FRUProductInfo_t; + +#pragma pack(push, 4) +typedef struct { + //FRU PRODUCT INFO + FRUProductInfo_t ProductInfo; + + //POWER + uint32_t MaxSocketPowerLimit; + + //FREQUENCY RANGE + uint32_t MaxGfxclkFrequency; + uint32_t MinGfxclkFrequency; + uint32_t FclkFrequencyTable[4]; + uint32_t UclkFrequencyTable[4]; + uint32_t 
SocclkFrequencyTable[4]; + uint32_t VclkFrequencyTable[4]; + uint32_t DclkFrequencyTable[4]; + uint32_t LclkFrequencyTable[4]; + + //PSNs + uint64_t PublicSerialNumber_AID[4]; + uint64_t PublicSerialNumber_XCD[8]; +} StaticMetricsTable_t; +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h new file mode 100644 index 000000000000..e1f490b6ce64 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h @@ -0,0 +1,143 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU_13_0_12_PPSMC_H +#define SMU_13_0_12_PPSMC_H + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GfxDriverReset 0x3 +#define PPSMC_MSG_GetDriverIfVersion 0x4 +#define PPSMC_MSG_EnableAllSmuFeatures 0x5 +#define PPSMC_MSG_DisableAllSmuFeatures 0x6 +#define PPSMC_MSG_RequestI2cTransaction 0x7 +#define PPSMC_MSG_GetMetricsVersion 0x8 +#define PPSMC_MSG_GetMetricsTable 0x9 +#define PPSMC_MSG_GetEccInfoTable 0xA +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xB +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xC +#define PPSMC_MSG_SetDriverDramAddrHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrLow 0xE +#define PPSMC_MSG_SetToolsDramAddrHigh 0xF +#define PPSMC_MSG_SetToolsDramAddrLow 0x10 +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x11 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x12 +#define PPSMC_MSG_SetSoftMinByFreq 0x13 +#define PPSMC_MSG_SetSoftMaxByFreq 0x14 +#define PPSMC_MSG_GetMinDpmFreq 0x15 +#define PPSMC_MSG_GetMaxDpmFreq 0x16 +#define PPSMC_MSG_GetDpmFreqByIndex 0x17 +#define PPSMC_MSG_SetPptLimit 0x18 +#define PPSMC_MSG_GetPptLimit 0x19 +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x1A +#define PPSMC_MSG_DramLogSetDramAddrLow 0x1B +#define PPSMC_MSG_DramLogSetDramSize 0x1C +#define PPSMC_MSG_GetDebugData 0x1D +#define PPSMC_MSG_HeavySBR 0x1E +#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x1F +#define PPSMC_MSG_DFCstateControl 0x20 +#define PPSMC_MSG_GetGmiPwrDnHyst 0x21 +#define PPSMC_MSG_SetGmiPwrDnHyst 0x22 +#define PPSMC_MSG_GmiPwrDnControl 0x23 +#define PPSMC_MSG_EnterGfxoff 0x24 +#define 
PPSMC_MSG_ExitGfxoff 0x25 +#define PPSMC_MSG_EnableDeterminism 0x26 +#define PPSMC_MSG_DisableDeterminism 0x27 +#define PPSMC_MSG_DumpSTBtoDram 0x28 +#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x29 +#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x2A +#define PPSMC_MSG_STBtoDramLogSetDramSize 0x2B +#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x2C +#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x2D +#define PPSMC_MSG_GfxDriverResetRecovery 0x2E +#define PPSMC_MSG_TriggerVFFLR 0x2F +#define PPSMC_MSG_SetSoftMinGfxClk 0x30 +#define PPSMC_MSG_SetSoftMaxGfxClk 0x31 +#define PPSMC_MSG_GetMinGfxDpmFreq 0x32 +#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33 +#define PPSMC_MSG_PrepareForDriverUnload 0x34 +#define PPSMC_MSG_ReadThrottlerLimit 0x35 +#define PPSMC_MSG_QueryValidMcaCount 0x36 +#define PPSMC_MSG_McaBankDumpDW 0x37 +#define PPSMC_MSG_GetCTFLimit 0x38 +#define PPSMC_MSG_ClearMcaOnRead 0x39 +#define PPSMC_MSG_QueryValidMcaCeCount 0x3A +#define PPSMC_MSG_McaBankCeDumpDW 0x3B +#define PPSMC_MSG_SelectPLPDMode 0x40 +#define PPSMC_MSG_PmLogReadSample 0x41 +#define PPSMC_MSG_PmLogGetTableVersion 0x42 +#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43 +#define PPSMC_MSG_SetThrottlingPolicy 0x44 +#define PPSMC_MSG_SetPhaseDetectCSBWThreshold 0x45 +#define PPSMC_MSG_SetPhaseDetectFreqHigh 0x46 +#define PPSMC_MSG_SetPhaseDetectFreqLow 0x47 +#define PPSMC_MSG_SetPhaseDetectDownHysterisis 0x48 +#define PPSMC_MSG_SetPhaseDetectAlphaX1e6 0x49 +#define PPSMC_MSG_SetPhaseDetectOnOff 0x4A +#define PPSMC_MSG_GetPhaseDetectResidency 0x4B +#define PPSMC_MSG_UpdatePccWaitDecMaxStr 0x4C +#define PPSMC_MSG_ResetSDMA 0x4D +#define PPSMC_MSG_GetRasTableVersion 0x4E +#define PPSMC_MSG_GetRmaStatus 0x4F +#define PPSMC_MSG_GetErrorCount 0x50 +#define PPSMC_MSG_GetBadPageCount 0x51 +#define PPSMC_MSG_GetBadPageInfo 0x52 +#define PPSMC_MSG_GetBadPagePaAddrLoHi 0x53 +#define PPSMC_MSG_SetTimestampLoHi 0x54 +#define PPSMC_MSG_GetTimestampLoHi 0x55 +#define PPSMC_MSG_GetRasPolicy 0x56 +#define PPSMC_MSG_DumpErrorRecord 0x57 +#define PPSMC_MSG_EraseRasTable 0x58 +#define PPSMC_MSG_GetStaticMetricsTable 0x59 +#define PPSMC_Message_Count 0x5A + +//PPSMC Reset Types for driver msg argument +#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 +#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2 +#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3 + +//PPSMC Reset Types for driver msg argument +#define PPSMC_THROTTLING_LIMIT_TYPE_SOCKET 0x1 +#define PPSMC_THROTTLING_LIMIT_TYPE_HBM 0x2 + +//CTF/Throttle Limit types +#define PPSMC_AID_THM_TYPE 0x1 +#define PPSMC_CCD_THM_TYPE 0x2 +#define PPSMC_XCD_THM_TYPE 0x3 +#define PPSMC_HBM_THM_TYPE 0x4 + +//PLPD modes +#define PPSMC_PLPD_MODE_DEFAULT 0x1 +#define PPSMC_PLPD_MODE_OPTIMIZED 0x2 + +typedef uint32_t PPSMC_Result; +typedef uint32_t PPSMC_MSG; + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 7b812b9994d7..01790a927930 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -34,6 +34,8 @@ #define NUM_PCIE_BITRATES 4 #define NUM_XGMI_BITRATES 4 #define NUM_XGMI_WIDTHS 3 +#define NUM_SOC_P2S_TABLES 3 +#define NUM_TDP_GROUPS 4 typedef enum { /*0*/ FEATURE_DATA_CALCULATION = 0, @@ -80,8 +82,10 @@ typedef enum { /*41*/ FEATURE_CXL_QOS = 41, /*42*/ FEATURE_SOC_DC_RTC = 42, /*43*/ FEATURE_GFX_DC_RTC = 43, +/*44*/ FEATURE_DVM_MIN_PSM = 44, +/*45*/ FEATURE_PRC = 45, -/*44*/ NUM_FEATURES = 44 +/*46*/ 
NUM_FEATURES = 46 } FEATURE_LIST_e; //enum for MPIO PCIe gen speed msgs @@ -123,8 +127,9 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0xB +#define SMU_METRICS_TABLE_VERSION 0x11 +// Unified metrics table for smu_v13_0_6 typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -223,8 +228,26 @@ typedef struct __attribute__((packed, aligned(4))) { // VCN/JPEG ACTIVITY uint32_t VcnBusy[4]; uint32_t JpegBusy[32]; -} MetricsTableX_t; + // PCIE LINK Speed and width + uint32_t PCIeLinkSpeed; + uint32_t PCIeLinkWidth; + + // PER XCD ACTIVITY + uint32_t GfxBusy[8]; + uint64_t GfxBusyAcc[8]; + + //PCIE BW Data and error count + uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated + + //Total App Clock Counter + uint64_t GfxclkBelowHostLimitPptAcc[8]; + uint64_t GfxclkBelowHostLimitThmAcc[8]; + uint64_t GfxclkBelowHostLimitTotalAcc[8]; + uint64_t GfxclkLowUtilizationAcc[8]; +} MetricsTableV0_t; + +// Metrics table for smu_v13_0_6 APUS typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -315,15 +338,134 @@ typedef struct __attribute__((packed, aligned(4))) { // VCN/JPEG ACTIVITY uint32_t VcnBusy[4]; uint32_t JpegBusy[32]; -} MetricsTableA_t; +} MetricsTableV1_t; + +// Metrics table for smu_v13_0_12 +typedef struct __attribute__((packed, aligned(4))) { + uint64_t AccumulationCounter; + + //TEMPERATURE + uint32_t MaxSocketTemperature; + uint32_t MaxVrTemperature; + uint32_t MaxHbmTemperature; + uint64_t MaxSocketTemperatureAcc; + uint64_t MaxVrTemperatureAcc; + uint64_t MaxHbmTemperatureAcc; + + //POWER + uint32_t SocketPowerLimit; + uint32_t MaxSocketPowerLimit; + uint32_t SocketPower; + + //ENERGY + uint64_t Timestamp; + uint64_t SocketEnergyAcc; + uint64_t CcdEnergyAcc; + uint64_t XcdEnergyAcc; + uint64_t AidEnergyAcc; + uint64_t HbmEnergyAcc; -#define SMU_VF_METRICS_TABLE_VERSION 0x3 + //FREQUENCY + uint32_t GfxclkFrequencyLimit; + uint32_t FclkFrequency; + uint32_t UclkFrequency; + uint32_t SocclkFrequency[4]; + uint32_t VclkFrequency[4]; + uint32_t DclkFrequency[4]; + uint32_t LclkFrequency[4]; + uint64_t GfxclkFrequencyAcc[8]; + + //FREQUENCY RANGE + uint32_t MaxGfxclkFrequency; + uint32_t MinGfxclkFrequency; + uint32_t FclkFrequencyTable[4]; + uint32_t UclkFrequencyTable[4]; + uint32_t SocclkFrequencyTable[4]; + uint32_t VclkFrequencyTable[4]; + uint32_t DclkFrequencyTable[4]; + uint32_t LclkFrequencyTable[4]; + uint32_t MaxLclkDpmRange; + uint32_t MinLclkDpmRange; + + //XGMI + uint32_t XgmiWidth; + uint32_t XgmiBitrate; + uint64_t XgmiReadBandwidthAcc[8]; + uint64_t XgmiWriteBandwidthAcc[8]; + + //ACTIVITY + uint32_t SocketGfxBusy; + uint32_t DramBandwidthUtilization; + uint64_t SocketC0ResidencyAcc; + uint64_t SocketGfxBusyAcc; + uint64_t DramBandwidthAcc; + uint32_t MaxDramBandwidth; + uint64_t DramBandwidthUtilizationAcc; + uint64_t PcieBandwidthAcc[4]; + + //THROTTLERS + uint32_t ProchotResidencyAcc; + uint32_t PptResidencyAcc; + uint32_t SocketThmResidencyAcc; + uint32_t VrThmResidencyAcc; + uint32_t HbmThmResidencyAcc; + uint32_t GfxLockXCDMak; + + // New Items at end to maintain driver compatibility + uint32_t GfxclkFrequency[8]; + + //PSNs + uint64_t PublicSerialNumber_AID[4]; + uint64_t PublicSerialNumber_XCD[8]; + + //XGMI Data tranfser size + uint64_t XgmiReadDataSizeAcc[8];//in KByte + uint64_t XgmiWriteDataSizeAcc[8];//in KByte + + //PCIE BW Data and error count + uint32_t PcieBandwidth[4]; + uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter 
itself is accumulated + uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated + uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated + + // VCN/JPEG ACTIVITY + uint32_t VcnBusy[4]; + uint32_t JpegBusy[32]; + + // PCIE LINK Speed and width + uint32_t PCIeLinkSpeed; + uint32_t PCIeLinkWidth; + + // PER XCD ACTIVITY + uint32_t GfxBusy[8]; + uint64_t GfxBusyAcc[8]; + + //PCIE BW Data and error count + uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated + + //Total App Clock Counter + uint64_t GfxclkBelowHostLimitAcc[8]; +} MetricsTableV2_t; + +#define SMU_VF_METRICS_TABLE_VERSION 0x5 typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; uint32_t InstGfxclk_TargFreq; uint64_t AccGfxclk_TargFreq; uint64_t AccGfxRsmuDpm_Busy; + uint64_t AccGfxclkBelowHostLimit; } VfMetricsTable_t; +#pragma pack(push, 4) +typedef struct { + // Telemetry + uint32_t InputTelemetryVoltageInmV; + // General info + uint32_t pldmVersion[2]; +} StaticMetricsTable_t; +#pragma pack(pop) + #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h index 86758051cb93..41f268313613 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h @@ -92,7 +92,11 @@ #define PPSMC_MSG_McaBankCeDumpDW 0x3B #define PPSMC_MSG_SelectPLPDMode 0x40 #define PPSMC_MSG_RmaDueToBadPageThreshold 0x43 -#define PPSMC_Message_Count 0x44 +#define PPSMC_MSG_SetThrottlingPolicy 0x44 +#define PPSMC_MSG_ResetSDMA 0x4D +#define PPSMC_MSG_ResetVCN 0x4E +#define PPSMC_MSG_GetStaticMetricsTable 0x59 +#define PPSMC_Message_Count 0x5A //PPSMC Reset Types for driver msg argument #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h index c4dc5881d8df..e7f5ef49049f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h @@ -106,8 +106,8 @@ #define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA #define PPSMC_MSG_SetSoftMaxVpe 0x36 ///< #define PPSMC_MSG_SetSoftMinVpe 0x37 ///< -#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache -#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache +#define PPSMC_MSG_MALLPowerController 0x38 ///< Set MALL control +#define PPSMC_MSG_MALLPowerState 0x39 ///< Enter/Exit MALL PG #define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages /** @}*/ diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h new file mode 100644 index 000000000000..87ca5ceb1ece --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h @@ -0,0 +1,150 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU_V14_0_2_PPSMC_H +#define SMU_V14_0_2_PPSMC_H + +#define PPSMC_VERSION 0x1 + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +// BASIC +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetRunningSmuFeaturesLow 0xC +#define PPSMC_MSG_GetRunningSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrHigh 0xE +#define PPSMC_MSG_SetDriverDramAddrLow 0xF +#define PPSMC_MSG_SetToolsDramAddrHigh 0x10 +#define PPSMC_MSG_SetToolsDramAddrLow 0x11 +#define PPSMC_MSG_TransferTableSmu2Dram 0x12 +#define PPSMC_MSG_TransferTableDram2Smu 0x13 +#define PPSMC_MSG_UseDefaultPPTable 0x14 + +//BACO/BAMACO/BOMACO +#define PPSMC_MSG_EnterBaco 0x15 +#define PPSMC_MSG_ExitBaco 0x16 +#define PPSMC_MSG_ArmD3 0x17 +#define PPSMC_MSG_BacoAudioD3PME 0x18 + +//DPM +#define PPSMC_MSG_SetSoftMinByFreq 0x19 +#define PPSMC_MSG_SetSoftMaxByFreq 0x1A +#define PPSMC_MSG_SetHardMinByFreq 0x1B +#define PPSMC_MSG_SetHardMaxByFreq 0x1C +#define PPSMC_MSG_GetMinDpmFreq 0x1D +#define PPSMC_MSG_GetMaxDpmFreq 0x1E +#define PPSMC_MSG_GetDpmFreqByIndex 0x1F +#define PPSMC_MSG_OverridePcieParameters 0x20 + +//DramLog Set DramAddr +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x21 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x22 +#define PPSMC_MSG_DramLogSetDramSize 0x23 +#define PPSMC_MSG_SetWorkloadMask 0x24 + +#define PPSMC_MSG_GetVoltageByDpm 0x25 // Can be removed +#define PPSMC_MSG_SetVideoFps 0x26 // Can be removed +#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27 + +//Power Gating +#define PPSMC_MSG_AllowGfxOff 0x28 +#define PPSMC_MSG_DisallowGfxOff 0x29 +#define PPSMC_MSG_PowerUpVcn 0x2A +#define PPSMC_MSG_PowerDownVcn 0x2B +#define PPSMC_MSG_PowerUpJpeg 0x2C +#define PPSMC_MSG_PowerDownJpeg 0x2D + +//Resets +#define PPSMC_MSG_PrepareMp1ForUnload 0x2E + +//Set SystemVirtual DramAddrHigh +#define 
PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x31 +//ACDC Power Source +#define PPSMC_MSG_SetPptLimit 0x32 +#define PPSMC_MSG_GetPptLimit 0x33 +#define PPSMC_MSG_ReenableAcDcInterrupt 0x34 +#define PPSMC_MSG_NotifyPowerSource 0x35 + +//BTC +#define PPSMC_MSG_RunDcBtc 0x36 + +// 0x37 + +//Others +#define PPSMC_MSG_SetTemperatureInputSelect 0x38 // Can be removed +#define PPSMC_MSG_SetFwDstatesMask 0x39 +#define PPSMC_MSG_SetThrottlerMask 0x3A + +#define PPSMC_MSG_SetExternalClientDfCstateAllow 0x3B + +#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x3C + +//STB to dram log +#define PPSMC_MSG_DumpSTBtoDram 0x3D +#define PPSMC_MSG_STBtoDramLogSetDramAddress 0x3E +#define PPSMC_MSG_DummyUndefined 0x3F +#define PPSMC_MSG_STBtoDramLogSetDramSize 0x40 +#define PPSMC_MSG_SetOBMTraceBufferLogging 0x41 + +#define PPSMC_MSG_UseProfilingMode 0x42 +#define PPSMC_MSG_AllowGfxDcs 0x43 +#define PPSMC_MSG_DisallowGfxDcs 0x44 +#define PPSMC_MSG_EnableAudioStutterWA 0x45 +#define PPSMC_MSG_PowerUpUmsch 0x46 +#define PPSMC_MSG_PowerDownUmsch 0x47 +#define PPSMC_MSG_SetDcsArch 0x48 +#define PPSMC_MSG_TriggerVFFLR 0x49 +#define PPSMC_MSG_SetNumBadMemoryPagesRetired 0x4A +#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B +#define PPSMC_MSG_SetPriorityDeltaGain 0x4C +#define PPSMC_MSG_AllowIHHostInterrupt 0x4D +#define PPSMC_MSG_EnableShadowDpm 0x4E +#define PPSMC_MSG_Mode3Reset 0x4F +#define PPSMC_MSG_SetDriverDramAddr 0x50 +#define PPSMC_MSG_SetToolsDramAddr 0x51 +#define PPSMC_MSG_TransferTableSmu2DramWithAddr 0x52 +#define PPSMC_MSG_TransferTableDram2SmuWithAddr 0x53 +#define PPSMC_MSG_GetAllRunningSmuFeatures 0x54 +#define PPSMC_MSG_GetSvi3Voltage 0x55 +#define PPSMC_MSG_UpdatePolicy 0x56 +#define PPSMC_MSG_ExtPwrConnSupport 0x57 +#define PPSMC_MSG_PreloadSwPstateForUclkOverDrive 0x58 +#define PPSMC_Message_Count 0x59 +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index af427cc7dbb8..eefdaa0b5df6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -272,7 +272,13 @@ __SMU_DUMMY_MAP(SetSoftMinVpe), \ __SMU_DUMMY_MAP(GetMetricsVersion), \ __SMU_DUMMY_MAP(EnableUCLKShadow), \ - __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), + __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \ + __SMU_DUMMY_MAP(SetThrottlingPolicy), \ + __SMU_DUMMY_MAP(MALLPowerController), \ + __SMU_DUMMY_MAP(MALLPowerState), \ + __SMU_DUMMY_MAP(ResetSDMA), \ + __SMU_DUMMY_MAP(ResetVCN), \ + __SMU_DUMMY_MAP(GetStaticMetricsTable), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -310,6 +316,8 @@ enum smu_clk_type { SMU_OD_ACOUSTIC_TARGET, SMU_OD_FAN_TARGET_TEMPERATURE, SMU_OD_FAN_MINIMUM_PWM, + SMU_OD_FAN_ZERO_RPM_ENABLE, + SMU_OD_FAN_ZERO_RPM_STOP_TEMP, SMU_CLK_COUNT, }; @@ -349,6 +357,7 @@ enum smu_clk_type { __SMU_DUMMY_MAP(DS_FCLK), \ __SMU_DUMMY_MAP(DS_MP1CLK), \ __SMU_DUMMY_MAP(DS_MP0CLK), \ + __SMU_DUMMY_MAP(DS_MPIOCLK), \ __SMU_DUMMY_MAP(XGMI_PER_LINK_PWR_DWN), \ __SMU_DUMMY_MAP(DPM_GFX_PACE), \ __SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \ @@ -436,7 +445,17 @@ enum smu_clk_type { __SMU_DUMMY_MAP(BACO_CG), \ __SMU_DUMMY_MAP(SOC_CG), \ __SMU_DUMMY_MAP(LOW_POWER_DCNCLKS), \ - __SMU_DUMMY_MAP(WHISPER_MODE), + __SMU_DUMMY_MAP(WHISPER_MODE), \ + __SMU_DUMMY_MAP(EDC_PWRBRK), \ + __SMU_DUMMY_MAP(SOC_EDC_XVMIN), \ + __SMU_DUMMY_MAP(GFX_PSM_DIDT), \ + __SMU_DUMMY_MAP(APT_ALL_ENABLE), \ + __SMU_DUMMY_MAP(APT_SQ_THROTTLE), \ + __SMU_DUMMY_MAP(APT_PF_DCS), \ + 
__SMU_DUMMY_MAP(GFX_EDC_XVMIN), \ + __SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \ + __SMU_DUMMY_MAP(FAN_ABNORMAL), \ + __SMU_DUMMY_MAP(PIT), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT @@ -445,4 +464,11 @@ enum smu_feature_mask { SMU_FEATURE_COUNT, }; +/* Message category flags */ +#define SMU_MSG_VF_FLAG (1U << 0) +#define SMU_MSG_RAS_PRI (1U << 1) + +/* Firmware capability flags */ +#define SMU_FW_CAP_RAS_PRI (1U << 0) + #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index a0e5ad0381d6..56ae555bb52a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -237,7 +237,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); -bool smu_v11_0_baco_is_support(struct smu_context *smu); +int smu_v11_0_get_bamaco_support(struct smu_context *smu); enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu); @@ -255,7 +255,7 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -281,11 +281,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu, enum smu_clk_type clk_type, struct smu_11_0_dpm_table *single_dpm_table); -int smu_v11_0_get_dpm_level_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t *min_value, - uint32_t *max_value); - int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu); uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h index 1ad2dff71090..0886d8cffbd0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h @@ -56,7 +56,7 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu); int smu_v12_0_mode2_reset(struct smu_context *smu); int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v12_0_set_driver_table_location(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index fbd57fa1a004..4263798d716b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -53,6 +53,10 @@ #define SMU_13_VCLK_SHIFT 16 +#define SMUQ10_TO_UINT(x) ((x) >> 10) +#define SMUQ10_FRAC(x) ((x) & 0x3ff) +#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) + extern const int pmfw_decoded_link_speed[5]; extern const int pmfw_decoded_link_width[7]; @@ -107,6 +111,8 @@ struct smu_13_0_dpm_context { struct smu_13_0_dpm_tables dpm_tables; uint32_t workload_policy_mask; uint32_t dcef_min_ds_clk; + uint64_t caps; + uint32_t board_volt; }; enum smu_13_0_power_state { @@ -157,8 +163,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu); int smu_v13_0_system_features_control(struct smu_context *smu, bool en); -int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count); - int 
smu_v13_0_set_allowed_mask(struct smu_context *smu); int smu_v13_0_notify_display_change(struct smu_context *smu); @@ -178,13 +182,6 @@ int smu_v13_0_disable_thermal_alert(struct smu_context *smu); int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value); -int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk); - -int -smu_v13_0_display_clock_voltage_request(struct smu_context *smu, - struct pp_display_clock_request - *clock_req); - uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu); @@ -210,7 +207,7 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); -bool smu_v13_0_baco_is_support(struct smu_context *smu); +int smu_v13_0_get_bamaco_support(struct smu_context *smu); int smu_v13_0_baco_enter(struct smu_context *smu); int smu_v13_0_baco_exit(struct smu_context *smu); @@ -219,12 +216,7 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); - -int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v13_0_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); @@ -255,7 +247,8 @@ int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, uint64_t event_arg); int smu_v13_0_set_vcn_enable(struct smu_context *smu, - bool enable); + bool enable, + int inst); int smu_v13_0_set_jpeg_enable(struct smu_context *smu, bool enable); @@ -298,5 +291,12 @@ int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable); int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, struct freq_band_range *exclusion_ranges); + +int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value); + +void smu_v13_0_interrupt_work(struct smu_context *smu); +void smu_v13_0_reset_custom_level(struct smu_context *smu); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 4af1985ae446..29a4583db873 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -28,7 +28,7 @@ #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 -#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1 +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E #define FEATURE_MASK(feature) (1ULL << feature) @@ -39,12 +39,25 @@ #define MP1_SRAM 0x03c00004 /* address block */ -#define smnMP1_FIRMWARE_FLAGS 0x3010028 +#define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028 +#define smnMP1_FIRMWARE_FLAGS 0x3010024 #define smnMP1_PUB_CTRL 0x3010d10 #define MAX_DPM_LEVELS 16 #define MAX_PCIE_CONF 3 +#define SMU14_TOOL_SIZE 0x19000 + +#define CTF_OFFSET_EDGE 5 +#define CTF_OFFSET_HOTSPOT 5 +#define CTF_OFFSET_MEM 5 + +extern const int decoded_link_speed[5]; +extern const int decoded_link_width[8]; + +#define DECODE_GEN_SPEED(gen_speed_idx) (decoded_link_speed[gen_speed_idx]) +#define DECODE_LANE_WIDTH(lane_width_idx) (decoded_link_width[lane_width_idx]) + struct smu_14_0_max_sustainable_clocks { uint32_t display_clock; uint32_t phy_clock; @@ -160,7 +173,7 @@ int 
smu_v14_0_register_irq_handler(struct smu_context *smu); int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_baco_seq baco_seq); -bool smu_v14_0_baco_is_support(struct smu_context *smu); +int smu_v14_0_get_bamaco_support(struct smu_context *smu); enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu); @@ -173,7 +186,7 @@ int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -197,7 +210,8 @@ int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, uint64_t event_arg); int smu_v14_0_set_vcn_enable(struct smu_context *smu, - bool enable); + bool enable, + int inst); int smu_v14_0_set_jpeg_enable(struct smu_context *smu, bool enable); @@ -227,5 +241,9 @@ int smu_v14_0_od_edit_dpm_table(struct smu_context *smu, void smu_v14_0_set_smu_mailbox_registers(struct smu_context *smu); +int smu_v14_0_enable_thermal_alert(struct smu_context *smu); + +int smu_v14_0_disable_thermal_alert(struct smu_context *smu); + #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h new file mode 100644 index 000000000000..75c921e87360 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h @@ -0,0 +1,204 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU_14_0_2_PPTABLE_H +#define SMU_14_0_2_PPTABLE_H + + +#pragma pack(push, 1) + +#define SMU_14_0_2_TABLE_FORMAT_REVISION 23 +#define SMU_14_0_2_CUSTOM_TABLE_FORMAT_REVISION 1 + +// POWERPLAYTABLE::ulPlatformCaps +#define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC needs to show Powerplay page. +#define SMU_14_0_2_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 // This cap indicates whether power source notification is done by SBIOS instead of OS. +#define SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC 0x4 // This cap indicates whether DC mode notification is done by GPIO pin directly. +#define SMU_14_0_2_PP_PLATFORM_CAP_BACO 0x8 // This cap indicates whether board supports the BACO circuitry.
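[Editor's note: ulPlatformCaps is a plain bitmask, so capability checks on this table reduce to masked compares. A minimal user-space sketch of the pattern follows; the has_cap() helper and the duplicated cap values are illustrative only, not part of this patch, and "BAMACO" here simply means both the BACO and MACO bits being set.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PP_PLATFORM_CAP_BACO 0x8  /* mirrors SMU_14_0_2_PP_PLATFORM_CAP_BACO */
#define PP_PLATFORM_CAP_MACO 0x10 /* mirrors SMU_14_0_2_PP_PLATFORM_CAP_MACO */

/* Illustrative helper: true when every bit in 'caps' is advertised. */
static bool has_cap(uint32_t platform_caps, uint32_t caps)
{
	return (platform_caps & caps) == caps;
}

int main(void)
{
	uint32_t platform_caps = PP_PLATFORM_CAP_BACO | PP_PLATFORM_CAP_MACO;

	/* BAMACO support requires both the BACO and MACO circuitry caps. */
	printf("BACO: %d, BAMACO: %d\n",
	       has_cap(platform_caps, PP_PLATFORM_CAP_BACO),
	       has_cap(platform_caps, PP_PLATFORM_CAP_BACO | PP_PLATFORM_CAP_MACO));
	return 0;
}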
+#define SMU_14_0_2_PP_PLATFORM_CAP_MACO 0x10 // This cap indicates whether board supports the MACO circuitry. +#define SMU_14_0_2_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 // This cap indicates whether board supports the Shadow Pstate. +#define SMU_14_0_2_PP_PLATFORM_CAP_LEDSUPPORTED 0x40 // This cap indicates whether board supports the LED. +#define SMU_14_0_2_PP_PLATFORM_CAP_MOBILEOVERDRIVE 0x80 // This cap indicates whether board supports the Mobile Overdrive. + +// SMU_14_0_2_PP_THERMALCONTROLLER - Thermal Controller Type +#define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0 + +#define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD +#define SMU_14_0_2_PP_CUSTOM_OVERDRIVE_VERSION 0x1 +#define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 + +enum SMU_14_0_2_OD_SW_FEATURE_CAP +{ + SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT = 0, + SMU_14_0_2_ODCAP_POWER_MODE = 1, + SMU_14_0_2_ODCAP_AUTO_UV_ENGINE = 2, + SMU_14_0_2_ODCAP_AUTO_OC_ENGINE = 3, + SMU_14_0_2_ODCAP_AUTO_OC_MEMORY = 4, + SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE = 5, + SMU_14_0_2_ODCAP_MANUAL_AC_TIMING = 6, + SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER = 7, + SMU_14_0_2_ODCAP_AUTO_SOC_UV = 8, + SMU_14_0_2_ODCAP_COUNT = 9, +}; + +enum SMU_14_0_2_OD_SW_FEATURE_ID +{ + SMU_14_0_2_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, // Auto Fan Acoustic RPM + SMU_14_0_2_ODFEATURE_POWER_MODE = 1 << SMU_14_0_2_ODCAP_POWER_MODE, // Optimized GPU Power Mode + SMU_14_0_2_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_UV_ENGINE, // Auto Under Volt GFXCLK + SMU_14_0_2_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_OC_ENGINE, // Auto Over Clock GFXCLK + SMU_14_0_2_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_14_0_2_ODCAP_AUTO_OC_MEMORY, // Auto Over Clock MCLK + SMU_14_0_2_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE, // Auto AC Timing Tuning + SMU_14_0_2_ODFEATURE_MANUAL_AC_TIMING = 1 << SMU_14_0_2_ODCAP_MANUAL_AC_TIMING, // Manual fine grain AC Timing tuning + SMU_14_0_2_ODFEATURE_AUTO_VF_CURVE_OPTIMIZER = 1 << SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER, // Fine grain auto VF curve tuning + SMU_14_0_2_ODFEATURE_AUTO_SOC_UV = 1 << SMU_14_0_2_ODCAP_AUTO_SOC_UV, // Auto Under Volt VDDSOC +}; + +#define SMU_14_0_2_MAX_ODFEATURE 32 // Maximum Number of OD Features + +enum SMU_14_0_2_OD_SW_FEATURE_SETTING_ID +{ + SMU_14_0_2_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT = 0, + SMU_14_0_2_ODSETTING_POWER_MODE = 1, + SMU_14_0_2_ODSETTING_AUTOUVENGINE = 2, + SMU_14_0_2_ODSETTING_AUTOOCENGINE = 3, + SMU_14_0_2_ODSETTING_AUTOOCMEMORY = 4, + SMU_14_0_2_ODSETTING_ACTIMING = 5, + SMU_14_0_2_ODSETTING_MANUAL_AC_TIMING = 6, + SMU_14_0_2_ODSETTING_AUTO_VF_CURVE_OPTIMIZER = 7, + SMU_14_0_2_ODSETTING_AUTO_SOC_UV = 8, + SMU_14_0_2_ODSETTING_COUNT = 9, +}; +#define SMU_14_0_2_MAX_ODSETTING 64 // Maximum Number of ODSettings + +enum SMU_14_0_2_PWRMODE_SETTING +{ + SMU_14_0_2_PMSETTING_POWER_LIMIT_QUIET = 0, + SMU_14_0_2_PMSETTING_POWER_LIMIT_BALANCE, + SMU_14_0_2_PMSETTING_POWER_LIMIT_TURBO, + SMU_14_0_2_PMSETTING_POWER_LIMIT_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_QUIET, +
SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE, + SMU_14_0_2_PMSETTING_COUNT +}; +#define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings + +enum SMU_14_0_2_overdrive_table_id +{ + SMU_14_0_2_OVERDRIVE_TABLE_BASIC = 0, + SMU_14_0_2_OVERDRIVE_TABLE_ADVANCED = 1, + SMU_14_0_2_OVERDRIVE_TABLE_COUNT = 2, +}; + +struct smu_14_0_2_overdrive_table +{ + uint8_t revision; // Revision = SMU_14_0_2_PP_OVERDRIVE_VERSION + uint8_t reserve[3]; // Zero filled field reserved for future use + uint8_t cap[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODFEATURE]; // OD feature support flags + int32_t max[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // maximum settings + int32_t min[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // minimum settings + int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings +}; + +enum smu_14_0_3_pptable_source { + PPTABLE_SOURCE_IFWI = 0, + PPTABLE_SOURCE_DRIVER_HARDCODED = 1, + PPTABLE_SOURCE_PPGEN_REGISTRY = 2, + PPTABLE_SOURCE_MAX = PPTABLE_SOURCE_PPGEN_REGISTRY, +}; + +struct smu_14_0_2_powerplay_table +{ + struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen. + uint8_t table_revision; // PPGen use only: table_revision = 3 + uint8_t pptable_source; // PPGen UI dropdown box + uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t) + uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t. + uint16_t pmfw_sku_table_start_offset; // DO NOT CHANGE ORDER; The absolute start offset of the SkuTable_t (within smu_14_0_3_powerplay_table). + uint16_t pmfw_sku_table_size; // DO NOT CHANGE ORDER; The size of SkuTable_t. + uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t + uint16_t pmfw_board_table_size; // The size of BoardTable_t. + uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable. + uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t. + uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base + uint32_t golden_revision; // PPGen use only: PP Table Revision on the Golden Data Base + uint16_t format_id; // PPGen use only: PPTable for different ASICs. + uint32_t platform_caps; // POWERPLAYTABLE::ulPlatformCaps + + uint8_t thermal_controller_type; // one of smu_14_0_2_PP_THERMALCONTROLLER + + uint16_t small_power_limit1; + uint16_t small_power_limit2; + uint16_t boost_power_limit; // For Gemini Board, when the slave adapter is in BACO mode, the master adapter will use this boost power limit instead of the default power limit.
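[Editor's note: each pmfw_*_start_offset/size pair above locates a PMFW sub-table inside the raw pptable image, so a parser only needs a bounds-checked offset before casting to the matching PMFW struct. A minimal sketch under that assumption follows; pptable_subtable() is an illustrative helper, not a kernel function.]

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative helper: return a pointer to the sub-table recorded as
 * (start_offset, size) inside a raw pptable image of image_len bytes,
 * or NULL when the recorded span does not fit inside the image.
 * E.g. sku = pptable_subtable(image, len, hdr->pmfw_sku_table_start_offset,
 *                             hdr->pmfw_sku_table_size);
 */
static const void *pptable_subtable(const uint8_t *image, size_t image_len,
				    uint16_t start_offset, uint16_t size)
{
	if ((size_t)start_offset + size > image_len)
		return NULL;
	return image + start_offset;
}

int main(void)
{
	uint8_t image[64] = { 0 };
	/* Pretend a sub-table span was recorded at offset 16, size 32. */
	const void *sub = pptable_subtable(image, sizeof(image), 16, 32);
	return sub ? 0 : 1;
}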
+ uint16_t software_shutdown_temp; + + uint8_t reserve[143]; // Zero filled field reserved for future use + + struct smu_14_0_2_overdrive_table overdrive_table; + + PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes +}; + +enum SMU_14_0_2_CUSTOM_OD_SW_FEATURE_CAP { + SMU_14_0_2_CUSTOM_ODCAP_POWER_MODE = 0, + SMU_14_0_2_CUSTOM_ODCAP_COUNT +}; + +enum SMU_14_0_2_CUSTOM_OD_FEATURE_SETTING_ID { + SMU_14_0_2_CUSTOM_ODSETTING_POWER_MODE = 0, + SMU_14_0_2_CUSTOM_ODSETTING_COUNT, +}; + +struct smu_14_0_2_custom_overdrive_table { + uint8_t revision; + uint8_t reserve[3]; + uint8_t cap[SMU_14_0_2_CUSTOM_ODCAP_COUNT]; + int32_t max[SMU_14_0_2_CUSTOM_ODSETTING_COUNT]; + int32_t min[SMU_14_0_2_CUSTOM_ODSETTING_COUNT]; + int16_t pm_setting[SMU_14_0_2_PMSETTING_COUNT]; +}; + +struct smu_14_0_3_custom_powerplay_table { + uint8_t custom_table_revision; + uint16_t custom_table_size; + uint16_t custom_sku_table_offset; + uint32_t custom_platform_caps; + uint16_t software_shutdown_temp; + struct smu_14_0_2_custom_overdrive_table custom_overdrive_table; + uint32_t reserve[8]; + CustomSkuTable_t custom_sku_table_pmfw; +}; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 0c2d04f978ac..9ad46f545d15 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -283,9 +283,29 @@ static int arcturus_tables_init(struct smu_context *smu) return 0; } +static int arcturus_select_plpd_policy(struct smu_context *smu, int level) +{ + /* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */ + if (smu->smc_fw_version < 0x00361700) { + dev_err(smu->adev->dev, + "XGMI power down control is only supported by PMFW 54.23.0 and onwards\n"); + return -EINVAL; + } + + if (level == XGMI_PLPD_DEFAULT) + return smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, 1, NULL); + else if (level == XGMI_PLPD_DISALLOW) + return smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, 0, NULL); + else + return -EINVAL; +} + static int arcturus_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_dpm_policy *policy; smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context), GFP_KERNEL); @@ -293,6 +313,20 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context); + smu_dpm->dpm_policies = + kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL); + + if (!smu_dpm->dpm_policies) + return -ENOMEM; + + policy = &(smu_dpm->dpm_policies->policies[0]); + policy->policy_type = PP_PM_POLICY_XGMI_PLPD; + policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT); + policy->current_level = XGMI_PLPD_DEFAULT; + policy->set_policy = arcturus_select_plpd_policy; + smu_cmn_generic_plpd_policy_desc(policy); + smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD); + return 0; } @@ -403,6 +437,14 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } + /* XGMI PLPD is supported by 54.23.0 and onwards */ + if (smu->smc_fw_version < 0x00361700) { + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + smu_dpm->dpm_policies->policy_mask &= + ~BIT(PP_PM_POLICY_XGMI_PLPD); + } + return 0; } @@ -1225,6 +1267,9 
@@ static int arcturus_set_fan_speed_rpm(struct smu_context *smu, uint32_t crystal_clock_freq = 2500; uint32_t tach_period; + if (!speed || speed > UINT_MAX/8) + return -EINVAL; + tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT, REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT), @@ -1302,8 +1347,12 @@ static int arcturus_get_power_limit(struct smu_context *smu, *default_power_limit = power_limit; if (max_power_limit) *max_power_limit = power_limit; + /* + * No lower bound is imposed on the limit. Any unreasonable limit set + * will result in frequent throttling. + */ if (min_power_limit) - *min_power_limit = power_limit; + *min_power_limit = 0; return 0; } @@ -1399,93 +1448,120 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, return size; } -static int arcturus_set_power_profile_mode(struct smu_context *smu, - long *input, - uint32_t size) +#define ARCTURUS_CUSTOM_PARAMS_COUNT 10 +#define ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define ARCTURUS_CUSTOM_PARAMS_SIZE (ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT * ARCTURUS_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int arcturus_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffInt_t activity_monitor; - int workload_type = 0; - uint32_t profile_mode = input[size]; - int ret = 0; + int ret, idx; - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; } + idx = 0 * ARCTURUS_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor.Gfx_FPS = input[idx + 1]; + activity_monitor.Gfx_UseRlcBusy = input[idx + 2]; + activity_monitor.Gfx_MinActiveFreqType = input[idx + 3]; + activity_monitor.Gfx_MinActiveFreq = input[idx + 4]; + activity_monitor.Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor.Gfx_BoosterFreq = input[idx + 6]; + activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 1 * ARCTURUS_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Uclk */ + activity_monitor.Mem_FPS = input[idx + 1]; + activity_monitor.Mem_UseRlcBusy = input[idx + 2]; + activity_monitor.Mem_MinActiveFreqType = input[idx + 3]; + activity_monitor.Mem_MinActiveFreq = input[idx + 4]; + activity_monitor.Mem_BoosterFreqType = input[idx + 5]; + activity_monitor.Mem_BoosterFreq = input[idx + 6]; + activity_monitor.Mem_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9]; + } - if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && - (smu->smc_fw_version >= 0x360d00)) { - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), - false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - switch (input[0]) { - 
case 0: /* Gfxclk */ - activity_monitor.Gfx_FPS = input[1]; - activity_monitor.Gfx_UseRlcBusy = input[2]; - activity_monitor.Gfx_MinActiveFreqType = input[3]; - activity_monitor.Gfx_MinActiveFreq = input[4]; - activity_monitor.Gfx_BoosterFreqType = input[5]; - activity_monitor.Gfx_BoosterFreq = input[6]; - activity_monitor.Gfx_PD_Data_limit_c = input[7]; - activity_monitor.Gfx_PD_Data_error_coeff = input[8]; - activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; - break; - case 1: /* Uclk */ - activity_monitor.Mem_FPS = input[1]; - activity_monitor.Mem_UseRlcBusy = input[2]; - activity_monitor.Mem_MinActiveFreqType = input[3]; - activity_monitor.Mem_MinActiveFreq = input[4]; - activity_monitor.Mem_BoosterFreqType = input[5]; - activity_monitor.Mem_BoosterFreq = input[6]; - activity_monitor.Mem_PD_Data_limit_c = input[7]; - activity_monitor.Mem_PD_Data_error_coeff = input[8]; - activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; - break; - } + return ret; +} - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), - true); +static int arcturus_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; + + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (smu->smc_fw_version < 0x360d00) + return -EINVAL; + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(ARCTURUS_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != ARCTURUS_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * ARCTURUS_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = arcturus_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } - } - - /* - * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT - * Not all profile modes are supported on arcturus. 
- */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode); - return -EINVAL; + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, ARCTURUS_CUSTOM_PARAMS_SIZE); } ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - 1 << workload_type, - NULL); + SMU_MSG_SetWorkloadMask, + backend_workload_mask, + NULL); if (ret) { - dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } - smu->power_profile_mode = profile_mode; - - return 0; + return ret; } static int arcturus_set_performance_level(struct smu_context *smu, @@ -1512,437 +1588,6 @@ static int arcturus_set_performance_level(struct smu_context *smu, return smu_v11_0_set_performance_level(smu, level); } -static void arcturus_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - int i; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = %d\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = %d\n", i, pptable->SocketPowerLimitAcTau[i]); - } - - dev_info(smu->adev->dev, "TdcLimitSoc = %d\n", pptable->TdcLimitSoc); - dev_info(smu->adev->dev, "TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau); - dev_info(smu->adev->dev, "TdcLimitGfx = %d\n", pptable->TdcLimitGfx); - dev_info(smu->adev->dev, "TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau); - - dev_info(smu->adev->dev, "TedgeLimit = %d\n", pptable->TedgeLimit); - dev_info(smu->adev->dev, "ThotspotLimit = %d\n", pptable->ThotspotLimit); - dev_info(smu->adev->dev, "TmemLimit = %d\n", pptable->TmemLimit); - dev_info(smu->adev->dev, "Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit); - dev_info(smu->adev->dev, "Tvr_memLimit = %d\n", pptable->Tvr_memLimit); - dev_info(smu->adev->dev, "Tvr_socLimit = %d\n", pptable->Tvr_socLimit); - dev_info(smu->adev->dev, "FitLimit = %d\n", pptable->FitLimit); - - dev_info(smu->adev->dev, "PpmPowerLimit = %d\n", pptable->PpmPowerLimit); - dev_info(smu->adev->dev, "PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); - - dev_info(smu->adev->dev, "ThrottlerControlMask = %d\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "UlvPadding = 0x%08x\n", pptable->UlvPadding); - - dev_info(smu->adev->dev, "UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass); - dev_info(smu->adev->dev, "Padding234[0] = 0x%02x\n", pptable->Padding234[0]); - dev_info(smu->adev->dev, "Padding234[1] = 0x%02x\n", pptable->Padding234[1]); - dev_info(smu->adev->dev, "Padding234[2] = 0x%02x\n", pptable->Padding234[2]); - - dev_info(smu->adev->dev, "MinVoltageGfx = %d\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = %d\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = %d\n", 
pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK].padding, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK].padding, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].padding, - 
pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", 
pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate); - dev_info(smu->adev->dev, "Padding567[0] = 0x%x\n", pptable->Padding567[0]); - dev_info(smu->adev->dev, "Padding567[1] = 0x%x\n", pptable->Padding567[1]); - dev_info(smu->adev->dev, "Padding567[2] = 0x%x\n", pptable->Padding567[2]); - dev_info(smu->adev->dev, "Padding567[3] = 0x%x\n", pptable->Padding567[3]); - dev_info(smu->adev->dev, "GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "Padding456 = 0x%x\n", pptable->Padding456); - - dev_info(smu->adev->dev, "EnableTdpm = %d\n", pptable->EnableTdpm); - dev_info(smu->adev->dev, "TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature); - dev_info(smu->adev->dev, "TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature); - dev_info(smu->adev->dev, "GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit); - - dev_info(smu->adev->dev, "FanStopTemp = %d\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = %d\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGainEdge = %d\n", pptable->FanGainEdge); - dev_info(smu->adev->dev, "FanGainHotspot = %d\n", pptable->FanGainHotspot); - dev_info(smu->adev->dev, "FanGainVrGfx = %d\n", pptable->FanGainVrGfx); - dev_info(smu->adev->dev, "FanGainVrSoc = %d\n", pptable->FanGainVrSoc); - dev_info(smu->adev->dev, "FanGainVrMem = %d\n", pptable->FanGainVrMem); - dev_info(smu->adev->dev, "FanGainHbm = %d\n", pptable->FanGainHbm); - - dev_info(smu->adev->dev, "FanPwmMin = %d\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = %d\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = %d\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev); - dev_info(smu->adev->dev, "FanTempInputSelect = %d\n", pptable->FanTempInputSelect); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved); - - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]); - dev_info(smu->adev->dev, "Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]); - - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxAfll.a, - pptable->dBtcGbGfxAfll.b, - pptable->dBtcGbGfxAfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - 
pptable->dBtcGbSoc.c); - - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = %d\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = %d\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = %d\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = %d\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = %d\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = %d\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = %d\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = %d\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - 
dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "PaddingUlv = %d\n", pptable->PaddingUlv); - - dev_info(smu->adev->dev, "TotalPowerConfig = %d\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerSpare1 = %d\n", pptable->TotalPowerSpare1); - dev_info(smu->adev->dev, "TotalPowerSpare2 = %d\n", pptable->TotalPowerSpare2); - - dev_info(smu->adev->dev, "PccThresholdLow = %d\n", pptable->PccThresholdLow); - dev_info(smu->adev->dev, "PccThresholdHigh = %d\n", pptable->PccThresholdHigh); - - dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx); - dev_info(smu->adev->dev, "MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); - - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMemVrMapping = 0x%x\n", pptable->VddMemVrMapping); - dev_info(smu->adev->dev, "BoardVrMapping = 0x%x\n", pptable->BoardVrMapping); - - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent); - - dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "MemMaxCurrent = 0x%x\n", pptable->MemMaxCurrent); - dev_info(smu->adev->dev, "MemOffset = 0x%x\n", pptable->MemOffset); - dev_info(smu->adev->dev, "Padding_TelemetryMem = 0x%x\n", pptable->Padding_TelemetryMem); - - dev_info(smu->adev->dev, "BoardMaxCurrent = 0x%x\n", pptable->BoardMaxCurrent); - dev_info(smu->adev->dev, "BoardOffset = 0x%x\n", pptable->BoardOffset); - dev_info(smu->adev->dev, "Padding_TelemetryBoardInput = 0x%x\n", pptable->Padding_TelemetryBoardInput); - - dev_info(smu->adev->dev, "VR0HotGpio = %d\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = %d\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = %d\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = %d\n", pptable->VR1HotPolarity); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled); - dev_info(smu->adev->dev, "UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent); - dev_info(smu->adev->dev, "UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = %d\n", 
pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = %d\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = %d\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = %d\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = %d\n", - pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = %d\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .Speed = %d\n", - pptable->I2cControllers[i].Speed); - } - - dev_info(smu->adev->dev, "MemoryChannelEnabled = %d\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = %d\n", pptable->DramBitWidth); - - dev_info(smu->adev->dev, "TotalBoardPower = %d\n", pptable->TotalBoardPower); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]); - -} - static bool arcturus_is_dpm_running(struct smu_context *smu) { int ret = 0; @@ -1955,7 +1600,9 @@ static bool arcturus_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -2175,27 +1822,6 @@ static int arcturus_set_df_cstate(struct smu_context *smu, return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } -static int arcturus_select_xgmi_plpd_policy(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode) -{ - /* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */ - if (smu->smc_fw_version < 0x00361700) { - dev_err(smu->adev->dev, "XGMI power down control is only supported by PMFW 54.23.0 and onwards\n"); - return -EINVAL; - } - - if (mode == XGMI_PLPD_DEFAULT) - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - 1, NULL); - else if (mode == XGMI_PLPD_DISALLOW) - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - 0, NULL); - else - return -EINVAL; -} - static const struct throttling_logging_label { uint32_t feature_mask; const char *label; @@ -2339,8 +1965,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .get_power_profile_mode = 
arcturus_get_power_profile_mode, .set_power_profile_mode = arcturus_set_power_profile_mode, .set_performance_level = arcturus_set_performance_level, - /* debug (internal used) */ - .dump_pptable = arcturus_dump_pptable, .get_power_limit = arcturus_get_power_limit, .is_dpm_running = arcturus_is_dpm_running, .dpm_set_vcn_enable = arcturus_dpm_set_vcn_enable, @@ -2387,13 +2011,12 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, + .get_bamaco_support = smu_v11_0_get_bamaco_support, .baco_enter = smu_v11_0_baco_enter, .baco_exit = smu_v11_0_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, .set_df_cstate = arcturus_set_df_cstate, - .select_xgmi_plpd_policy = arcturus_select_xgmi_plpd_policy, .log_thermal_throttling_event = arcturus_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 836b1df79928..7fad5dfb39c4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1135,7 +1135,9 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) return 0; } -static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int navi10_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -1219,19 +1221,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, value); } -static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) +static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) { PPTable_t *pptable = smu->smu_table.driver_pptable; DpmDescriptor_t *dpm_desc = NULL; - uint32_t clk_index = 0; + int clk_index = 0; clk_index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK, clk_type); + if (clk_index < 0) + return clk_index; + dpm_desc = &pptable->DpmDescriptor[clk_index]; /* 0 - Fine grained DPM, 1 - Discrete DPM */ - return dpm_desc->SnapToDiscrete == 0; + return dpm_desc->SnapToDiscrete == 0 ? 
1 : 0; } static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap) @@ -1287,7 +1292,11 @@ static int navi10_emit_clk_levels(struct smu_context *smu, if (ret) return ret; - if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { + ret = navi10_is_support_fine_grained_dpm(smu, clk_type); + if (ret < 0) + return ret; + + if (!ret) { for (i = 0; i < count; i++) { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value); @@ -1382,8 +1391,6 @@ static int navi10_emit_clk_levels(struct smu_context *smu, case 2: curve_settings = &od_table->GfxclkFreq3; break; - default: - break; } *offset += sysfs_emit_at(buf, *offset, "%d: %uMHz %umV\n", i, curve_settings[0], @@ -1496,7 +1503,11 @@ static int navi10_print_clk_levels(struct smu_context *smu, if (ret) return size; - if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { + ret = navi10_is_support_fine_grained_dpm(smu, clk_type); + if (ret < 0) + return ret; + + if (!ret) { for (i = 0; i < count; i++) { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value); if (ret) @@ -1583,8 +1594,6 @@ static int navi10_print_clk_levels(struct smu_context *smu, case 2: curve_settings = &od_table->GfxclkFreq3; break; - default: - break; } size += sysfs_emit_at(buf, size, "%d: %uMHz %umV\n", i, curve_settings[0], @@ -1665,7 +1674,11 @@ static int navi10_force_clk_levels(struct smu_context *smu, case SMU_UCLK: case SMU_FCLK: /* There is only 2 levels for fine grained DPM */ - if (navi10_is_support_fine_grained_dpm(smu, clk_type)) { + ret = navi10_is_support_fine_grained_dpm(smu, clk_type); + if (ret < 0) + return ret; + + if (ret) { soft_max_level = (soft_max_level >= 1 ? 1 : 0); soft_min_level = (soft_min_level >= 1 ? 1 : 0); } @@ -1678,7 +1691,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, if (ret) return 0; - ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return 0; break; @@ -1978,7 +1991,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", 2, - "MEMLK", + "MEMCLK", activity_monitor.Mem_FPS, activity_monitor.Mem_MinFreqStep, activity_monitor.Mem_MinActiveFreqType, @@ -1993,81 +2006,122 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) return size; } -static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +#define NAVI10_CUSTOM_PARAMS_COUNT 10 +#define NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT 3 +#define NAVI10_CUSTOM_PARAMS_SIZE (NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT * NAVI10_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int navi10_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffInt_t activity_monitor; - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + idx = 0 * NAVI10_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor.Gfx_FPS = input[idx + 1]; + 
activity_monitor.Gfx_MinFreqStep = input[idx + 2]; + activity_monitor.Gfx_MinActiveFreqType = input[idx + 3]; + activity_monitor.Gfx_MinActiveFreq = input[idx + 4]; + activity_monitor.Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor.Gfx_BoosterFreq = input[idx + 6]; + activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 1 * NAVI10_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Socclk */ + activity_monitor.Soc_FPS = input[idx + 1]; + activity_monitor.Soc_MinFreqStep = input[idx + 2]; + activity_monitor.Soc_MinActiveFreqType = input[idx + 3]; + activity_monitor.Soc_MinActiveFreq = input[idx + 4]; + activity_monitor.Soc_BoosterFreqType = input[idx + 5]; + activity_monitor.Soc_BoosterFreq = input[idx + 6]; + activity_monitor.Soc_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Soc_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Soc_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 2 * NAVI10_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Memclk */ + activity_monitor.Mem_FPS = input[idx + 1]; + activity_monitor.Mem_MinFreqStep = input[idx + 2]; + activity_monitor.Mem_MinActiveFreqType = input[idx + 3]; + activity_monitor.Mem_MinActiveFreq = input[idx + 4]; + activity_monitor.Mem_BoosterFreqType = input[idx + 5]; + activity_monitor.Mem_BoosterFreq = input[idx + 6]; + activity_monitor.Mem_PD_Data_limit_c = input[idx + 7]; + activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8]; + activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9]; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + return ret; +} - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } +static int navi10_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor.Gfx_FPS = input[1]; - activity_monitor.Gfx_MinFreqStep = input[2]; - activity_monitor.Gfx_MinActiveFreqType = input[3]; - activity_monitor.Gfx_MinActiveFreq = input[4]; - activity_monitor.Gfx_BoosterFreqType = input[5]; - activity_monitor.Gfx_BoosterFreq = input[6]; - activity_monitor.Gfx_PD_Data_limit_c = input[7]; - activity_monitor.Gfx_PD_Data_error_coeff = input[8]; - activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; - break; - case 1: /* Socclk */ - activity_monitor.Soc_FPS = input[1]; - activity_monitor.Soc_MinFreqStep = input[2]; - activity_monitor.Soc_MinActiveFreqType = input[3]; - activity_monitor.Soc_MinActiveFreq = input[4]; - activity_monitor.Soc_BoosterFreqType = input[5]; - activity_monitor.Soc_BoosterFreq = input[6]; - activity_monitor.Soc_PD_Data_limit_c = input[7]; - activity_monitor.Soc_PD_Data_error_coeff = input[8]; - activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; - break; - case 2: /* Memlk */ - activity_monitor.Mem_FPS = input[1]; - activity_monitor.Mem_MinFreqStep = input[2]; - activity_monitor.Mem_MinActiveFreqType = input[3]; - 
activity_monitor.Mem_MinActiveFreq = input[4]; - activity_monitor.Mem_BoosterFreqType = input[5]; - activity_monitor.Mem_BoosterFreq = input[6]; - activity_monitor.Mem_PD_Data_limit_c = input[7]; - activity_monitor.Mem_PD_Data_error_coeff = input[8]; - activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; - break; - } + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor), true); + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = kzalloc(NAVI10_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != NAVI10_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT) + return -EINVAL; + idx = custom_params[0] * NAVI10_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = navi10_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, NAVI10_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; - smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + backend_workload_mask, NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } @@ -3538,7 +3592,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, + .get_bamaco_support = smu_v11_0_get_bamaco_support, .baco_enter = navi10_baco_enter, .baco_exit = navi10_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 1f18b61884f3..115e3fa456bc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1152,22 +1152,20 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) return 0; } -static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; - int i, ret = 0; + int ret = 0; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - /* vcn dpm on is a prerequisite for vcn power gate messages */ - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_cmn_send_smc_msg_with_param(smu, 
enable ? - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, - 0x10000 * i, NULL); - if (ret) - return ret; - } + if (adev->vcn.harvest_config & (1 << inst)) + return ret; + /* vcn dpm on is a prerequisite for vcn power gate messages */ + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, + 0x10000 * inst, NULL); } return ret; @@ -1469,7 +1467,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, if (ret) goto forec_level_out; - ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto forec_level_out; break; @@ -1691,7 +1689,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", 2, - "MEMLK", + "MEMCLK", activity_monitor->Mem_FPS, activity_monitor->Mem_MinFreqStep, activity_monitor->Mem_MinActiveFreqType, @@ -1706,84 +1704,126 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * return size; } -static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +#define SIENNA_CICHLID_CUSTOM_PARAMS_COUNT 10 +#define SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT 3 +#define SIENNA_CICHLID_CUSTOM_PARAMS_SIZE (SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int sienna_cichlid_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + idx = 0 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_FPS = input[idx + 1]; + activity_monitor->Gfx_MinFreqStep = input[idx + 2]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 3]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 4]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor->Gfx_BoosterFreq = input[idx + 6]; + activity_monitor->Gfx_PD_Data_limit_c = input[idx + 7]; + activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 8]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 1 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Socclk */ + activity_monitor->Fclk_FPS = input[idx + 1]; + activity_monitor->Fclk_MinFreqStep = input[idx + 2]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 3]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 4]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 5]; + activity_monitor->Fclk_BoosterFreq = input[idx + 6]; + activity_monitor->Fclk_PD_Data_limit_c = input[idx + 7]; + activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 8]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 9]; + } + idx = 2 * 
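sienna_cichlid_dpm_set_vcn_enable() above now gates a single VCN instance instead of iterating over all of them, and the instance is encoded in the SMU message argument. A sketch of that encoding (the 0x10000 scale appears to place the instance index in the upper 16 bits of the parameter):

	/* Sketch only: PowerUp/PowerDownVcn argument per the hunk above. */
	static inline uint32_t vcn_pg_msg_param(int inst)
	{
		return 0x10000u * inst;	/* inst 0 -> 0x00000, inst 1 -> 0x10000 */
	}

The per-instance call pairs with the new "inst" parameter threaded through amdgpu_dpm_set_powergating_by_smu() earlier in this series.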
SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Memclk */ + activity_monitor->Mem_FPS = input[idx + 1]; + activity_monitor->Mem_MinFreqStep = input[idx + 2]; + activity_monitor->Mem_MinActiveFreqType = input[idx + 3]; + activity_monitor->Mem_MinActiveFreq = input[idx + 4]; + activity_monitor->Mem_BoosterFreqType = input[idx + 5]; + activity_monitor->Mem_BoosterFreq = input[idx + 6]; + activity_monitor->Mem_PD_Data_limit_c = input[idx + 7]; + activity_monitor->Mem_PD_Data_error_coeff = input[idx + 8]; + activity_monitor->Mem_PD_Data_error_rate_coeff = input[idx + 9]; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + return ret; +} - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_FPS = input[1]; - activity_monitor->Gfx_MinFreqStep = input[2]; - activity_monitor->Gfx_MinActiveFreqType = input[3]; - activity_monitor->Gfx_MinActiveFreq = input[4]; - activity_monitor->Gfx_BoosterFreqType = input[5]; - activity_monitor->Gfx_BoosterFreq = input[6]; - activity_monitor->Gfx_PD_Data_limit_c = input[7]; - activity_monitor->Gfx_PD_Data_error_coeff = input[8]; - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9]; - break; - case 1: /* Socclk */ - activity_monitor->Fclk_FPS = input[1]; - activity_monitor->Fclk_MinFreqStep = input[2]; - activity_monitor->Fclk_MinActiveFreqType = input[3]; - activity_monitor->Fclk_MinActiveFreq = input[4]; - activity_monitor->Fclk_BoosterFreqType = input[5]; - activity_monitor->Fclk_BoosterFreq = input[6]; - activity_monitor->Fclk_PD_Data_limit_c = input[7]; - activity_monitor->Fclk_PD_Data_error_coeff = input[8]; - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9]; - break; - case 2: /* Memlk */ - activity_monitor->Mem_FPS = input[1]; - activity_monitor->Mem_MinFreqStep = input[2]; - activity_monitor->Mem_MinActiveFreqType = input[3]; - activity_monitor->Mem_MinActiveFreq = input[4]; - activity_monitor->Mem_BoosterFreqType = input[5]; - activity_monitor->Mem_BoosterFreq = input[6]; - activity_monitor->Mem_PD_Data_limit_c = input[7]; - activity_monitor->Mem_PD_Data_error_coeff = input[8]; - activity_monitor->Mem_PD_Data_error_rate_coeff = input[9]; - break; - } +static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), true); + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SIENNA_CICHLID_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SIENNA_CICHLID_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= 
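Both ppt backends now receive a u32 workload_mask of PP_SMC_POWER_PROFILE_* bits and translate the whole mask in one go, where the removed code converted only the single smu->power_profile_mode. A sketch of the translation, assuming smu_cmn_get_backend_workload_mask() is essentially a per-bit wrapper around the CMN2ASIC workload mapping used by the old code:

	static uint32_t to_backend_workload_mask(struct smu_context *smu,
						 uint32_t profile_mask)
	{
		uint32_t backend = 0;
		int bit, wl;

		for (bit = 0; bit < 32; bit++) {
			if (!(profile_mask & (1u << bit)))
				continue;
			/* conv PP_SMC_POWER_PROFILE_* to WORKLOAD_PPLIB_*_BIT */
			wl = smu_cmn_to_asic_specific_index(smu,
							    CMN2ASIC_MAPPING_WORKLOAD,
							    bit);
			if (wl >= 0)
				backend |= 1u << wl;
		}
		return backend;
	}

Because several profile bits can be set at once, SMU_MSG_SetWorkloadMask is sent with the accumulated mask, and on failure only the CUSTOM row's valid flag (custom_profile_params[idx]) is rolled back.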
SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = sienna_cichlid_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SIENNA_CICHLID_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; - smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + backend_workload_mask, NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } @@ -2487,1274 +2527,6 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu) return val != 0x0; } -static void beige_goby_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_beige_goby_t *pptable = table_context->driver_pptable; - int i; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]); - } - - for (i = 0; i < TDC_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]); - dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]); - } - - for (i = 0; i < TEMP_COUNT; i++) { - dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]); - } - - dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit); - dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]); - dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]); - dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]); - - dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit); - for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) { - dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]); - dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]); - } - dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "FwDStateMask = 
0x%x\n", pptable->FwDStateMask); - - dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc); - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc); - - dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin); - - dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold); - - dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - 
pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].Padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].Padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c, - 
pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16); - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "DcModeMaxFreq\n"); - dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]); - 
dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]); - - dev_info(smu->adev->dev, "FreqTableUclkDiv\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]); - - dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq); - dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "MemVddciVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]); - - dev_info(smu->adev->dev, "MemMvddVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry); - dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit); - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding); - - dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask); - - dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]); - dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow); - dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]); - dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]); - dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]); - dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]); - dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt); - dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt); - dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt); - - dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage); - dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime); - dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime); - dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum); - dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis); - dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout); - - dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]); - dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]); - dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]); - dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]); - dev_info(smu->adev->dev, 
"DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]); - - dev_info(smu->adev->dev, "FlopsPerByteTable\n"); - for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]); - - dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv); - dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]); - dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]); - dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]); - - dev_info(smu->adev->dev, "UclkDpmPstates\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]); - - dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq); - dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding); - - dev_info(smu->adev->dev, "PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]); - - dev_info(smu->adev->dev, "PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]); - - dev_info(smu->adev->dev, "LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]); - - dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGain\n"); - for (i = 0; i < TEMP_COUNT; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]); - - dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16); - dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect); - dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding); - dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved); - - 
dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect); - dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs); - - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxDfll.a, - pptable->dBtcGbGfxDfll.b, - pptable->dBtcGbGfxDfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n"); - for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) { - dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]); - dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]); - } - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 
0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]); - dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]); - dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]); - dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]); - dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]); - dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]); - dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]); - dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]); - - dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]); - dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]); - dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]); - dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]); - dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]); - dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = 0x%x\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .Speed = 0x%x\n", - pptable->I2cControllers[i].Speed); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = 0x%x\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n", - pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n", - pptable->I2cControllers[i].PaddingConfig); - } - - dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl); - dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda); - dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr); - dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]); - - 
dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask); - - dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio); - - dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio); - dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity); - dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity); - dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio); - dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity); - dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0); - dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1); - dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2); - dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask); - dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie); - dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError); - dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]); - dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", 
pptable->DfllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding); - dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth); - dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]); - dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]); - dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]); - - dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower); - dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]); - - dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled); - dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled); - dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]); - dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]); - - dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]); - dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]); - dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]); - dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]); - dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]); - dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]); - dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]); - dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]); - dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]); - dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]); - dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]); - - dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]); - dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]); - dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]); - dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]); - dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]); - dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]); - dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]); 
- dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]); -} - -static void sienna_cichlid_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - int i; - - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == - IP_VERSION(11, 0, 13)) { - beige_goby_dump_pptable(smu); - return; - } - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]); - } - - for (i = 0; i < TDC_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]); - dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]); - } - - for (i = 0; i < TEMP_COUNT; i++) { - dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]); - } - - dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit); - dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]); - dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]); - dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]); - - dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit); - for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) { - dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]); - dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]); - } - dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask); - - dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc); - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc); - - dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin); - dev_info(smu->adev->dev, "PaddingLIVmin = 0x%x\n", pptable->PaddingLIVmin); - - dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold); - dev_info(smu->adev->dev, "paddingRlcUlvParams[0] = 0x%x\n", pptable->paddingRlcUlvParams[0]); - dev_info(smu->adev->dev, "paddingRlcUlvParams[1] = 0x%x\n", pptable->paddingRlcUlvParams[1]); - dev_info(smu->adev->dev, "paddingRlcUlvParams[2] = 0x%x\n", pptable->paddingRlcUlvParams[2]); - - dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", 
pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].Padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - 
		pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c,
-		pptable->DpmDescriptor[PPCLK_UCLK].SsFmin,
-		pptable->DpmDescriptor[PPCLK_UCLK].Padding16);
-
-	dev_info(smu->adev->dev, "[PPCLK_FCLK]\n"
-		" .VoltageMode = 0x%02x\n"
-		" .SnapToDiscrete = 0x%02x\n"
-		" .NumDiscreteLevels = 0x%02x\n"
-		" .padding = 0x%02x\n"
-		" .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
-		" .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
-		" .SsFmin = 0x%04x\n"
-		" .Padding_16 = 0x%04x\n",
-		pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
-		pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
-		pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
-		pptable->DpmDescriptor[PPCLK_FCLK].Padding,
-		pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
-		pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
-		pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
-		pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
-		pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c,
-		pptable->DpmDescriptor[PPCLK_FCLK].SsFmin,
-		pptable->DpmDescriptor[PPCLK_FCLK].Padding16);
-
-	dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n"
-		" .VoltageMode = 0x%02x\n"
-		" .SnapToDiscrete = 0x%02x\n"
-		" .NumDiscreteLevels = 0x%02x\n"
-		" .padding = 0x%02x\n"
-		" .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
-		" .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
-		" .SsFmin = 0x%04x\n"
-		" .Padding_16 = 0x%04x\n",
-		pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode,
-		pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete,
-		pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels,
-		pptable->DpmDescriptor[PPCLK_DCLK_0].Padding,
-		pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m,
-		pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b,
-		pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a,
-		pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b,
-		pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c,
-		pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin,
-		pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16);
-
-	dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n"
-		" .VoltageMode = 0x%02x\n"
-		" .SnapToDiscrete = 0x%02x\n"
-		" .NumDiscreteLevels = 0x%02x\n"
-		" .padding = 0x%02x\n"
-		" .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
-		" .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
-		" .SsFmin = 0x%04x\n"
-		" .Padding_16 = 0x%04x\n",
-		pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode,
-		pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete,
-		pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels,
-		pptable->DpmDescriptor[PPCLK_VCLK_0].Padding,
-		pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m,
-		pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b,
-		pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a,
-		pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b,
-		pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c,
-		pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin,
-		pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16);
-
-	dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n"
-		" .VoltageMode = 0x%02x\n"
-		" .SnapToDiscrete = 0x%02x\n"
-		" .NumDiscreteLevels = 0x%02x\n"
-		" .padding = 0x%02x\n"
-		" .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
-		" .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
-		" .SsFmin = 0x%04x\n"
-		" .Padding_16 = 0x%04x\n",
-		pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode,
-		pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete,
-		pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels,
-		pptable->DpmDescriptor[PPCLK_DCLK_1].Padding,
-		pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m,
-		pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b,
-		pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a,
-		pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b,
-		pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c,
-		pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin,
-		pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16);
-
-	dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n"
-		" .VoltageMode = 0x%02x\n"
-		" .SnapToDiscrete = 0x%02x\n"
-		" .NumDiscreteLevels = 0x%02x\n"
-		" .padding = 0x%02x\n"
-		" .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
-		" .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
-		" .SsFmin = 0x%04x\n"
-		" .Padding_16 = 0x%04x\n",
-		pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode,
-		pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete,
-		pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels,
-		pptable->DpmDescriptor[PPCLK_VCLK_1].Padding,
-		pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m,
-		pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b,
-		pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a,
-		pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b,
-		pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c,
-		pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin,
-		pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16);
-
-	dev_info(smu->adev->dev, "FreqTableGfx\n");
-	for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]);
-
-	dev_info(smu->adev->dev, "FreqTableVclk\n");
-	for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]);
-
-	dev_info(smu->adev->dev, "FreqTableDclk\n");
-	for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]);
-
-	dev_info(smu->adev->dev, "FreqTableSocclk\n");
-	for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]);
-
-	dev_info(smu->adev->dev, "FreqTableUclk\n");
-	for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]);
-
-	dev_info(smu->adev->dev, "FreqTableFclk\n");
-	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]);
-
-	dev_info(smu->adev->dev, "DcModeMaxFreq\n");
-	dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]);
-	dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]);
-	dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]);
-	dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]);
-	dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]);
-	dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]);
-	dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]);
-	dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]);
-
-	dev_info(smu->adev->dev, "FreqTableUclkDiv\n");
-	for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]);
-
-	dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq);
-	dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding);
-
-	dev_info(smu->adev->dev, "Mp0clkFreq\n");
-	for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]);
-
-	dev_info(smu->adev->dev, "Mp0DpmVoltage\n");
-	for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]);
-
-	dev_info(smu->adev->dev, "MemVddciVoltage\n");
-	for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]);
-
-	dev_info(smu->adev->dev, "MemMvddVoltage\n");
-	for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]);
-
-	dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry);
-	dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit);
-	dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
-	dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource);
-	dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding);
-
-	dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask);
-
-	dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask);
-	dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask);
-	dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]);
-	dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow);
-	dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]);
-	dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]);
-	dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]);
-	dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]);
-	dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt);
-	dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt);
-	dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt);
-
-	dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage);
-	dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime);
-	dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime);
-	dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum);
-	dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis);
-	dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout);
-
-	dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]);
-	dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]);
-	dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]);
-	dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]);
-	dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]);
-
-	dev_info(smu->adev->dev, "FlopsPerByteTable\n");
-	for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]);
-
-	dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv);
-	dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]);
-	dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]);
-	dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]);
-
-	dev_info(smu->adev->dev, "UclkDpmPstates\n");
-	for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]);
-
-	dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n");
-	dev_info(smu->adev->dev, " .Fmin = 0x%x\n",
-		pptable->UclkDpmSrcFreqRange.Fmin);
-	dev_info(smu->adev->dev, " .Fmax = 0x%x\n",
-		pptable->UclkDpmSrcFreqRange.Fmax);
-	dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n");
-	dev_info(smu->adev->dev, " .Fmin = 0x%x\n",
-		pptable->UclkDpmTargFreqRange.Fmin);
-	dev_info(smu->adev->dev, " .Fmax = 0x%x\n",
-		pptable->UclkDpmTargFreqRange.Fmax);
-	dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq);
-	dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding);
-
-	dev_info(smu->adev->dev, "PcieGenSpeed\n");
-	for (i = 0; i < NUM_LINK_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]);
-
-	dev_info(smu->adev->dev, "PcieLaneCount\n");
-	for (i = 0; i < NUM_LINK_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]);
-
-	dev_info(smu->adev->dev, "LclkFreq\n");
-	for (i = 0; i < NUM_LINK_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]);
-
-	dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp);
-	dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp);
-
-	dev_info(smu->adev->dev, "FanGain\n");
-	for (i = 0; i < TEMP_COUNT; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]);
-
-	dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin);
-	dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm);
-	dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm);
-	dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm);
-	dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm);
-	dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature);
-	dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk);
-	dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16);
-	dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect);
-	dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding);
-	dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable);
-	dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev);
-
-	dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta);
-	dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta);
-	dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta);
-	dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved);
-
-	dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
-	dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
-	dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect);
-	dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs);
-
-	dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a,
-		pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b,
-		pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c);
-	dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a,
-		pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b,
-		pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c);
-	dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->dBtcGbGfxPll.a,
-		pptable->dBtcGbGfxPll.b,
-		pptable->dBtcGbGfxPll.c);
-	dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->dBtcGbGfxDfll.a,
-		pptable->dBtcGbGfxDfll.b,
-		pptable->dBtcGbGfxDfll.c);
-	dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->dBtcGbSoc.a,
-		pptable->dBtcGbSoc.b,
-		pptable->dBtcGbSoc.c);
-	dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
-		pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
-		pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
-	dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
-		pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
-		pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
-
-	dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n");
-	for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) {
-		dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n",
-			i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]);
-		dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n",
-			i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]);
-	}
-
-	dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
-		pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
-		pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
-	dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
-		pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
-		pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
-
-	dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
-	dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
-
-	dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
-	dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
-	dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
-	dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
-
-	dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
-	dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
-	dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
-	dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
-
-	dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
-	dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
-
-	dev_info(smu->adev->dev, "XgmiDpmPstates\n");
-	for (i = 0; i < NUM_XGMI_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]);
-	dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]);
-	dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]);
-
-	dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides);
-	dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->ReservedEquation0.a,
-		pptable->ReservedEquation0.b,
-		pptable->ReservedEquation0.c);
-	dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->ReservedEquation1.a,
-		pptable->ReservedEquation1.b,
-		pptable->ReservedEquation1.c);
-	dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->ReservedEquation2.a,
-		pptable->ReservedEquation2.b,
-		pptable->ReservedEquation2.c);
-	dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
-		pptable->ReservedEquation3.a,
-		pptable->ReservedEquation3.b,
-		pptable->ReservedEquation3.c);
-
-	dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]);
-	dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]);
-	dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]);
-	dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]);
-	dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]);
-	dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]);
-	dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]);
-	dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]);
-
-	dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]);
-	dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]);
-	dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]);
-	dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]);
-	dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]);
-	dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]);
-
-	for (i = 0; i < NUM_I2C_CONTROLLERS; i++) {
-		dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i);
-		dev_info(smu->adev->dev, " .Enabled = 0x%x\n",
-			pptable->I2cControllers[i].Enabled);
-		dev_info(smu->adev->dev, " .Speed = 0x%x\n",
-			pptable->I2cControllers[i].Speed);
-		dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n",
-			pptable->I2cControllers[i].SlaveAddress);
-		dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n",
-			pptable->I2cControllers[i].ControllerPort);
-		dev_info(smu->adev->dev, " .ControllerName = 0x%x\n",
-			pptable->I2cControllers[i].ControllerName);
-		dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n",
-			pptable->I2cControllers[i].ThermalThrotter);
-		dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n",
-			pptable->I2cControllers[i].I2cProtocol);
-		dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n",
-			pptable->I2cControllers[i].PaddingConfig);
-	}
-
-	dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl);
-	dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda);
-	dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr);
-	dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]);
-
-	dev_info(smu->adev->dev, "Board Parameters:\n");
-	dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
-	dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
-	dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping);
-	dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping);
-	dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
-	dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask);
-	dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask);
-	dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask);
-
-	dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
-	dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset);
-	dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
-
-	dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
-	dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset);
-	dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
-
-	dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent);
-	dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset);
-	dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0);
-
-	dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent);
-	dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset);
-	dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1);
-
-	dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio);
-
-	dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio);
-	dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity);
-	dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio);
-	dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity);
-	dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio);
-	dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity);
-	dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio);
-	dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity);
-	dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0);
-	dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1);
-	dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2);
-	dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask);
-	dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie);
-	dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError);
-	dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]);
-	dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]);
-
-	dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled);
-	dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent);
-	dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq);
-
-	dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled);
-	dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent);
-	dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", pptable->DfllGfxclkSpreadFreq);
-
-	dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding);
-	dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq);
-
-	dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled);
-	dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent);
-	dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq);
-
-	dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled);
-	dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth);
-	dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]);
-	dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]);
-	dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]);
-
-	dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower);
-	dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding);
-
-	dev_info(smu->adev->dev, "XgmiLinkSpeed\n");
-	for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]);
-	dev_info(smu->adev->dev, "XgmiLinkWidth\n");
-	for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]);
-	dev_info(smu->adev->dev, "XgmiFclkFreq\n");
-	for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]);
-	dev_info(smu->adev->dev, "XgmiSocVoltage\n");
-	for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
-		dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]);
-
-	dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled);
-	dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled);
-	dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]);
-	dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]);
-
-	dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]);
-	dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]);
-	dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]);
-	dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]);
-	dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]);
-	dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]);
-	dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]);
-	dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]);
-	dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]);
-	dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]);
-	dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]);
-
-	dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]);
-	dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]);
-	dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]);
-	dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]);
-	dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]);
-	dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]);
-	dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]);
-	dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]);
-}
-
 static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
 				   struct i2c_msg *msg, int num_msgs)
@@ -4317,11 +3089,6 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
 	return 0;
 }
 
-static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
-{
-	return true;
-}
-
 static int sienna_cichlid_mode2_reset(struct smu_context *smu)
 {
 	int ret = 0, index;
@@ -4391,7 +3158,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.display_disable_memory_clock_switch = sienna_cichlid_display_disable_memory_clock_switch,
 	.get_power_limit = sienna_cichlid_get_power_limit,
 	.update_pcie_parameters = sienna_cichlid_update_pcie_parameters,
-	.dump_pptable = sienna_cichlid_dump_pptable,
 	.init_microcode = smu_v11_0_init_microcode,
 	.load_microcode = smu_v11_0_load_microcode,
 	.fini_microcode = smu_v11_0_fini_microcode,
@@ -4431,7 +3197,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.register_irq_handler = smu_v11_0_register_irq_handler,
 	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
 	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
-	.baco_is_support = smu_v11_0_baco_is_support,
+	.get_bamaco_support = smu_v11_0_get_bamaco_support,
 	.baco_enter = sienna_cichlid_baco_enter,
 	.baco_exit = sienna_cichlid_baco_exit,
 	.mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported,
@@ -4458,7 +3224,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
 	.set_config_table = sienna_cichlid_set_config_table,
 	.get_unique_id = sienna_cichlid_get_unique_id,
-	.mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported,
 	.mode2_reset = sienna_cichlid_mode2_reset,
 };
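Note: the hook-table change above replaces the boolean .baco_is_support callback with .get_bamaco_support, which reports BACO and MACO as separate capability bits instead of a single yes/no. A minimal standalone C sketch of that bool-to-bitmask pattern; the BACO_SUPPORT/MACO_SUPPORT values here are illustrative, not the kernel's definitions:

#include <stdio.h>

#define BACO_SUPPORT (1 << 0)
#define MACO_SUPPORT (1 << 1)

/* Returns a capability mask rather than true/false, so callers can
 * distinguish plain BACO from BACO+MACO (BAMACO) platforms. */
static int get_bamaco_support(int platform_baco, int platform_maco)
{
	int support = 0;

	if (!platform_baco)
		return 0;	/* no BACO at all: report nothing */

	support |= BACO_SUPPORT;
	if (platform_maco)
		support |= MACO_SUPPORT;
	return support;
}

int main(void)
{
	int caps = get_bamaco_support(1, 1);

	printf("BACO: %s, MACO: %s\n",
	       (caps & BACO_SUPPORT) ? "yes" : "no",
	       (caps & MACO_SUPPORT) ? "yes" : "no");
	return 0;
}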
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index f6545093bfc1..78e4186d06cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -93,8 +93,7 @@ static void smu_v11_0_poll_baco_exit(struct smu_context *smu)
 int smu_v11_0_init_microcode(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
-	char ucode_prefix[30];
-	char fw_name[SMU_FW_NAME_LEN];
+	char ucode_prefix[25];
 	int err = 0;
 	const struct smc_firmware_header_v1_0 *hdr;
 	const struct common_firmware_header *header;
@@ -106,10 +105,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
 		return 0;
 
 	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-
-	err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+	err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+				   "amdgpu/%s.bin", ucode_prefix);
 	if (err)
 		goto out;
@@ -230,6 +227,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
 		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
 		break;
 	case IP_VERSION(11, 5, 0):
+	case IP_VERSION(11, 5, 2):
 		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
 		break;
 	case IP_VERSION(11, 0, 12):
@@ -474,10 +472,11 @@ int smu_v11_0_init_power(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 	struct smu_power_context *smu_power = &smu->smu_power;
-	size_t size = amdgpu_ip_version(adev, MP1_HWIP, 0) ==
-		      IP_VERSION(11, 5, 0) ?
-		      sizeof(struct smu_11_5_power_context) :
-		      sizeof(struct smu_11_0_power_context);
+	u32 ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+	size_t size = ((ip_version == IP_VERSION(11, 5, 0)) ||
+		       (ip_version == IP_VERSION(11, 5, 2))) ?
+		       sizeof(struct smu_11_5_power_context) :
+		       sizeof(struct smu_11_0_power_context);
 
 	smu_power->power_context = kzalloc(size, GFP_KERNEL);
 	if (!smu_power->power_context)
@@ -734,6 +733,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
 	 */
 	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11) ||
	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2) ||
 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 12) ||
 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))
 		return 0;
@@ -1113,6 +1113,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
 	case IP_VERSION(11, 0, 12):
 	case IP_VERSION(11, 0, 13):
 	case IP_VERSION(11, 5, 0):
+	case IP_VERSION(11, 5, 2):
 		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
 			return 0;
 		if (enable)
@@ -1203,7 +1204,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
 	uint32_t crystal_clock_freq = 2500;
 	uint32_t tach_period;
 
-	if (speed == 0)
+	if (!speed || speed > UINT_MAX/8)
 		return -EINVAL;
 	/*
	 * To prevent from possible overheat, some ASICs may have requirement
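Note: the new fan-speed guard above rejects speed > UINT_MAX/8 because the RPM-to-tach-period conversion multiplies speed by 8 in 32-bit arithmetic. A standalone sketch of the pattern (the formula mirrors the driver's crystal-clock arithmetic but is written here only for illustration):

#include <stdint.h>
#include <stdio.h>

static int rpm_to_tach_period(uint32_t speed, uint32_t *tach_period)
{
	const uint32_t crystal_clock_freq = 2500;

	if (!speed || speed > UINT32_MAX / 8)
		return -1;	/* 8 * speed would overflow below */

	*tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	return 0;
}

int main(void)
{
	uint32_t period;

	if (!rpm_to_tach_period(3000, &period))
		printf("3000 RPM -> tach period %u\n", period);
	if (rpm_to_tach_period(UINT32_MAX / 4, &period))
		printf("oversized RPM rejected\n");
	return 0;
}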
@@ -1557,23 +1558,27 @@ int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
 	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
 }
 
-bool smu_v11_0_baco_is_support(struct smu_context *smu)
+int smu_v11_0_get_bamaco_support(struct smu_context *smu)
 {
 	struct smu_baco_context *smu_baco = &smu->smu_baco;
+	int bamaco_support = 0;
 
 	if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
-		return false;
+		return 0;
+
+	if (smu_baco->maco_support)
+		bamaco_support |= MACO_SUPPORT;
 
 	/* return true if ASIC is in BACO state already */
 	if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
-		return true;
+		return bamaco_support |= BACO_SUPPORT;
 
 	/* Arcturus does not support this bit mask */
 	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
 	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
-		return false;
+		return 0;
 
-	return true;
+	return (bamaco_support |= BACO_SUPPORT);
 }
 
 enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
@@ -1603,7 +1608,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
 	case IP_VERSION(11, 0, 11):
 	case IP_VERSION(11, 0, 12):
 	case IP_VERSION(11, 0, 13):
-		if (amdgpu_runtime_pm == 2)
+		if (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)
 			ret = smu_cmn_send_smc_msg_with_param(smu,
 							      SMU_MSG_EnterBaco,
 							      D3HOT_BAMACO_SEQUENCE,
@@ -1616,7 +1621,8 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
 		break;
 	default:
 		if (!ras || !adev->ras_enabled ||
-		    adev->gmc.xgmi.pending_reset) {
+		    (adev->init_lvl->level ==
+		     AMDGPU_INIT_LEVEL_MINIMAL_XGMI)) {
 			if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
 				data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
@@ -1763,7 +1769,8 @@ failed:
 int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
 					  enum smu_clk_type clk_type,
 					  uint32_t min,
-					  uint32_t max)
+					  uint32_t max,
+					  bool automatic)
 {
 	int ret = 0, clk_id = 0;
 	uint32_t param;
@@ -1778,7 +1785,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
 		return clk_id;
 
 	if (max > 0) {
-		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+		if (automatic)
+			param = (uint32_t)((clk_id << 16) | 0xffff);
+		else
+			param = (uint32_t)((clk_id << 16) | (max & 0xffff));
 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
 						      param, NULL);
 		if (ret)
@@ -1786,7 +1796,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
 	}
 
 	if (min > 0) {
-		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+		if (automatic)
+			param = (uint32_t)((clk_id << 16) | 0);
+		else
+			param = (uint32_t)((clk_id << 16) | (min & 0xffff));
 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
 						      param, NULL);
 		if (ret)
@@ -1854,6 +1867,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
 	uint32_t mclk_min = 0, mclk_max = 0;
 	uint32_t socclk_min = 0, socclk_max = 0;
 	int ret = 0;
+	bool auto_level = false;
 
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1873,6 +1887,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
 		mclk_max = mem_table->max;
 		socclk_min = soc_table->min;
 		socclk_max = soc_table->max;
+		auto_level = true;
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
 		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
@@ -1905,13 +1920,15 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
 	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
 		mclk_min = mclk_max = 0;
 		socclk_min = socclk_max = 0;
+		auto_level = false;
 	}
 
 	if (sclk_min && sclk_max) {
 		ret = smu_v11_0_set_soft_freq_limited_range(smu,
 							    SMU_GFXCLK,
 							    sclk_min,
-							    sclk_max);
+							    sclk_max,
+							    auto_level);
 		if (ret)
 			return ret;
 	}
@@ -1920,7 +1937,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
 		ret = smu_v11_0_set_soft_freq_limited_range(smu,
 							    SMU_MCLK,
 							    mclk_min,
-							    mclk_max);
+							    mclk_max,
+							    auto_level);
 		if (ret)
 			return ret;
 	}
@@ -1929,7 +1947,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
 		ret = smu_v11_0_set_soft_freq_limited_range(smu,
 							    SMU_SOCCLK,
 							    socclk_min,
-							    socclk_max);
+							    socclk_max,
+							    auto_level);
 		if (ret)
 			return ret;
 	}
@@ -2040,45 +2059,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
 	return 0;
 }
 
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
-				  enum smu_clk_type clk_type,
-				  uint32_t *min_value,
-				  uint32_t *max_value)
-{
-	uint32_t level_count = 0;
-	int ret = 0;
-
-	if (!min_value && !max_value)
-		return -EINVAL;
-
-	if (min_value) {
-		/* by default, level 0 clock value as min value */
-		ret = smu_v11_0_get_dpm_freq_by_index(smu,
-						      clk_type,
-						      0,
-						      min_value);
-		if (ret)
-			return ret;
-	}
-
-	if (max_value) {
-		ret = smu_v11_0_get_dpm_level_count(smu,
-						    clk_type,
-						    &level_count);
-		if (ret)
-			return ret;
-
-		ret = smu_v11_0_get_dpm_freq_by_index(smu,
-						      clk_type,
-						      level_count - 1,
-						      max_value);
-		if (ret)
-			return ret;
-	}
-
-	return ret;
-}
-
 int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
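Note: the SetSoftMin/MaxByFreq messages above pack the clock id into the high 16 bits of the parameter and the frequency (MHz) into the low 16 bits; the new "automatic" flag sends the widest window (min 0, max 0xffff) instead of a concrete value. A standalone sketch of that packing; the function and variable names are local to this example:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_soft_limit(int clk_id, uint32_t freq_mhz,
				int automatic, int is_max)
{
	uint16_t payload;

	if (automatic)
		payload = is_max ? 0xffff : 0x0000;	/* widest window */
	else
		payload = (uint16_t)(freq_mhz & 0xffff);

	return ((uint32_t)clk_id << 16) | payload;
}

int main(void)
{
	/* e.g. clock id 2, cap at 1800 MHz vs. automatic selection */
	printf("manual max: 0x%08x\n", pack_soft_limit(2, 1800, 0, 1));
	printf("auto max:   0x%08x\n", pack_soft_limit(2, 0, 1, 1));
	printf("auto min:   0x%08x\n", pack_soft_limit(2, 0, 1, 0));
	return 0;
}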
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index da1f43999d09..a55ea76d7399 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -242,7 +242,9 @@ static int vangogh_tables_init(struct smu_context *smu)
 		goto err0_out;
 	smu_table->metrics_time = 0;
 
-	smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2));
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+	smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3));
+	smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4));
 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
 	if (!smu_table->gpu_metrics_table)
 		goto err1_out;
@@ -301,7 +303,7 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
 		*value = metrics->GfxActivity / 100;
 		break;
 	case METRICS_AVERAGE_VCNACTIVITY:
-		*value = metrics->UvdActivity;
+		*value = metrics->UvdActivity / 100;
 		break;
 	case METRICS_AVERAGE_SOCKETPOWER:
 		*value = (metrics->CurrentSocketPower << 8) /
@@ -459,7 +461,9 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
 	return smu_v11_0_init_smc_tables(smu);
 }
 
-static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int vangogh_dpm_set_vcn_enable(struct smu_context *smu,
+				      bool enable,
+				      int inst)
 {
 	int ret = 0;
 
@@ -976,6 +980,18 @@ static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
 		}
 	}
 	if (min) {
+		ret = vangogh_get_profiling_clk_mask(smu,
+						     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK,
+						     NULL,
+						     NULL,
+						     &mclk_mask,
+						     &fclk_mask,
+						     &soc_mask);
+		if (ret)
+			goto failed;
+
+		vclk_mask = dclk_mask = 0;
+
 		switch (clk_type) {
 		case SMU_UCLK:
 		case SMU_MCLK:
@@ -1040,48 +1056,34 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
 	return size;
 }
 
-static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+static int vangogh_set_power_profile_mode(struct smu_context *smu,
+					  u32 workload_mask,
+					  long *custom_params,
+					  u32 custom_params_max_idx)
 {
-	int workload_type, ret;
-	uint32_t profile_mode = input[size];
-
-	if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
-		return -EINVAL;
-	}
-
-	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
-	    profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
-		return 0;
+	u32 backend_workload_mask = 0;
+	int ret;
 
-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-	workload_type = smu_cmn_to_asic_specific_index(smu,
-						       CMN2ASIC_MAPPING_WORKLOAD,
-						       profile_mode);
-	if (workload_type < 0) {
-		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
-			profile_mode);
-		return -EINVAL;
-	}
+	smu_cmn_get_backend_workload_mask(smu, workload_mask,
+					  &backend_workload_mask);
 
 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
-					      1 << workload_type,
-					      NULL);
+					      backend_workload_mask,
+					      NULL);
 	if (ret) {
-		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
-			     workload_type);
+		dev_err_once(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+			     workload_mask);
 		return ret;
 	}
 
-	smu->power_profile_mode = profile_mode;
-
-	return 0;
+	return ret;
 }
 
 static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
-					       enum smu_clk_type clk_type,
-					       uint32_t min,
-					       uint32_t max)
+					       enum smu_clk_type clk_type,
+					       uint32_t min,
+					       uint32_t max,
+					       bool automatic)
 {
 	int ret = 0;
 
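Note: vangogh_dpm_set_vcn_enable() above now takes an instance index, so VCN power gating can be driven per instance rather than for the whole block at once. A simplified sketch of that addressing scheme; the structure and callback shape are stand-ins, not the driver's types:

#include <stdio.h>

#define MAX_VCN_INST 4

struct dummy_smu {
	int vcn_gated[MAX_VCN_INST];	/* 1 = powered down */
	int num_vcn_inst;
};

static int dpm_set_vcn_enable(struct dummy_smu *smu, int enable, int inst)
{
	if (inst < 0 || inst >= smu->num_vcn_inst)
		return -1;
	smu->vcn_gated[inst] = !enable;	/* enable means ungate */
	return 0;
}

int main(void)
{
	struct dummy_smu smu = { .vcn_gated = {1, 1, 1, 1}, .num_vcn_inst = 2 };
	int i;

	/* power up instance 0 only; instance 1 stays gated */
	dpm_set_vcn_enable(&smu, 1, 0);
	for (i = 0; i < smu.num_vcn_inst; i++)
		printf("VCN%d: %s\n", i, smu.vcn_gated[i] ? "gated" : "ungated");
	return 0;
}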
@@ -1287,7 +1289,7 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
 			return ret;
 
 		force_freq = highest ? max_freq : min_freq;
-		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false);
 		if (ret)
 			return ret;
 	}
@@ -1323,7 +1325,7 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu)
 		if (ret)
 			return ret;
 
-		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
 		if (ret)
 			return ret;
 
@@ -1342,7 +1344,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
 
-	ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
+	ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq, false);
 	if (ret)
 		return ret;
 
@@ -1350,7 +1352,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
 
-	ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
+	ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq, false);
 	if (ret)
 		return ret;
 
@@ -1358,7 +1360,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
 
-	ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
+	ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq, false);
 	if (ret)
 		return ret;
 
@@ -1366,7 +1368,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
 
-	ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
+	ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq, false);
 	if (ret)
 		return ret;
 
@@ -1507,6 +1509,12 @@ static int vangogh_read_sensor(struct smu_context *smu,
 						   (uint32_t *)data);
 		*size = 4;
 		break;
+	case AMDGPU_PP_SENSOR_VCN_LOAD:
+		ret = vangogh_common_get_smu_metrics_data(smu,
+							  METRICS_AVERAGE_VCNACTIVITY,
+							  (uint32_t *)data);
+		*size = 4;
+		break;
 	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
 		ret = vangogh_common_get_smu_metrics_data(smu,
 							  METRICS_AVERAGE_SOCKETPOWER,
@@ -2444,6 +2452,8 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
 
 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
 					      start, &residency);
+	if (ret)
+		return ret;
 
 	if (!start)
 		adev->gfx.gfx_off_residency = residency;
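Note: the vangogh and renoir set_power_profile_mode rewrites in this commit both switch from translating a single profile index to folding a mask of active profiles into one backend workload mask. A standalone sketch of that conversion; the profile names and mapping-table values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

enum profile {
	PROFILE_FULLSCREEN3D,
	PROFILE_VIDEO,
	PROFILE_COMPUTE,
	PROFILE_COUNT
};

/* driver profile -> firmware workload bit (hypothetical values) */
static const int asic_workload_bit[PROFILE_COUNT] = { 1, 3, 5 };

static uint32_t get_backend_workload_mask(uint32_t workload_mask)
{
	uint32_t backend_mask = 0;
	int p;

	/* every active driver profile contributes its firmware bit */
	for (p = 0; p < PROFILE_COUNT; p++)
		if (workload_mask & (1u << p))
			backend_mask |= 1u << asic_workload_bit[p];
	return backend_mask;
}

int main(void)
{
	/* 3D fullscreen and compute active at the same time */
	uint32_t mask = (1u << PROFILE_FULLSCREEN3D) | (1u << PROFILE_COMPUTE);

	printf("backend mask: 0x%08x\n", get_backend_workload_mask(mask));
	return 0;
}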
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 8908bbb3ff1f..9481f897432d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -585,8 +585,6 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	}
 
 	switch (clk_type) {
-	case SMU_GFXCLK:
-	case SMU_SCLK:
 	case SMU_SOCCLK:
 	case SMU_MCLK:
 	case SMU_DCEFCLK:
@@ -647,7 +645,9 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context
 	return pm_type;
 }
 
-static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int renoir_dpm_set_vcn_enable(struct smu_context *smu,
+				     bool enable,
+				     int inst)
 {
 	int ret = 0;
 
@@ -709,7 +709,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
 			return ret;
 
 		force_freq = highest ? max_freq : min_freq;
-		ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+		ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false);
 		if (ret)
 			return ret;
 	}
@@ -742,7 +742,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
 		if (ret)
 			return ret;
 
-		ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+		ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
 		if (ret)
 			return ret;
 	}
@@ -864,44 +864,27 @@ static int renoir_force_clk_levels(struct smu_context *smu,
 	return ret;
 }
 
-static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+static int renoir_set_power_profile_mode(struct smu_context *smu,
+					 u32 workload_mask,
+					 long *custom_params,
+					 u32 custom_params_max_idx)
 {
-	int workload_type, ret;
-	uint32_t profile_mode = input[size];
-
-	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
-		return -EINVAL;
-	}
+	int ret;
+	u32 backend_workload_mask = 0;
 
-	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
-	    profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
-		return 0;
-
-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-	workload_type = smu_cmn_to_asic_specific_index(smu,
-						       CMN2ASIC_MAPPING_WORKLOAD,
-						       profile_mode);
-	if (workload_type < 0) {
-		/*
-		 * TODO: If some case need switch to powersave/default power mode
-		 * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
-		 */
-		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode);
-		return -EINVAL;
-	}
+	smu_cmn_get_backend_workload_mask(smu, workload_mask,
+					  &backend_workload_mask);
 
 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
-					      1 << workload_type,
-					      NULL);
+					      backend_workload_mask,
+					      NULL);
 	if (ret) {
-		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
+		dev_err_once(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+			     workload_mask);
 		return ret;
 	}
 
-	smu->power_profile_mode = profile_mode;
-
-	return 0;
+	return ret;
 }
 
 static int renoir_set_peak_clock_by_device(struct smu_context *smu)
@@ -913,7 +896,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
 
-	ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+	ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
 	if (ret)
 		return ret;
 
@@ -921,18 +904,63 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
 
-	ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+	ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
 	if (ret)
 		return ret;
 
 	return ret;
 }
 
+static int renior_set_dpm_profile_freq(struct smu_context *smu,
+				       enum amd_dpm_forced_level level,
+				       enum smu_clk_type clk_type)
+{
+	int ret = 0;
+	uint32_t sclk = 0, socclk = 0, fclk = 0;
+
+	switch (clk_type) {
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+		sclk = RENOIR_UMD_PSTATE_GFXCLK;
+		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+			renoir_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk);
+		else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+			renoir_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk, NULL);
+		break;
+	case SMU_SOCCLK:
+		socclk = RENOIR_UMD_PSTATE_SOCCLK;
+		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+			renoir_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk);
+		break;
+	case SMU_FCLK:
+	case SMU_MCLK:
+		fclk = RENOIR_UMD_PSTATE_FCLK;
+		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+			renoir_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk);
+		else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
+			renoir_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk, NULL);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (sclk)
+		ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk, false);
+
+	if (socclk)
+		ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk, false);
+
+	if (fclk)
+		ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk, false);
+
+	return ret;
+}
+
 static int renoir_set_performance_level(struct smu_context *smu,
 					enum amd_dpm_forced_level level)
 {
 	int ret = 0;
-	uint32_t sclk_mask, mclk_mask, soc_mask;
 
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1012,15 +1040,9 @@ static int renoir_set_performance_level(struct smu_context *smu,
 		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
 		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
 
-		ret = renoir_get_profiling_clk_mask(smu, level,
-						    &sclk_mask,
-						    &mclk_mask,
-						    &soc_mask);
-		if (ret)
-			return ret;
-		renoir_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
-		renoir_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
-		renoir_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+		renior_set_dpm_profile_freq(smu, level, SMU_SCLK);
+		renior_set_dpm_profile_freq(smu, level, SMU_MCLK);
+		renior_set_dpm_profile_freq(smu, level, SMU_SOCCLK);
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
 		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
@@ -1263,6 +1285,12 @@ static int renoir_read_sensor(struct smu_context *smu,
 					  (uint32_t *)data);
 		*size = 4;
 		break;
+	case AMDGPU_PP_SENSOR_VCN_LOAD:
+		ret = renoir_get_smu_metrics_data(smu,
+						  METRICS_AVERAGE_VCNACTIVITY,
+						  (uint32_t *)data);
+		*size = 4;
+		break;
 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
 		ret = renoir_get_smu_metrics_data(smu,
 						  METRICS_TEMPERATURE_EDGE,
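Note: the new renior_set_dpm_profile_freq() helper above picks a fixed profiling frequency per clock, overrides it with the clock's hard min or max for the MIN/PEAK forced levels, then pins the soft range to that single value. A sketch of the selection step under those assumptions; all numbers are made up for the example:

#include <stdio.h>

enum level { LEVEL_STANDARD, LEVEL_MIN_SCLK, LEVEL_PEAK };

struct clk_range { unsigned int min, max; };

static unsigned int pick_profile_freq(enum level level,
				      unsigned int pstate_freq,
				      const struct clk_range *range)
{
	unsigned int freq = pstate_freq;	/* default: fixed pstate value */

	if (level == LEVEL_PEAK)
		freq = range->max;
	else if (level == LEVEL_MIN_SCLK)
		freq = range->min;
	return freq;
}

int main(void)
{
	const struct clk_range sclk = { .min = 200, .max = 1400 };
	const unsigned int pstate = 700;	/* cf. RENOIR_UMD_PSTATE_GFXCLK */

	/* the driver then sets soft min == soft max == freq to pin the clock */
	printf("standard: %u MHz\n", pick_profile_freq(LEVEL_STANDARD, pstate, &sclk));
	printf("peak:     %u MHz\n", pick_profile_freq(LEVEL_PEAK, pstate, &sclk));
	return 0;
}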
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index ed15f5a0fd11..3d3cd546f0ad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -211,7 +211,7 @@ int smu_v12_0_mode2_reset(struct smu_context *smu)
 }
 
 int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max)
+			    uint32_t min, uint32_t max, bool automatic)
 {
 	int ret = 0;
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 7f3493b6c53c..51f1fa9789ab 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -24,7 +24,7 @@
 # It provides the smu management services for the driver.
 
 SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \
-	    smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o
+	    smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o smu_v13_0_12_ppt.o
 
 AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index f41ac6465f2a..6de653d2ed62 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -266,9 +266,31 @@ static int aldebaran_tables_init(struct smu_context *smu)
 	return 0;
 }
 
+static int aldebaran_select_plpd_policy(struct smu_context *smu, int level)
+{
+	struct amdgpu_device *adev = smu->adev;
+
+	/* The message only works on master die and NACK will be sent
+	 * back for other dies, only send it on master die.
+	 */
+	if (adev->smuio.funcs->get_socket_id(adev) ||
+	    adev->smuio.funcs->get_die_id(adev))
+		return 0;
+
+	if (level == XGMI_PLPD_DEFAULT)
+		return smu_cmn_send_smc_msg_with_param(
+			smu, SMU_MSG_GmiPwrDnControl, 0, NULL);
+	else if (level == XGMI_PLPD_DISALLOW)
+		return smu_cmn_send_smc_msg_with_param(
+			smu, SMU_MSG_GmiPwrDnControl, 1, NULL);
+	else
+		return -EINVAL;
+}
+
 static int aldebaran_allocate_dpm_context(struct smu_context *smu)
 {
 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+	struct smu_dpm_policy *policy;
 
 	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
 				       GFP_KERNEL);
@@ -276,6 +298,20 @@ static int aldebaran_allocate_dpm_context(struct smu_context *smu)
 		return -ENOMEM;
 	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
 
+	smu_dpm->dpm_policies =
+		kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL);
+
+	if (!smu_dpm->dpm_policies)
+		return -ENOMEM;
+
+	policy = &(smu_dpm->dpm_policies->policies[0]);
+	policy->policy_type = PP_PM_POLICY_XGMI_PLPD;
+	policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT);
+	policy->current_level = XGMI_PLPD_DEFAULT;
+	policy->set_policy = aldebaran_select_plpd_policy;
+	smu_cmn_generic_plpd_policy_desc(policy);
+	smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD);
+
 	return 0;
 }
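Note: the hunk above registers the XGMI per-link power-down control as a generic PM policy: the entry advertises its valid levels as a bitmask and carries a set_policy callback that programs the chosen level. A trimmed standalone sketch of that pattern; the structures are simplified stand-ins for the driver's smu_dpm_policy:

#include <stdio.h>

#define BIT(n) (1u << (n))

enum plpd_level { PLPD_DISALLOW, PLPD_DEFAULT, PLPD_OPTIMIZED };

struct dpm_policy {
	unsigned int level_mask;	/* which levels this ASIC accepts */
	int current_level;
	int (*set_policy)(int level);
};

static int select_plpd(int level)
{
	/* stand-in for the SMU_MSG_GmiPwrDnControl message */
	printf("program PLPD level %d\n", level);
	return 0;
}

static int apply_policy(struct dpm_policy *p, int level)
{
	if (!(p->level_mask & BIT(level)))
		return -1;	/* level not advertised for this ASIC */
	if (p->set_policy(level))
		return -1;
	p->current_level = level;
	return 0;
}

int main(void)
{
	struct dpm_policy plpd = {
		.level_mask = BIT(PLPD_DISALLOW) | BIT(PLPD_DEFAULT),
		.current_level = PLPD_DEFAULT,
		.set_policy = select_plpd,
	};

	if (apply_policy(&plpd, PLPD_OPTIMIZED))
		printf("optimized PLPD rejected, staying at level %d\n",
		       plpd.current_level);
	return 0;
}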
@@ -759,8 +795,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
 
 	switch (type) {
 	case SMU_OD_SCLK:
-		*offset += sysfs_emit_at(buf, *offset, "%s:\n", "GFXCLK");
-		fallthrough;
+		*offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_SCLK");
+		*offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n",
+					 pstate_table->gfxclk_pstate.curr.min,
+					 pstate_table->gfxclk_pstate.curr.max);
+		return 0;
 	case SMU_SCLK:
 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value);
 		if (ret) {
@@ -788,8 +827,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
 		break;
 
 	case SMU_OD_MCLK:
-		*offset += sysfs_emit_at(buf, *offset, "%s:\n", "MCLK");
-		fallthrough;
+		*offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_MCLK");
+		*offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n",
+					 pstate_table->uclk_pstate.curr.min,
+					 pstate_table->uclk_pstate.curr.max);
+		return 0;
 	case SMU_MCLK:
 		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value);
 		if (ret) {
@@ -850,7 +892,6 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
 	}
 
 	switch (type) {
-	case SMU_OD_SCLK:
 	case SMU_SCLK:
 		for (i = 0; i < display_levels; i++) {
 			clock_mhz = freq_values[i];
 		}
 		break;
 
-	case SMU_OD_MCLK:
 	case SMU_MCLK:
 	case SMU_SOCCLK:
 	case SMU_FCLK:
@@ -1230,6 +1270,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
 	struct smu_13_0_dpm_table *gfx_table =
 		&dpm_context->dpm_tables.gfx_table;
 	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+	int r;
 
 	/* Disable determinism if switching to another mode */
 	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
@@ -1242,7 +1283,11 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
 	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
 		return 0;
-
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		r = smu_v13_0_set_performance_level(smu, level);
+		if (!r)
+			smu_v13_0_reset_custom_level(smu);
+		return r;
 	case AMD_DPM_FORCED_LEVEL_HIGH:
 	case AMD_DPM_FORCED_LEVEL_LOW:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1257,9 +1302,10 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
 }
 
 static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
-					  enum smu_clk_type clk_type,
-					  uint32_t min,
-					  uint32_t max)
+						 enum smu_clk_type clk_type,
+						 uint32_t min,
+						 uint32_t max,
+						 bool automatic)
 {
 	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
 	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
@@ -1288,7 +1334,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
 			return 0;
 
 		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
-							    min, max);
+							    min, max, false);
 		if (!ret) {
 			pstate_table->gfxclk_pstate.curr.min = min;
 			pstate_table->gfxclk_pstate.curr.max = max;
@@ -1308,7 +1354,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
 		/* Restore default min/max clocks and enable determinism */
 		min_clk = dpm_context->dpm_tables.gfx_table.min;
 		max_clk = dpm_context->dpm_tables.gfx_table.max;
-		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
 		if (!ret) {
 			usleep_range(500, 1000);
 			ret = smu_cmn_send_smc_msg_with_param(smu,
@@ -1382,7 +1428,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
 			min_clk = dpm_context->dpm_tables.gfx_table.min;
 			max_clk = dpm_context->dpm_tables.gfx_table.max;
 
-			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+			ret = aldebaran_set_soft_freq_limited_range(
+				smu, SMU_GFXCLK, min_clk, max_clk, false);
+			if (ret)
+				return ret;
+			smu_v13_0_reset_custom_level(smu);
 		}
 		break;
 	case PP_OD_COMMIT_DPM_TABLE:
@@ -1401,7 +1451,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
 			min_clk = pstate_table->gfxclk_pstate.custom.min;
 			max_clk = pstate_table->gfxclk_pstate.custom.max;
 
-			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
 		}
 		break;
 	default:
@@ -1581,11 +1631,11 @@ out:
 	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
 }
 
-static bool aldebaran_is_baco_supported(struct smu_context *smu)
+static int aldebaran_get_bamaco_support(struct smu_context *smu)
 {
 	/* aldebaran does not support baco */
 
-	return false;
+	return 0;
 }
 
 static int aldebaran_set_df_cstate(struct smu_context *smu,
@@ -1603,29 +1653,6 @@ static int aldebaran_set_df_cstate(struct smu_context *smu,
 	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 
-static int aldebaran_select_xgmi_plpd_policy(struct smu_context *smu,
-					     enum pp_xgmi_plpd_mode mode)
-{
-	struct amdgpu_device *adev = smu->adev;
-
-	/* The message only works on master die and NACK will be sent
-	   back for other dies, only send it on master die */
-	if (adev->smuio.funcs->get_socket_id(adev) ||
-	    adev->smuio.funcs->get_die_id(adev))
-		return 0;
-
-	if (mode == XGMI_PLPD_DEFAULT)
-		return smu_cmn_send_smc_msg_with_param(smu,
-						       SMU_MSG_GmiPwrDnControl,
-						       0, NULL);
-	else if (mode == XGMI_PLPD_DISALLOW)
-		return smu_cmn_send_smc_msg_with_param(smu,
-						       SMU_MSG_GmiPwrDnControl,
-						       1, NULL);
-	else
-		return -EINVAL;
-}
-
 static const struct throttling_logging_label {
 	uint32_t feature_mask;
 	const char *label;
@@ -1714,7 +1741,6 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
 	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
-	gpu_metrics->average_mm_activity = 0;
 
 	/* Valid power data is available only from primary die */
 	if (aldebaran_is_primary(smu)) {
@@ -1846,7 +1872,6 @@ static int aldebaran_mode1_reset(struct smu_context *smu)
 	u32 fatal_err, param;
 	int ret = 0;
 	struct amdgpu_device *adev = smu->adev;
-	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
 	fatal_err = 0;
 	param = SMU_RESET_MODE_1;
@@ -1859,8 +1884,8 @@ static int aldebaran_mode1_reset(struct smu_context *smu)
 	} else {
 		/* fatal error triggered by ras, PMFW supports the flag
 		   from 68.44.0 */
-		if ((smu->smc_fw_version >= 0x00442c00) && ras &&
-		    atomic_read(&ras->in_recovery))
+		if ((smu->smc_fw_version >= 0x00442c00) &&
+		    amdgpu_ras_get_fed_status(adev))
 			fatal_err = 1;
 
 		param |= (fatal_err << 16);
@@ -1882,7 +1907,8 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
 	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
 					       SMU_MSG_GfxDeviceDriverReset);
-
+	if (index < 0)
+		return -EINVAL;
 	mutex_lock(&smu->message_lock);
 	if (smu->smc_fw_version >= 0x00441400) {
 		ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
@@ -1959,11 +1985,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
 	return true;
 }
 
-static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
-{
-	return true;
-}
-
 static int aldebaran_set_mp1_state(struct smu_context *smu,
 				   enum pp_mp1_state mp1_state)
 {
@@ -2059,18 +2080,16 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
 	.register_irq_handler = smu_v13_0_register_irq_handler,
 	.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
 	.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
-	.baco_is_support = aldebaran_is_baco_supported,
+	.get_bamaco_support = aldebaran_get_bamaco_support,
 	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
 	.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
 	.set_df_cstate = aldebaran_set_df_cstate,
-	.select_xgmi_plpd_policy = aldebaran_select_xgmi_plpd_policy,
 	.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
 	.get_gpu_metrics = aldebaran_get_gpu_metrics,
 	.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
-	.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
 	.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
 	.mode1_reset = aldebaran_mode1_reset,
 	.set_mp1_state = aldebaran_set_mp1_state,
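Note: aldebaran_mode1_reset() above folds a fatal-error flag into bit 16 of the reset message argument when RAS reports a fatal error and the firmware is new enough (the 0x00442c00 threshold comes from the hunk itself). A standalone sketch of that argument packing; constants other than the version threshold are illustrative:

#include <stdint.h>
#include <stdio.h>

#define SMU_RESET_MODE_1	1u
#define FATAL_ERR_SHIFT		16

static uint32_t build_mode1_reset_param(int fed, uint32_t fw_version)
{
	uint32_t fatal_err = 0;
	uint32_t param = SMU_RESET_MODE_1;	/* reset mode in the low bits */

	/* flag is only understood by PMFW 68.44.0 and newer */
	if (fw_version >= 0x00442c00 && fed)
		fatal_err = 1;

	return param | (fatal_err << FATAL_ERR_SHIFT);
}

int main(void)
{
	printf("normal reset:      0x%08x\n", build_mode1_reset_param(0, 0x00450000));
	printf("fatal-error reset: 0x%08x\n", build_mode1_reset_param(1, 0x00450000));
	return 0;
}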
a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -58,6 +58,7 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin"); +MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); @@ -79,8 +80,8 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 #define smnPCIE_LC_SPEED_CNTL 0x11140290 -#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 -#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 @@ -92,7 +93,6 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char fw_name[30]; char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; @@ -105,9 +105,12 @@ int smu_v13_0_init_microcode(struct smu_context *smu) amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - - err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; @@ -270,9 +273,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu_major = (smu_version >> 16) & 0xff; smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; - if (smu->is_apu || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6)) - adev->pm.fw_version = smu_version; + adev->pm.fw_version = smu_version; /* only for dGPU w/ SMU13*/ if (adev->pm.fw) @@ -530,10 +531,12 @@ int smu_v13_0_fini_smc_tables(struct smu_context *smu) smu_table->watermarks_table = NULL; smu_table->metrics_time = 0; + kfree(smu_dpm->dpm_policies); kfree(smu_dpm->dpm_context); kfree(smu_dpm->golden_dpm_context); kfree(smu_dpm->dpm_current_power_state); kfree(smu_dpm->dpm_request_power_state); + smu_dpm->dpm_policies = NULL; smu_dpm->dpm_context = NULL; smu_dpm->golden_dpm_context = NULL; smu_dpm->dpm_context_size = 0; @@ -712,18 +715,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu) return ret; } -int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) -{ - int ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); - if (ret) - dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!"); - - return ret; -} - int smu_v13_0_set_driver_table_location(struct smu_context *smu) { struct smu_table *driver_table = &smu->smu_table.driver_table; @@ -764,18 +755,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu) return ret; } -int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count) -{ - int ret = 0; - - if (!smu->pm_enabled) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); - - return ret; -} - int smu_v13_0_set_allowed_mask(struct smu_context *smu) { struct smu_feature *feature = &smu->smu_feature; @@ -1076,56 +1055,6 @@ int 
smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value) } -int -smu_v13_0_display_clock_voltage_request(struct smu_context *smu, - struct pp_display_clock_request - *clock_req) -{ - enum amd_pp_clock_type clk_type = clock_req->clock_type; - int ret = 0; - enum smu_clk_type clk_select = 0; - uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; - - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) || - smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { - switch (clk_type) { - case amd_pp_dcef_clock: - clk_select = SMU_DCEFCLK; - break; - case amd_pp_disp_clock: - clk_select = SMU_DISPCLK; - break; - case amd_pp_pixel_clock: - clk_select = SMU_PIXCLK; - break; - case amd_pp_phy_clock: - clk_select = SMU_PHYCLK; - break; - case amd_pp_mem_clock: - clk_select = SMU_UCLK; - break; - default: - dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__); - ret = -EINVAL; - break; - } - - if (ret) - goto failed; - - if (clk_select == SMU_UCLK && smu->disable_uclk_switch) - return 0; - - ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0); - - if (clk_select == SMU_UCLK) - smu->hard_min_uclk_req_from_dal = clk_freq; - } - -failed: - return ret; -} - uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu) { if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) @@ -1229,7 +1158,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t tach_period; int ret; - if (!speed) + if (!speed || speed > UINT_MAX/8) return -EINVAL; ret = smu_v13_0_auto_fan_control(smu, 0); @@ -1321,11 +1250,11 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev, return 0; } -static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu) +void smu_v13_0_interrupt_work(struct smu_context *smu) { - return smu_cmn_send_smc_msg(smu, - SMU_MSG_ReenableAcDcInterrupt, - NULL); + smu_cmn_send_smc_msg(smu, + SMU_MSG_ReenableAcDcInterrupt, + NULL); } #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ @@ -1378,12 +1307,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, switch (ctxid) { case SMU_IH_INTERRUPT_CONTEXT_ID_AC: dev_dbg(adev->dev, "Switched to AC mode!\n"); - smu_v13_0_ack_ac_dc_interrupt(smu); + schedule_work(&smu->interrupt_work); adev->pm.ac_power = true; break; case SMU_IH_INTERRUPT_CONTEXT_ID_DC: dev_dbg(adev->dev, "Switched to DC mode!\n"); - smu_v13_0_ack_ac_dc_interrupt(smu); + schedule_work(&smu->interrupt_work); adev->pm.ac_power = false; break; case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING: @@ -1559,22 +1488,9 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t clock_limit; if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) @@ -1622,7 +1538,8 @@ failed: int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1637,7 +1554,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; 
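/*
 * Argument layout for the SetSoftMin/MaxByFreq messages built below:
 * clock id in bits 31..16, frequency in MHz in bits 15..0, e.g.
 * clk_id 2 at 1800 MHz -> (2 << 16) | 1800 = 0x00020708. With the
 * new "automatic" flag the driver pins nothing and instead hands the
 * firmware the widest window: 0xffff for the soft max, 0 for the
 * soft min.
 */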
if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1645,7 +1565,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1656,45 +1579,6 @@ out: return ret; } -int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) -{ - int ret = 0, clk_id = 0; - uint32_t param; - - if (min <= 0 && max <= 0) - return -EINVAL; - - if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) - return 0; - - clk_id = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_CLK, - clk_type); - if (clk_id < 0) - return clk_id; - - if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, - param, NULL); - if (ret) - return ret; - } - - if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - param, NULL); - if (ret) - return ret; - } - - return ret; -} - int smu_v13_0_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { @@ -1722,6 +1606,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, uint32_t dclk_min = 0, dclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; int ret = 0, i; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1753,6 +1638,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, dclk_max = dclk_table->max; fclk_min = fclk_table->min; fclk_max = fclk_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1794,13 +1680,15 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, vclk_min = vclk_max = 0; dclk_min = dclk_max = 0; fclk_min = fclk_max = 0; + auto_level = false; } if (sclk_min && sclk_max) { ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + auto_level); if (ret) return ret; @@ -1812,7 +1700,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; @@ -1824,7 +1713,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; @@ -1839,7 +1729,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, i ? SMU_VCLK1 : SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + auto_level); if (ret) return ret; } @@ -1854,7 +1745,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, i ? 
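/* VCN instance 1 runs on its own decode clock, hence SMU_DCLK1 */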
SMU_DCLK1 : SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + auto_level); if (ret) return ret; } @@ -1866,7 +1758,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + auto_level); if (ret) return ret; @@ -1894,6 +1787,40 @@ int smu_v13_0_set_power_source(struct smu_context *smu, NULL); } +int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + int ret = 0; + + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + *value = smu->smu_table.boot_values.uclk; + break; + case SMU_FCLK: + *value = smu->smu_table.boot_values.fclk; + break; + case SMU_GFXCLK: + case SMU_SCLK: + *value = smu->smu_table.boot_values.gfxclk; + break; + case SMU_SOCCLK: + *value = smu->smu_table.boot_values.socclk; + break; + case SMU_VCLK: + *value = smu->smu_table.boot_values.vclk; + break; + case SMU_DCLK: + *value = smu->smu_table.boot_values.dclk; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type, uint16_t level, uint32_t *value) @@ -1905,7 +1832,7 @@ int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, return -EINVAL; if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) - return 0; + return smu_v13_0_get_boot_freq_by_index(smu, clk_type, value); clk_id = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK, @@ -2068,21 +1995,18 @@ int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu) } int smu_v13_0_set_vcn_enable(struct smu_context *smu, - bool enable) + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; - int i, ret = 0; + int ret = 0; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; + if (adev->vcn.harvest_config & (1 << inst)) + return ret; - ret = smu_cmn_send_smc_msg_with_param(smu, enable ? - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, - i << 16U, NULL); - if (ret) - return ret; - } + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, + inst << 16U, NULL); return ret; } @@ -2247,7 +2171,7 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu, if (state == SMU_BACO_STATE_ENTER) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? 
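/*
 * BACO is "bus active, chip off"; BAMACO additionally keeps the
 * memory rails powered. The sequence is now picked from the
 * resolved runtime-PM mode instead of being re-derived here from
 * the MACO capability and the amdgpu_runtime_pm module parameter.
 */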
BACO_SEQ_BAMACO : BACO_SEQ_BACO, NULL); } else { @@ -2268,33 +2192,36 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu, return ret; } -bool smu_v13_0_baco_is_support(struct smu_context *smu) +int smu_v13_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; + int bamaco_support = 0; if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) - return false; + return 0; + + if (smu_baco->maco_support) + bamaco_support |= MACO_SUPPORT; /* return true if ASIC is in BACO state already */ if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; + return bamaco_support |= BACO_SUPPORT; if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; + return 0; - return true; + return (bamaco_support |= BACO_SUPPORT); } int smu_v13_0_baco_enter(struct smu_context *smu) { - struct smu_baco_context *smu_baco = &smu->smu_baco; struct amdgpu_device *adev = smu->adev; int ret; if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { return smu_v13_0_baco_set_armd3_sequence(smu, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); } else { ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); @@ -2561,3 +2488,13 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, return ret; } + +void smu_v13_0_reset_custom_level(struct smu_context *smu) +{ + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + + pstate_table->uclk_pstate.custom.min = 0; + pstate_table->uclk_pstate.custom.max = 0; + pstate_table->gfxclk_pstate.custom.min = 0; + pstate_table->gfxclk_pstate.custom.max = 0; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 67117ced7c6a..5a9711e8cf68 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -107,6 +107,8 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 +#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12 #define LINK_SPEED_MAX 3 @@ -124,7 +126,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), - MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), @@ -138,7 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), - MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0), MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), @@ -147,7 +149,7 @@ static struct cmn2asic_msg_mapping 
smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), - MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), + MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 0), MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), @@ -736,19 +738,6 @@ static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v13_0_0_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - SkuTable_t *skutable = &pptable->SkuTable; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); -} - static int smu_v13_0_0_system_features_control(struct smu_context *smu, bool en) { @@ -847,6 +836,10 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_MEMACTIVITY: *value = metrics->AverageUclkActivity; break; + case METRICS_AVERAGE_VCNACTIVITY: + *value = max(metrics->Vcn0ActivityPercentage, + metrics->Vcn1ActivityPercentage); + break; case METRICS_AVERAGE_SOCKETPOWER: *value = metrics->AverageSocketPower << 8; break; @@ -973,6 +966,12 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_0_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_0_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, @@ -1143,6 +1142,14 @@ static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP: + od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp; + od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1463,6 +1470,42 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, 
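/* sysfs_emit_at() writes at the running 'size' offset, so these
 * successive calls append to a single PAGE_SIZE sysfs buffer */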
size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmStopTemp); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1560,6 +1603,16 @@ static int smu_v13_0_0_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + od_table->OverDriveTable.FanZeroRpmStopTemp = + boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; default: dev_info(adev->dev, "Invalid table index: %ld\n", input); return -EINVAL; @@ -1853,6 +1906,48 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmStopTemp = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v13_0_0_od_restore_table_single(smu, input[0]); @@ -1975,7 +2070,8 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -2122,7 +2218,11 @@ static void smu_v13_0_0_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET | + 
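/*
 * each RETRIEVE/SET pair advertises one readable/writable zero-RPM
 * attribute; userspace only sees the knobs whose bits are set here
 */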
OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET; } static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) @@ -2188,6 +2288,10 @@ static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + user_od_table->OverDriveTable.FanZeroRpmStopTemp = + user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp; } smu_v13_0_0_set_supported_od_feature_mask(smu); @@ -2477,97 +2581,132 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu, return size; } -static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, - long *input, - uint32_t size) +#define SMU_13_0_0_CUSTOM_PARAMS_COUNT 9 +#define SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_13_0_0_CUSTOM_PARAMS_SIZE (SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_0_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; - u32 workload_mask; + int ret, idx; - smu->power_profile_mode = input[size]; - - if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } - - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_FPS = input[1]; - activity_monitor->Gfx_MinActiveFreqType = input[2]; - activity_monitor->Gfx_MinActiveFreq = input[3]; - activity_monitor->Gfx_BoosterFreqType = input[4]; - activity_monitor->Gfx_BoosterFreq = input[5]; - activity_monitor->Gfx_PD_Data_limit_c = input[6]; - activity_monitor->Gfx_PD_Data_error_coeff = input[7]; - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; - break; - case 1: /* Fclk */ - activity_monitor->Fclk_FPS = input[1]; - activity_monitor->Fclk_MinActiveFreqType = input[2]; - activity_monitor->Fclk_MinActiveFreq = input[3]; - activity_monitor->Fclk_BoosterFreqType = input[4]; - activity_monitor->Fclk_BoosterFreq = input[5]; - activity_monitor->Fclk_PD_Data_limit_c = input[6]; - activity_monitor->Fclk_PD_Data_error_coeff = input[7]; - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; - break; - } + idx = 0 * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_FPS = input[idx + 1]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 2]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 3]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreq = input[idx + 5]; + activity_monitor->Gfx_PD_Data_limit_c = input[idx 
+ 6]; + activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8]; + } + idx = 1 * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_FPS = input[idx + 1]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 2]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 3]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 4]; + activity_monitor->Fclk_BoosterFreq = input[idx + 5]; + activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8]; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), - true); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); - return ret; - } + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); + return ret; +} - if (workload_type < 0) - return -EINVAL; +static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int workload_type, ret, idx = -1, i; - workload_mask = 1 << workload_type; + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); /* Add optimizations for SMU13.0.0/10. 
Reuse the power saving profile */ - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) { - if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && - ((smu->adev->pm.fw_version == 0x004e6601) || - (smu->adev->pm.fw_version >= 0x004e7300))) || - (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && - smu->adev->pm.fw_version >= 0x00504500)) { - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_POWERSAVING); - if (workload_type >= 0) - workload_mask |= 1 << workload_type; + if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) && + ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && + ((smu->adev->pm.fw_version == 0x004e6601) || + (smu->adev->pm.fw_version >= 0x004e7300))) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + smu->adev->pm.fw_version >= 0x00504500))) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_POWERSAVING); + if (workload_type >= 0) + backend_workload_mask |= 1 << workload_type; + } + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SMU_13_0_0_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SMU_13_0_0_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SMU_13_0_0_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; } + ret = smu_v13_0_0_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); + if (ret) { + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SMU_13_0_0_CUSTOM_PARAMS_SIZE); } - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - workload_mask, - NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetWorkloadMask, + backend_workload_mask, + NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } + + return ret; } static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) @@ -2781,10 +2920,9 @@ static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu, uint32_t *param) { struct amdgpu_device *adev = smu->adev; - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); if ((smu->smc_fw_version >= supported_version) && - ras && atomic_read(&ras->in_recovery)) + amdgpu_ras_get_fed_status(adev)) /* Set RAS fatal error reset flag */ *param = 1 << 16; else @@ -3018,7 +3156,6 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .i2c_init = smu_v13_0_0_i2c_control_init, .i2c_fini = smu_v13_0_0_i2c_control_fini, .is_dpm_running = smu_v13_0_0_is_dpm_running, - .dump_pptable = smu_v13_0_0_dump_pptable, .init_microcode = smu_v13_0_init_microcode, .load_microcode = smu_v13_0_load_microcode, .fini_microcode = smu_v13_0_fini_microcode, @@ -3076,7 +3213,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .set_tool_table_location = smu_v13_0_set_tool_table_location, .deep_sleep_control = smu_v13_0_deep_sleep_control, .gfx_ulv_control = 
smu_v13_0_gfx_ulv_control, - .baco_is_support = smu_v13_0_baco_is_support, + .get_bamaco_support = smu_v13_0_get_bamaco_support, .baco_enter = smu_v13_0_baco_enter, .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, @@ -3093,6 +3230,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check, .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow, .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges, + .interrupt_work = smu_v13_0_interrupt_work, }; void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) @@ -3106,4 +3244,9 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) smu->workload_map = smu_v13_0_0_workload_map; smu->smc_driver_if_version = SMU13_0_0_DRIVER_IF_VERSION; smu_v13_0_0_set_smu_mailbox_registers(smu); + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 10) && + !amdgpu_device_has_display_hardware(smu->adev)) + smu->adev->pm.pp_feature &= ~PP_GFXOFF_MASK; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c new file mode 100644 index 000000000000..e0d356f93ab0 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -0,0 +1,537 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#define SWSMU_CODE_LAYER_L2 + +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "smu_v13_0_12_pmfw.h" +#include "smu_v13_0_6_ppt.h" +#include "smu_v13_0_12_ppsmc.h" +#include "smu_v13_0.h" +#include "amdgpu_xgmi.h" +#include "amdgpu_fru_eeprom.h" +#include <linux/pci.h> +#include "smu_cmn.h" + +#undef MP1_Public +#undef smnMP1_FIRMWARE_FLAGS + +/* + * DO NOT use these for err/warn/info/debug messages. + * Use dev_err, dev_warn, dev_info and dev_dbg instead. + * They are more MGPU friendly. 
+ */ +#undef pr_err +#undef pr_warn +#undef pr_info +#undef pr_debug + +#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \ + [smu_feature] = { 1, (smu_13_0_12_feature) } + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE \ + (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \ + FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_FCLK)) + +#define NUM_JPEG_RINGS_FW 10 +#define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \ + (ARRAY_SIZE(gpu_metrics->xcp_stats[0].jpeg_busy) / 4) + +const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = { + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_SOC_PCC_BIT, FEATURE_SOC_PCC), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT), +}; + +// clang-format off +const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), + MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0), + MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1), + MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI), + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), + 
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), + MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0), + MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0), + MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0), + MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0), + MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0), + MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0), + MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0), + MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0), + MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0), + MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), + MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1), + MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1), + MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1), + MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1), + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0), + MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0), + MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0), + MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0), + MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI), + MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI), + MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), + MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), + MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), + MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), + MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1), +}; + +static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu, + uint64_t *feature_mask) +{ + int ret; + + ret = smu_cmn_get_enabled_mask(smu, feature_mask); + + if (ret == -EIO) { + *feature_mask = 0; + ret = 0; + } + + return ret; +} + +static int smu_v13_0_12_fru_get_product_info(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) +{ + struct amdgpu_fru_info *fru_info; + struct amdgpu_device *adev = smu->adev; + + if (!adev->fru_info) { + adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL); + if (!adev->fru_info) + return -ENOMEM; + } + + fru_info = adev->fru_info; + strscpy(fru_info->product_number, static_metrics->ProductInfo.ModelNumber, + sizeof(fru_info->product_number)); + strscpy(fru_info->product_name, static_metrics->ProductInfo.Name, + sizeof(fru_info->product_name)); + strscpy(fru_info->serial, static_metrics->ProductInfo.Serial, + sizeof(fru_info->serial)); + strscpy(fru_info->manufacturer_name, static_metrics->ProductInfo.ManufacturerName, + sizeof(fru_info->manufacturer_name)); + strscpy(fru_info->fru_id, static_metrics->ProductInfo.FruId, + sizeof(fru_info->fru_id)); + + return 0; +} + +int smu_v13_0_12_get_max_metrics_size(void) +{ + return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t)); +} + +int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + uint32_t table_version; + int ret, i; + + if (!pptable->Init) { + ret = 
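/*
 * one-shot initialization: the static metrics table carries
 * board-invariant data (power and clock limits, frequency tables,
 * serial numbers), so it is fetched from the firmware once and
 * cached in the driver pptable behind the Init flag.
 */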
smu_v13_0_6_get_static_metrics_table(smu); + if (ret) + return ret; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion, + &table_version); + if (ret) + return ret; + smu_table->tables[SMU_TABLE_SMU_METRICS].version = + table_version; + + pptable->MaxSocketPowerLimit = + SMUQ10_ROUND(static_metrics->MaxSocketPowerLimit); + pptable->MaxGfxclkFrequency = + SMUQ10_ROUND(static_metrics->MaxGfxclkFrequency); + pptable->MinGfxclkFrequency = + SMUQ10_ROUND(static_metrics->MinGfxclkFrequency); + + for (i = 0; i < 4; ++i) { + pptable->FclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->FclkFrequencyTable[i]); + pptable->UclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->UclkFrequencyTable[i]); + pptable->SocclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->SocclkFrequencyTable[i]); + pptable->VclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->VclkFrequencyTable[i]); + pptable->DclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->DclkFrequencyTable[i]); + pptable->LclkFrequencyTable[i] = + SMUQ10_ROUND(static_metrics->LclkFrequencyTable[i]); + } + + /* use AID0 serial number by default */ + pptable->PublicSerialNumber_AID = + static_metrics->PublicSerialNumber_AID[0]; + ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics); + if (ret) + return ret; + + pptable->Init = true; + } + + return 0; +} + +bool smu_v13_0_12_is_dpm_running(struct smu_context *smu) +{ + int ret; + uint64_t feature_enabled; + + ret = smu_v13_0_12_get_enabled_mask(smu, &feature_enabled); + + if (ret) + return false; + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + struct amdgpu_device *adev = smu->adev; + int ret = 0; + int xcc_id; + + /* For clocks with multiple instances, only report the first one */ + switch (member) { + case METRICS_CURR_GFXCLK: + case METRICS_AVERAGE_GFXCLK: + xcc_id = GET_INST(GC, 0); + *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]); + break; + case METRICS_CURR_SOCCLK: + case METRICS_AVERAGE_SOCCLK: + *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]); + break; + case METRICS_CURR_UCLK: + case METRICS_AVERAGE_UCLK: + *value = SMUQ10_ROUND(metrics->UclkFrequency); + break; + case METRICS_CURR_VCLK: + *value = SMUQ10_ROUND(metrics->VclkFrequency[0]); + break; + case METRICS_CURR_DCLK: + *value = SMUQ10_ROUND(metrics->DclkFrequency[0]); + break; + case METRICS_CURR_FCLK: + *value = SMUQ10_ROUND(metrics->FclkFrequency); + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = SMUQ10_ROUND(metrics->SocketGfxBusy); + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization); + break; + case METRICS_CURR_SOCKETPOWER: + *value = SMUQ10_ROUND(metrics->SocketPower) << 8; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + /* This is the max of all VRs and not just SOC VR. + * No need to define another data type for the same. 
+ */ + case METRICS_TEMPERATURE_VRSOC: + *value = SMUQ10_ROUND(metrics->MaxVrTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + default: + *value = UINT_MAX; + break; + } + + return ret; +} + +ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) +{ + const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW; + struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct amdgpu_device *adev = smu->adev; + MetricsTable_t *metrics; + int inst, j, k, idx; + u32 inst_mask; + + metrics = (MetricsTable_t *)smu_metrics; + xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table; + smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instance */ + inst = GET_INST(VCN, k); + for (j = 0; j < num_jpeg_rings; ++j) { + xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(metrics-> + JpegBusy[(inst * num_jpeg_rings) + j]); + } + xcp_metrics->vcn_busy[idx] = + SMUQ10_ROUND(metrics->VcnBusy[inst]); + xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND( + metrics->VclkFrequency[inst]); + xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND( + metrics->DclkFrequency[inst]); + xcp_metrics->current_socclk[idx] = SMUQ10_ROUND( + metrics->SocclkFrequency[inst]); + + idx++; + } + + xcp_metrics->current_uclk = + SMUQ10_ROUND(metrics->UclkFrequency); + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + xcp_metrics->current_gfxclk[idx] = SMUQ10_ROUND(metrics->GfxclkFrequency[inst]); + xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(metrics->GfxBusy[inst]); + xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { + xcp_metrics->gfx_below_host_limit_ppt_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); + xcp_metrics->gfx_below_host_limit_thm_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); + xcp_metrics->gfx_low_utilization_acc[idx] = SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); + xcp_metrics->gfx_below_host_limit_total_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); + } + idx++; + } + + return sizeof(*xcp_metrics); +} + +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_8 *gpu_metrics = + (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; + int ret = 0, xcc_id, inst, i, j, k, idx; + struct amdgpu_device *adev = smu->adev; + u8 num_jpeg_rings_gpu_metrics; + MetricsTable_t *metrics; + struct amdgpu_xcp *xcp; + u32 inst_mask; + + metrics = (MetricsTable_t *)smu_metrics; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); + + gpu_metrics->temperature_hotspot = + SMUQ10_ROUND(metrics->MaxSocketTemperature); + /* Individual HBM stack temperature is not reported */ + gpu_metrics->temperature_mem = + SMUQ10_ROUND(metrics->MaxHbmTemperature); + /* Reports max temperature of all voltage rails */ + gpu_metrics->temperature_vrsoc = + SMUQ10_ROUND(metrics->MaxVrTemperature); + + gpu_metrics->average_gfx_activity = + SMUQ10_ROUND(metrics->SocketGfxBusy); + gpu_metrics->average_umc_activity = + SMUQ10_ROUND(metrics->DramBandwidthUtilization); + + gpu_metrics->mem_max_bandwidth = + SMUQ10_ROUND(metrics->MaxDramBandwidth); + + gpu_metrics->curr_socket_power = 
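/*
 * firmware metrics are Q10 fixed point (10 fractional bits); the
 * SMUQ10_ROUND() conversion is round-to-nearest:
 *   SMUQ10_TO_UINT(x) = x >> 10
 *   SMUQ10_FRAC(x)    = x & 0x3ff
 *   SMUQ10_ROUND(x)   = SMUQ10_TO_UINT(x) + (SMUQ10_FRAC(x) >= 0x200)
 * e.g. 0x2a66 = 10854/1024 ~= 10.6, which rounds to 11
 */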
+ SMUQ10_ROUND(metrics->SocketPower); + /* Energy counter reported in 15.259uJ (2^-16) units */ + gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc; + + for (i = 0; i < MAX_GFX_CLKS; i++) { + xcc_id = GET_INST(GC, i); + if (xcc_id >= 0) + gpu_metrics->current_gfxclk[i] = + SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]); + + if (i < MAX_CLKS) { + gpu_metrics->current_socclk[i] = + SMUQ10_ROUND(metrics->SocclkFrequency[i]); + inst = GET_INST(VCN, i); + if (inst >= 0) { + gpu_metrics->current_vclk0[i] = + SMUQ10_ROUND(metrics->VclkFrequency[inst]); + gpu_metrics->current_dclk0[i] = + SMUQ10_ROUND(metrics->DclkFrequency[inst]); + } + } + } + + gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency); + + /* Total accumulated cycle counter */ + gpu_metrics->accumulation_counter = metrics->AccumulationCounter; + + /* Accumulated throttler residencies */ + gpu_metrics->prochot_residency_acc = metrics->ProchotResidencyAcc; + gpu_metrics->ppt_residency_acc = metrics->PptResidencyAcc; + gpu_metrics->socket_thm_residency_acc = metrics->SocketThmResidencyAcc; + gpu_metrics->vr_thm_residency_acc = metrics->VrThmResidencyAcc; + gpu_metrics->hbm_thm_residency_acc = metrics->HbmThmResidencyAcc; + + /* Clock Lock Status. Each bit corresponds to each GFXCLK instance */ + gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0); + + gpu_metrics->pcie_link_width = metrics->PCIeLinkWidth; + gpu_metrics->pcie_link_speed = + pcie_gen_to_speed(metrics->PCIeLinkSpeed); + gpu_metrics->pcie_bandwidth_acc = + SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]); + gpu_metrics->pcie_bandwidth_inst = + SMUQ10_ROUND(metrics->PcieBandwidth[0]); + gpu_metrics->pcie_l0_to_recov_count_acc = metrics->PCIeL0ToRecoveryCountAcc; + gpu_metrics->pcie_replay_count_acc = metrics->PCIenReplayAAcc; + gpu_metrics->pcie_replay_rover_count_acc = + metrics->PCIenReplayARolloverCountAcc; + gpu_metrics->pcie_nak_sent_count_acc = metrics->PCIeNAKSentCountAcc; + gpu_metrics->pcie_nak_rcvd_count_acc = metrics->PCIeNAKReceivedCountAcc; + gpu_metrics->pcie_lc_perf_other_end_recovery = metrics->PCIeOtherEndRecoveryAcc; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + gpu_metrics->gfx_activity_acc = SMUQ10_ROUND(metrics->SocketGfxBusyAcc); + gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc); + + for (i = 0; i < NUM_XGMI_LINKS; i++) { + j = amdgpu_xgmi_get_ext_link(adev, i); + if (j < 0 || j >= NUM_XGMI_LINKS) + continue; + gpu_metrics->xgmi_read_data_acc[j] = + SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]); + gpu_metrics->xgmi_write_data_acc[j] = + SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]); + ret = amdgpu_get_xgmi_link_status(adev, i); + if (ret >= 0) + gpu_metrics->xgmi_link_status[j] = ret; + } + + gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; + + num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics); + for_each_xcp(adev->xcp_mgr, xcp, i) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) { + gpu_metrics->xcp_stats[i].jpeg_busy + [(idx * num_jpeg_rings_gpu_metrics) + j] = + SMUQ10_ROUND(metrics->JpegBusy + [(inst * NUM_JPEG_RINGS_FW) + j]); + } + gpu_metrics->xcp_stats[i].vcn_busy[idx] = + SMUQ10_ROUND(metrics->VcnBusy[inst]); + idx++; + } + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = 
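/* translate the logical instance id to the physical GC instance */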
GET_INST(GC, k); + gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = + SMUQ10_ROUND(metrics->GfxBusy[inst]); + gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = + SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); + } + idx++; + } + } + + gpu_metrics->xgmi_link_width = metrics->XgmiWidth; + gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate; + + gpu_metrics->firmware_timestamp = metrics->Timestamp; + + *table = (void *)gpu_metrics; + + return sizeof(*gpu_metrics); +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 949131bd1ecb..b081ae3e8f43 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -227,14 +227,16 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) int ret = 0; if (!en && !adev->in_s0ix) { - /* Adds a GFX reset as workaround just before sending the - * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering - * an invalid state. - */ - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, - SMU_RESET_MODE_2, NULL); - if (ret) - return ret; + if (adev->in_s4) { + /* Adds a GFX reset as workaround just before sending the + * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering + * an invalid state. 
+ */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + if (ret) + return ret; + } ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); } @@ -328,7 +330,7 @@ static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_AVERAGE_SOCKETPOWER: *value = (metrics->AverageSocketPower << 8) / 1000; @@ -582,6 +584,12 @@ static int smu_v13_0_4_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_4_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_4_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, @@ -750,31 +758,9 @@ static int smu_v13_0_4_get_dpm_ultimate_freq(struct smu_context *smu, int ret = 0; if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_FCLK: - clock_limit = smu->smu_table.boot_values.fclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - case SMU_VCLK: - clock_limit = smu->smu_table.boot_values.vclk; - break; - case SMU_DCLK: - clock_limit = smu->smu_table.boot_values.dclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 0dce672ac1b9..f5db181ef489 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -193,7 +193,9 @@ static int smu_v13_0_5_system_features_control(struct smu_context *smu, bool en) return ret; } -static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -286,7 +288,7 @@ static int smu_v13_0_5_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; @@ -332,6 +334,12 @@ static int smu_v13_0_5_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_5_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = smu_v13_0_5_get_smu_metrics_data(smu, METRICS_CURR_SOCKETPOWER, @@ -637,7 +645,7 @@ static int smu_v13_0_5_get_dpm_level_count(struct smu_context *smu, *count = clk_table->NumDfPstatesEnabled; break; default: - break; + return -EINVAL; } return 0; @@ -727,31 +735,9 @@ static int smu_v13_0_5_get_dpm_ultimate_freq(struct smu_context *smu, int ret = 0; if (!smu_v13_0_5_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_FCLK: - clock_limit = 
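/*
 * this per-ASIC boot-clock switch is being folded into the common
 * smu_v13_0_get_boot_freq_by_index() helper, so the "DPM disabled,
 * report boot frequency" fallback is implemented in one place
 */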
smu->smu_table.boot_values.fclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - case SMU_VCLK: - clock_limit = smu->smu_table.boot_values.vclk; - break; - case SMU_DCLK: - clock_limit = smu->smu_table.boot_values.dclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) @@ -827,9 +813,10 @@ failed: } static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { enum smu_message_type msg_set_min, msg_set_max; uint32_t min_clk = min; @@ -966,7 +953,7 @@ static int smu_v13_0_5_force_clk_levels(struct smu_context *smu, if (ret) goto force_level_out; - ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto force_level_out; break; @@ -1062,9 +1049,10 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, if (sclk_min && sclk_max) { ret = smu_v13_0_5_set_soft_freq_limited_range(smu, - SMU_SCLK, - sclk_min, - sclk_max); + SMU_SCLK, + sclk_min, + sclk_max, + false); if (ret) return ret; @@ -1076,7 +1064,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1085,7 +1074,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 3957af057d54..f00ef7f3f355 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -68,6 +68,7 @@ #undef pr_debug MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin"); +MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin"); #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) @@ -95,7 +96,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin"); #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 #define LINK_SPEED_MAX 4 - #define SMU_13_0_6_DSCLK_THRESHOLD 140 #define MCA_BANK_IPID(_ip, _hwid, _type) \ @@ -120,6 +120,7 @@ struct mca_ras_info { #define P2S_TABLE_ID_A 0x50325341 #define P2S_TABLE_ID_X 0x50325358 +#define P2S_TABLE_ID_3 0x50325303 // clang-format off static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { @@ -138,13 +139,13 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), - MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), 
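/*
 * the third MSG_MAP column (historically a plain valid-in-VF bool)
 * now carries message flags; entries promoted to SMU_MSG_RAS_PRI
 * below are meant to stay deliverable while a RAS fatal error is
 * being handled, when ordinary messages are refused.
 */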
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), - MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), @@ -167,12 +168,16 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0), MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0), MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0), - MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, 0), - MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, 0), - MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0), - MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0), + MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI), + MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI), MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), + MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), + MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), + MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0), + MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0), }; // clang-format on @@ -209,7 +214,11 @@ static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_CO SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF), SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL), SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN), - SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK), }; #define TABLE_PMSTATUSLOG 0 @@ -231,27 +240,13 @@ static const uint8_t smu_v13_0_6_throttler_map[] = { [THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT), }; -struct PPTable_t { - uint32_t MaxSocketPowerLimit; - uint32_t MaxGfxclkFrequency; - uint32_t MinGfxclkFrequency; - uint32_t FclkFrequencyTable[4]; - uint32_t UclkFrequencyTable[4]; - uint32_t SocclkFrequencyTable[4]; - uint32_t VclkFrequencyTable[4]; - uint32_t DclkFrequencyTable[4]; - uint32_t LclkFrequencyTable[4]; - uint32_t MaxLclkDpmRange; - uint32_t MinLclkDpmRange; - uint64_t PublicSerialNumber_AID; - bool Init; -}; - -#define SMUQ10_TO_UINT(x) ((x) >> 10) -#define SMUQ10_FRAC(x) ((x) & 0x3ff) -#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) -#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\ - (metrics_a->field) : (metrics_x->field)) +#define GET_GPU_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V0) ?\ + (metrics_v0->field) : (metrics_v2->field)) +#define GET_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V1) ?\ + (metrics_v1->field) : GET_GPU_METRIC_FIELD(field, version)) +#define METRICS_TABLE_SIZE (max3(sizeof(MetricsTableV0_t),\ + sizeof(MetricsTableV1_t),\ + sizeof(MetricsTableV2_t))) struct 
smu_v13_0_6_dpm_map { enum smu_clk_type clk_type; @@ -260,6 +255,187 @@ struct smu_v13_0_6_dpm_map { uint32_t *freq_table; }; +static inline int smu_v13_0_6_get_metrics_version(struct smu_context *smu) +{ + if ((smu->adev->flags & AMD_IS_APU) && + smu->smc_fw_version <= 0x4556900) + return METRICS_VERSION_V1; + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 12)) + return METRICS_VERSION_V2; + + return METRICS_VERSION_V0; +} + +static inline void smu_v13_0_6_cap_set(struct smu_context *smu, + enum smu_v13_0_6_caps cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + + dpm_context->caps |= BIT_ULL(cap); +} + +static inline void smu_v13_0_6_cap_clear(struct smu_context *smu, + enum smu_v13_0_6_caps cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + + dpm_context->caps &= ~BIT_ULL(cap); +} + +bool smu_v13_0_6_cap_supported(struct smu_context *smu, + enum smu_v13_0_6_caps cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + + return !!(dpm_context->caps & BIT_ULL(cap)); +} + +static void smu_v13_0_14_init_caps(struct smu_context *smu) +{ + enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM), + SMU_CAP(SET_UCLK_MAX), + SMU_CAP(DPM_POLICY), + SMU_CAP(PCIE_METRICS), + SMU_CAP(CTF_LIMIT), + SMU_CAP(MCA_DEBUG_MODE), + SMU_CAP(RMA_MSG), + SMU_CAP(ACA_SYND) }; + uint32_t fw_ver = smu->smc_fw_version; + + for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++) + smu_v13_0_6_cap_set(smu, default_cap_list[i]); + + if (fw_ver >= 0x05550E00) + smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS)); + if (fw_ver >= 0x05550B00) + smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); + if (fw_ver >= 0x5551200) + smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); + if (fw_ver >= 0x5551600) { + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } +} + +static void smu_v13_0_12_init_caps(struct smu_context *smu) +{ + enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM), + SMU_CAP(PCIE_METRICS), + SMU_CAP(CTF_LIMIT), + SMU_CAP(MCA_DEBUG_MODE), + SMU_CAP(RMA_MSG), + SMU_CAP(ACA_SYND), + SMU_CAP(OTHER_END_METRICS), + SMU_CAP(PER_INST_METRICS) }; + uint32_t fw_ver = smu->smc_fw_version; + + for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++) + smu_v13_0_6_cap_set(smu, default_cap_list[i]); + + if (fw_ver < 0x00561900) + smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM)); + + if (fw_ver >= 0x00561700) + smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); + + if (fw_ver >= 0x00561E00) + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); + + if (fw_ver >= 0x00562500) + smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); +} + +static void smu_v13_0_6_init_caps(struct smu_context *smu) +{ + enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM), + SMU_CAP(SET_UCLK_MAX), + SMU_CAP(DPM_POLICY), + SMU_CAP(PCIE_METRICS), + SMU_CAP(CTF_LIMIT), + SMU_CAP(MCA_DEBUG_MODE), + SMU_CAP(RMA_MSG), + SMU_CAP(ACA_SYND) }; + struct amdgpu_device *adev = smu->adev; + uint32_t fw_ver = smu->smc_fw_version; + uint32_t pgm = (fw_ver >> 24) & 0xFF; + + for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++) + smu_v13_0_6_cap_set(smu, default_cap_list[i]); + + if (fw_ver < 0x552F00) + smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM)); + if (fw_ver < 0x554500) + smu_v13_0_6_cap_clear(smu, SMU_CAP(CTF_LIMIT)); + + if (adev->flags & AMD_IS_APU) { + smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS)); + smu_v13_0_6_cap_clear(smu, 
SMU_CAP(DPM_POLICY)); + smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); + smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); + + if (fw_ver >= 0x04556A00) + smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); + } else { + if (fw_ver >= 0x557600) + smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS)); + if (fw_ver < 0x00556000) + smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY)); + if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600)) + smu_v13_0_6_cap_clear(smu, SMU_CAP(SET_UCLK_MAX)); + if (fw_ver < 0x556300) + smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS)); + if (fw_ver < 0x554800) + smu_v13_0_6_cap_clear(smu, SMU_CAP(MCA_DEBUG_MODE)); + if (fw_ver >= 0x556F00) + smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); + if (fw_ver < 0x00555a00) + smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); + if (fw_ver < 0x00555600) + smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); + if ((pgm == 7 && fw_ver >= 0x7550E00) || + (pgm == 0 && fw_ver >= 0x00557E00)) + smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); + if ((pgm == 0 && fw_ver >= 0x00557F01) || + (pgm == 7 && fw_ver >= 0x7551000)) { + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + } + if ((pgm == 0 && fw_ver >= 0x00558000) || + (pgm == 7 && fw_ver >= 0x7551000)) + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } + if (((pgm == 7) && (fw_ver >= 0x7550700)) || + ((pgm == 0) && (fw_ver >= 0x00557900)) || + ((pgm == 4) && (fw_ver >= 0x4557000))) + smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); +} + +static void smu_v13_0_x_init_caps(struct smu_context *smu) +{ + switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 12): + return smu_v13_0_12_init_caps(smu); + case IP_VERSION(13, 0, 14): + return smu_v13_0_14_init_caps(smu); + default: + return smu_v13_0_6_init_caps(smu); + } +} + +static int smu_v13_0_6_check_fw_version(struct smu_context *smu) +{ + int r; + + r = smu_v13_0_check_fw_version(smu); + /* Initialize caps flags once fw version is fetched */ + if (!r) + smu_v13_0_x_init_caps(smu); + + return r; +} + static int smu_v13_0_6_init_microcode(struct smu_context *smu) { const struct smc_firmware_header_v2_1 *v2_1; @@ -269,22 +445,24 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; uint32_t p2s_table_id = P2S_TABLE_ID_A; int ret = 0, i, p2stable_count; + int var = (adev->pdev->device & 0xF); char ucode_prefix[15]; - char fw_name[30]; - /* No need to load P2S tables in IOV mode */ - if (amdgpu_sriov_vf(adev)) + /* No need to load P2S tables in IOV mode or for smu v13.0.12 */ + if (amdgpu_sriov_vf(adev) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))) return 0; - if (!(adev->flags & AMD_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) { p2s_table_id = P2S_TABLE_ID_X; + if (var == 0x5) + p2s_table_id = P2S_TABLE_ID_3; + } amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - - ret = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + ret = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (ret) goto out; @@ -329,13 +507,15 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; struct amdgpu_device *adev = smu->adev; + int gpu_metrcs_size = METRICS_TABLE_SIZE; if (!(adev->flags & AMD_IS_APU)) SMU_TABLE_INIT(tables, 
SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, - max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), + max(gpu_metrcs_size, + smu_v13_0_12_get_max_metrics_size()), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); @@ -343,13 +523,12 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); - smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t), - sizeof(MetricsTableA_t)), GFP_KERNEL); + smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) { @@ -368,9 +547,78 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) return 0; } +static int smu_v13_0_6_select_policy_soc_pstate(struct smu_context *smu, + int policy) +{ + struct amdgpu_device *adev = smu->adev; + int ret, param; + + switch (policy) { + case SOC_PSTATE_DEFAULT: + param = 0; + break; + case SOC_PSTATE_0: + param = 1; + break; + case SOC_PSTATE_1: + param = 2; + break; + case SOC_PSTATE_2: + param = 3; + break; + default: + return -EINVAL; + } + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetThrottlingPolicy, + param, NULL); + + if (ret) + dev_err(adev->dev, "select soc pstate policy %d failed", + policy); + + return ret; +} + +static int smu_v13_0_6_select_plpd_policy(struct smu_context *smu, int level) +{ + struct amdgpu_device *adev = smu->adev; + int ret, param; + + switch (level) { + case XGMI_PLPD_DEFAULT: + param = PPSMC_PLPD_MODE_DEFAULT; + break; + case XGMI_PLPD_OPTIMIZED: + param = PPSMC_PLPD_MODE_OPTIMIZED; + break; + case XGMI_PLPD_DISALLOW: + param = 0; + break; + default: + return -EINVAL; + } + + if (level == XGMI_PLPD_DISALLOW) + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, param, NULL); + else + /* change xgmi per-link power down policy */ + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_SelectPLPDMode, param, NULL); + + if (ret) + dev_err(adev->dev, + "select xgmi per-link power down policy %d failed\n", + level); + + return ret; +} + static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_dpm_policy *policy; smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL); @@ -378,6 +626,36 @@ static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); + smu_dpm->dpm_policies = + kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL); + if (!smu_dpm->dpm_policies) { + kfree(smu_dpm->dpm_context); + return -ENOMEM; + } + + if (!(smu->adev->flags & AMD_IS_APU)) { + policy = &(smu_dpm->dpm_policies->policies[0]); + + policy->policy_type = PP_PM_POLICY_SOC_PSTATE; + policy->level_mask = BIT(SOC_PSTATE_DEFAULT) | + BIT(SOC_PSTATE_0) | BIT(SOC_PSTATE_1) | + BIT(SOC_PSTATE_2); + policy->current_level = SOC_PSTATE_DEFAULT; + policy->set_policy = smu_v13_0_6_select_policy_soc_pstate; + smu_cmn_generic_soc_policy_desc(policy); + smu_dpm->dpm_policies->policy_mask |= + BIT(PP_PM_POLICY_SOC_PSTATE); + } + policy = &(smu_dpm->dpm_policies->policies[1]); + + policy->policy_type = 
PP_PM_POLICY_XGMI_PLPD; + policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT) | + BIT(XGMI_PLPD_OPTIMIZED); + policy->current_level = XGMI_PLPD_DEFAULT; + policy->set_policy = smu_v13_0_6_select_plpd_policy; + smu_cmn_generic_plpd_policy_desc(policy); + smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD); + return 0; } @@ -463,7 +741,7 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, memset(&pm_metrics->common_header, 0, sizeof(pm_metrics->common_header)); pm_metrics->common_header.mp1_ip_discovery_version = - IP_VERSION(13, 0, 6); + amdgpu_ip_version(smu->adev, MP1_HWIP, 0); pm_metrics->common_header.pmfw_version = pmfw_version; pm_metrics->common_header.pmmetrics_version = table_version; pm_metrics->common_header.structure_size = @@ -472,17 +750,61 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, return pm_metrics->common_header.structure_size; } +static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + + if (!static_metrics->InputTelemetryVoltageInmV) { + dev_warn(smu->adev->dev, "Invalid board voltage %d\n", + static_metrics->InputTelemetryVoltageInmV); + } + + dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) && + static_metrics->pldmVersion[0] != 0xFFFFFFFF) + smu->adev->firmware.pldm_version = + static_metrics->pldmVersion[0]; +} + +int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; + struct smu_table *table = &smu_table->driver_table; + int ret; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL); + if (ret) { + dev_info(smu->adev->dev, + "Failed to export static metrics table!\n"); + return ret; + } + + amdgpu_asic_invalidate_hdp(smu->adev, NULL); + memcpy(smu_table->metrics_table, table->cpu_addr, table_size); + + return 0; +} + static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table; - MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; + StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; + MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; + MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; + MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - struct amdgpu_device *adev = smu->adev; + int version = smu_v13_0_6_get_metrics_version(smu); int ret, i, retry = 100; uint32_t table_version; + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) + return smu_v13_0_12_setup_driver_pptable(smu); + /* Store one-time values in driver PPTable */ if (!pptable->Init) { while (--retry) { @@ -491,7 +813,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) return ret; /* Ensure that metrics have been updated */ - if (GET_METRIC_FIELD(AccumulationCounter)) + if (GET_METRIC_FIELD(AccumulationCounter, version)) break; usleep_range(1000, 1100); @@ -508,31 +830,38 @@ static int 
smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) table_version; pptable->MaxSocketPowerLimit = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, version)); pptable->MaxGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version)); pptable->MinGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency)); + SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version)); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, version)[i]); pptable->UclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, version)[i]); pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND( - GET_METRIC_FIELD(SocclkFrequencyTable)[i]); + GET_METRIC_FIELD(SocclkFrequencyTable, version)[i]); pptable->VclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, version)[i]); pptable->DclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, version)[i]); pptable->LclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, version)[i]); } /* use AID0 serial number by default */ - pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0]; + pptable->PublicSerialNumber_AID = + GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0]; pptable->Init = true; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + ret = smu_v13_0_6_get_static_metrics_table(smu); + if (ret) + return ret; + smu_v13_0_6_fill_static_metrics_table(smu, static_metrics); + } } return 0; @@ -636,6 +965,15 @@ static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu, return ret; } +static void smu_v13_0_6_pm_policy_init(struct smu_context *smu) +{ + struct smu_dpm_policy *policy; + + policy = smu_get_pm_policy(smu, PP_PM_POLICY_SOC_PSTATE); + if (policy) + policy->current_level = SOC_PSTATE_DEFAULT; +} + static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu) { struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; @@ -665,6 +1003,15 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu) smu_v13_0_6_setup_driver_pptable(smu); + /* DPM policy not supported in older firmwares */ + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM_POLICY))) { + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + smu_dpm->dpm_policies->policy_mask &= + ~BIT(PP_PM_POLICY_SOC_PSTATE); + } + + smu_v13_0_6_pm_policy_init(smu); /* gfxclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.gfx_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { @@ -832,8 +1179,10 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, uint32_t *value) { struct smu_table_context *smu_table = &smu->smu_table; - MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table; - MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; + MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; + MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; + MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; + int version = 
smu_v13_0_6_get_metrics_version(smu); struct amdgpu_device *adev = smu->adev; int ret = 0; int xcc_id; @@ -842,56 +1191,60 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, if (ret) return ret; + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) + return smu_v13_0_12_get_smu_metrics_data(smu, member, value); + /* For clocks with multiple instances, only report the first one */ switch (member) { case METRICS_CURR_GFXCLK: case METRICS_AVERAGE_GFXCLK: - if (smu->smc_fw_version >= 0x552F00) { + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) { xcc_id = GET_INST(GC, 0); - *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]); } else { *value = 0; } break; case METRICS_CURR_SOCCLK: case METRICS_AVERAGE_SOCCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[0]); break; case METRICS_CURR_UCLK: case METRICS_AVERAGE_UCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); break; case METRICS_CURR_VCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, version)[0]); break; case METRICS_CURR_DCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, version)[0]); break; case METRICS_CURR_FCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, version)); break; case METRICS_AVERAGE_GFXACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version)); break; case METRICS_AVERAGE_MEMACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version)); break; case METRICS_CURR_SOCKETPOWER: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8; + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)) << 8; break; case METRICS_TEMPERATURE_HOTSPOT: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_TEMPERATURE_MEM: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; /* This is the max of all VRs and not just SOC VR. * No need to define another data type for the same. 
*/ case METRICS_TEMPERATURE_VRSOC: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; default: @@ -1010,8 +1363,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, switch (type) { case SMU_OD_SCLK: - size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK"); - fallthrough; + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + pstate_table->gfxclk_pstate.curr.min, + pstate_table->gfxclk_pstate.curr.max); + break; case SMU_SCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now); @@ -1052,8 +1408,14 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: - size += sysfs_emit_at(buf, size, "%s:\n", "MCLK"); - fallthrough; + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX))) + return 0; + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + pstate_table->uclk_pstate.curr.min, + pstate_table->uclk_pstate.curr.max); + break; case SMU_MCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK, &now); @@ -1304,6 +1666,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; int ret = 0; if (amdgpu_ras_intr_triggered()) @@ -1348,6 +1711,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu, ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VDDBOARD: + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) { + *(uint32_t *)data = dpm_context->board_volt; + *size = 4; + break; + } else { + ret = -EOPNOTSUPP; + break; + } case AMDGPU_PP_SENSOR_GPU_AVG_POWER: default: ret = -EOPNOTSUPP; @@ -1527,7 +1899,7 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu) static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable) { /* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */ - if (smu->smc_fw_version < 0x554800) + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(MCA_DEBUG_MODE))) return 0; return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead, @@ -1610,12 +1982,12 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, if (uclk_table->max != pstate_table->uclk_pstate.curr.max) { /* Min UCLK is not expected to be changed */ ret = smu_v13_0_set_soft_freq_limited_range( - smu, SMU_UCLK, 0, uclk_table->max); + smu, SMU_UCLK, 0, uclk_table->max, false); if (ret) return ret; pstate_table->uclk_pstate.curr.max = uclk_table->max; } - pstate_table->uclk_pstate.custom.max = 0; + smu_v13_0_reset_custom_level(smu); return 0; case AMD_DPM_FORCED_LEVEL_MANUAL: @@ -1624,12 +1996,13 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, break; } - return -EINVAL; + return -EOPNOTSUPP; } static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, + bool automatic) { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; @@ -1670,9 +2043,13 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, if (clk_type == SMU_UCLK) { if (max == pstate_table->uclk_pstate.curr.max) return 0; + /* For VF, only 
allowed in FW versions 85.102 or greater */ + if (!smu_v13_0_6_cap_supported(smu, + SMU_CAP(SET_UCLK_MAX))) + return -EOPNOTSUPP; /* Only max clock limiting is allowed for UCLK */ ret = smu_v13_0_set_soft_freq_limited_range( - smu, SMU_UCLK, 0, max); + smu, SMU_UCLK, 0, max, false); if (!ret) pstate_table->uclk_pstate.curr.max = max; } @@ -1812,7 +2189,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, max_clk = dpm_context->dpm_tables.gfx_table.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_GFXCLK, min_clk, max_clk); + smu, SMU_GFXCLK, min_clk, max_clk, false); if (ret) return ret; @@ -1820,10 +2197,10 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = dpm_context->dpm_tables.uclk_table.min; max_clk = dpm_context->dpm_tables.uclk_table.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_UCLK, min_clk, max_clk); + smu, SMU_UCLK, min_clk, max_clk, false); if (ret) return ret; - pstate_table->uclk_pstate.custom.max = 0; + smu_v13_0_reset_custom_level(smu); } break; case PP_OD_COMMIT_DPM_TABLE: @@ -1844,7 +2221,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, max_clk = pstate_table->gfxclk_pstate.custom.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_GFXCLK, min_clk, max_clk); + smu, SMU_GFXCLK, min_clk, max_clk, false); if (ret) return ret; @@ -1855,7 +2232,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = pstate_table->uclk_pstate.curr.min; max_clk = pstate_table->uclk_pstate.custom.max; return smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_UCLK, min_clk, max_clk); + smu, SMU_UCLK, min_clk, max_clk, false); } break; default: @@ -1872,7 +2249,7 @@ static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu, ret = smu_cmn_get_enabled_mask(smu, feature_mask); - if (ret == -EIO && smu->smc_fw_version < 0x552F00) { + if (ret == -EIO && !smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) { *feature_mask = 0; ret = 0; } @@ -1885,6 +2262,9 @@ static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu) int ret; uint64_t feature_enabled; + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) + return smu_v13_0_12_is_dpm_running(smu); + ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled); if (ret) @@ -1973,8 +2353,12 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap, } mutex_lock(&adev->pm.mutex); r = smu_v13_0_6_request_i2c_xfer(smu, req); - if (r) - goto fail; + if (r) { + /* Retry once, in case of an i2c collision */ + r = smu_v13_0_6_request_i2c_xfer(smu, req); + if (r) + goto fail; + } for (c = i = 0; i < num_msgs; i++) { if (!(msg[i].flags & I2C_M_RD)) { @@ -2077,11 +2461,11 @@ static void smu_v13_0_6_get_unique_id(struct smu_context *smu) adev->unique_id = pptable->PublicSerialNumber_AID; } -static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu) +static int smu_v13_0_6_get_bamaco_support(struct smu_context *smu) { /* smu_13_0_6 does not support baco */ - return false; + return 0; } static const char *const throttling_logging_label[] = { @@ -2159,76 +2543,233 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) return pcie_gen_to_speed(speed_level + 1); } +static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, + void *table) +{ + const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; + int version = smu_v13_0_6_get_metrics_version(smu); + struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct amdgpu_device *adev = smu->adev; + int ret, 
inst, i, j, k, idx; + MetricsTableV0_t *metrics_v0; + MetricsTableV1_t *metrics_v1; + MetricsTableV2_t *metrics_v2; + struct amdgpu_xcp *xcp; + u32 inst_mask; + bool per_inst; + + if (!table) + return sizeof(*xcp_metrics); + + for_each_xcp(adev->xcp_mgr, xcp, i) { + if (xcp->id == xcp_id) + break; + } + if (i == adev->xcp_mgr->num_xcps) + return -EINVAL; + + xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table; + smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + + metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); + if (!metrics_v0) + return -ENOMEM; + + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); + if (ret) { + kfree(metrics_v0); + return ret; + } + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0); + goto out; + } + + metrics_v1 = (MetricsTableV1_t *)metrics_v0; + metrics_v2 = (MetricsTableV2_t *)metrics_v0; + + per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN have the same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < num_jpeg_rings; ++j) { + xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD( + JpegBusy, + version)[(inst * num_jpeg_rings) + j]); + } + xcp_metrics->vcn_busy[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); + + xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(VclkFrequency, version)[inst]); + xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(DclkFrequency, version)[inst]); + xcp_metrics->current_socclk[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(SocclkFrequency, version)[inst]); + + idx++; + } + + xcp_metrics->current_uclk = + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); + + if (per_inst) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + xcp_metrics->current_gfxclk[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, + version)[inst]); + + xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); + xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusyAcc, + version)[inst]); + if (smu_v13_0_6_cap_supported( + smu, SMU_CAP(HST_LIMIT_METRICS))) { + xcp_metrics->gfx_below_host_limit_ppt_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitPptAcc + [inst]); + xcp_metrics->gfx_below_host_limit_thm_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitThmAcc + [inst]); + xcp_metrics->gfx_low_utilization_acc + [idx] = SMUQ10_ROUND( + metrics_v0 + ->GfxclkLowUtilizationAcc[inst]); + xcp_metrics->gfx_below_host_limit_total_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitTotalAcc + [inst]); + } + idx++; + } + } +out: + kfree(metrics_v0); + + return sizeof(*xcp_metrics); +} + static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_5 *gpu_metrics = - (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_8 *gpu_metrics = + (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; + int version = smu_v13_0_6_get_metrics_version(smu); + int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; - int ret = 0, xcc_id, inst, 
i, j; - MetricsTableX_t *metrics_x; - MetricsTableA_t *metrics_a; + MetricsTableV0_t *metrics_v0; + MetricsTableV1_t *metrics_v1; + MetricsTableV2_t *metrics_v2; + struct amdgpu_xcp *xcp; u16 link_width_level; + ssize_t num_bytes; + u8 num_jpeg_rings; + u32 inst_mask; + bool per_inst; - metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL); - ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true); + metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true); if (ret) { - kfree(metrics_x); + kfree(metrics_v0); return ret; } - metrics_a = (MetricsTableA_t *)metrics_x; + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0); + kfree(metrics_v0); + return num_bytes; + } + + metrics_v1 = (MetricsTableV1_t *)metrics_v0; + metrics_v2 = (MetricsTableV2_t *)metrics_v0; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); gpu_metrics->temperature_hotspot = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)); /* Individual HBM stack temperature is not reported */ gpu_metrics->temperature_mem = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)); /* Reports max temperature of all voltage rails */ gpu_metrics->temperature_vrsoc = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)); gpu_metrics->average_gfx_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version)); gpu_metrics->average_umc_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version)); + + gpu_metrics->mem_max_bandwidth = + SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, version)); gpu_metrics->curr_socket_power = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)); /* Energy counter reported in 15.259uJ (2^-16) units */ - gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc); + gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, version); for (i = 0; i < MAX_GFX_CLKS; i++) { xcc_id = GET_INST(GC, i); if (xcc_id >= 0) gpu_metrics->current_gfxclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]); if (i < MAX_CLKS) { gpu_metrics->current_socclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[i]); inst = GET_INST(VCN, i); if (inst >= 0) { gpu_metrics->current_vclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, + version)[inst]); gpu_metrics->current_dclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, + version)[inst]); } } } - gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); + gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); - /* Throttle status is not reported through metrics now */ - gpu_metrics->throttle_status = 0; + /* Total accumulated cycle counter */ + gpu_metrics->accumulation_counter = 
GET_METRIC_FIELD(AccumulationCounter, version); + + /* Accumulated throttler residencies */ + gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, version); + gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, version); + gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, version); + gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, version); + gpu_metrics->hbm_thm_residency_acc = + GET_METRIC_FIELD(HbmThmResidencyAcc, version); /* Clock Lock Status. Each bit corresponds to each GFXCLK instance */ - gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0); + gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, + version) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { - if (!amdgpu_sriov_vf(adev)) { + /* Check smu version: PCIe link speed and width are reported from the pmfw metric + * table (for both PF and one VF) for smu version 85.99.0 or higher; otherwise they + * are reported only for the PF, from registers. + */ + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) { + gpu_metrics->pcie_link_width = GET_GPU_METRIC_FIELD(PCIeLinkWidth, version); + gpu_metrics->pcie_link_speed = + pcie_gen_to_speed(GET_GPU_METRIC_FIELD(PCIeLinkSpeed, version)); + } else if (!amdgpu_sriov_vf(adev)) { link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu); if (link_width_level > MAX_LINK_WIDTH) link_width_level = 0; @@ -2238,62 +2779,122 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu); } + gpu_metrics->pcie_bandwidth_acc = - SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]); + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidthAcc, version)[0]); gpu_metrics->pcie_bandwidth_inst = - SMUQ10_ROUND(metrics_x->PcieBandwidth[0]); + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidth, version)[0]); gpu_metrics->pcie_l0_to_recov_count_acc = - metrics_x->PCIeL0ToRecoveryCountAcc; + GET_GPU_METRIC_FIELD(PCIeL0ToRecoveryCountAcc, version); gpu_metrics->pcie_replay_count_acc = - metrics_x->PCIenReplayAAcc; + GET_GPU_METRIC_FIELD(PCIenReplayAAcc, version); gpu_metrics->pcie_replay_rover_count_acc = - metrics_x->PCIenReplayARolloverCountAcc; + GET_GPU_METRIC_FIELD(PCIenReplayARolloverCountAcc, version); gpu_metrics->pcie_nak_sent_count_acc = - metrics_x->PCIeNAKSentCountAcc; + GET_GPU_METRIC_FIELD(PCIeNAKSentCountAcc, version); gpu_metrics->pcie_nak_rcvd_count_acc = - metrics_x->PCIeNAKReceivedCountAcc; + GET_GPU_METRIC_FIELD(PCIeNAKReceivedCountAcc, version); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS))) + gpu_metrics->pcie_lc_perf_other_end_recovery = + GET_GPU_METRIC_FIELD(PCIeOtherEndRecoveryAcc, version); + } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); gpu_metrics->gfx_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, version)); gpu_metrics->mem_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version)); for (i = 0; i < NUM_XGMI_LINKS; i++) { - gpu_metrics->xgmi_read_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]); - gpu_metrics->xgmi_write_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]); - } - - for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - inst = GET_INST(JPEG, i); - for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { - 
gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] = - SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy) - [(inst * adev->jpeg.num_jpeg_rings) + j]); + j = amdgpu_xgmi_get_ext_link(adev, i); + if (j < 0 || j >= NUM_XGMI_LINKS) + continue; + gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND( + GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]); + gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND( + GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]); + ret = amdgpu_get_xgmi_link_status(adev, i); + if (ret >= 0) + gpu_metrics->xgmi_link_status[j] = ret; + } + + gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; + + per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); + + num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; + for_each_xcp(adev->xcp_mgr, xcp, i) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN have the same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < num_jpeg_rings; ++j) { + gpu_metrics->xcp_stats[i].jpeg_busy + [(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version) + [(inst * num_jpeg_rings) + j]); + } + gpu_metrics->xcp_stats[i].vcn_busy[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); + idx++; + } - } - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - inst = GET_INST(VCN, i); - gpu_metrics->vcn_activity[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]); + if (per_inst) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); + gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = + SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc, + version)[inst]); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]); + } + idx++; + } + } } - gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth)); - gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate)); + gpu_metrics->xgmi_link_width = GET_METRIC_FIELD(XgmiWidth, version); + gpu_metrics->xgmi_link_speed = GET_METRIC_FIELD(XgmiBitrate, version); - gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp); + gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version); *table = (void *)gpu_metrics; - kfree(metrics_x); + kfree(metrics_v0); return sizeof(*gpu_metrics); } +static void smu_v13_0_6_restore_pci_config(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < 16; i++) + pci_write_config_dword(adev->pdev, i * 4, + adev->pdev->saved_config_space[i]); + pci_restore_msi_state(adev->pdev); +} + static int smu_v13_0_6_mode2_reset(struct smu_context *smu) { int ret = 0, index; @@ -2302,6 +2903,8 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu) index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GfxDeviceDriverReset); + if (index < 0) + return index; 
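Aside on the mode-2 reset hunks around this point: smu_v13_0_6_restore_pci_config() above unconditionally rewrites the first 16 dwords of config space (the 64-byte standard header, which holds the BARs), and the comment added just below explains why: pci_restore_config_space() in the PCI core compares each dword against the saved value and skips the write when they match. A simplified model of that compare-before-write behavior (an illustration paraphrasing the PCI core, not the actual implementation, which also restores the header ranges in a particular order):

static void restore_header_checked(struct pci_dev *pdev)
{
	u32 val;
	int i;

	for (i = 0; i < 16; i++) {
		pci_read_config_dword(pdev, i * 4, &val);
		/* A switch that answers reads with stale virtual BAR values
		 * makes this comparison succeed, so the saved (real) values
		 * would never be written back after a mode-2 reset. */
		if (val == pdev->saved_config_space[i])
			continue;
		pci_write_config_dword(pdev, i * 4,
				       pdev->saved_config_space[i]);
	}
}

Hence the unconditional write loop in the new helper.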
mutex_lock(&smu->message_lock); @@ -2315,6 +2918,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu) /* Restore the config space saved during init */ amdgpu_device_load_pci_state(adev->pdev); + /* Certain platforms have switches which assign virtual BAR values to + * devices. OS uses the virtual BAR values and device behind the switch + * is assigned another BAR value. When device's config space registers + * are queried, switch returns the virtual BAR values. When mode-2 reset + * is performed, switch is unaware of it, and will continue to return + * the same virtual values to the OS. This affects + * pci_restore_config_space() API as it doesn't write the value saved if + * the current value read from config space is the same as what is + * saved. As a workaround, make sure the config space is restored + * always. + */ + if (!(adev->flags & AMD_IS_APU)) + smu_v13_0_6_restore_pci_config(smu); + dev_dbg(smu->adev->dev, "wait for reset ack\n"); do { ret = smu_cmn_wait_for_response(smu); @@ -2355,7 +2972,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu, return -EINVAL; /*Check smu version, GetCtfLimit message only supported for smu version 85.69 or higher */ - if (smu->smc_fw_version < 0x554500) + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(CTF_LIMIT))) return 0; /* Get SOC Max operating temperature */ @@ -2409,24 +3026,14 @@ failed: static int smu_v13_0_6_mode1_reset(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - struct amdgpu_hive_info *hive = NULL; - u32 hive_ras_recovery = 0; - struct amdgpu_ras *ras; u32 fatal_err, param; int ret = 0; - hive = amdgpu_get_xgmi_hive(adev); - ras = amdgpu_ras_get_context(adev); fatal_err = 0; param = SMU_RESET_MODE_1; - if (hive) { - hive_ras_recovery = atomic_read(&hive->ras_recovery); - amdgpu_put_xgmi_hive(hive); - } - /* fatal error triggered by ras, PMFW supports the flag */ - if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) + if (amdgpu_ras_get_fed_status(adev)) fatal_err = 1; param |= (fatal_err << 16); @@ -2439,14 +3046,29 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu) return ret; } +static int smu_v13_0_6_link_reset(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_4, NULL); + return ret; +} + static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu) { return true; } -static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu) +static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu) { - return true; + struct amdgpu_device *adev = smu->adev; + int var = (adev->pdev->device & 0xF); + + if (var == 0x1) + return true; + + return false; } static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, @@ -2467,11 +3089,10 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, static int smu_v13_0_6_send_rma_reason(struct smu_context *smu) { - struct amdgpu_device *adev = smu->adev; int ret; /* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */ - if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00) + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(RMA_MSG))) return 0; ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL); @@ -2483,6 +3104,56 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu) return ret; } +/** + * smu_v13_0_6_reset_sdma_is_supported - Check if SDMA reset is supported + * @smu: smu_context pointer + * 
This function checks if the SMU supports resetting the SDMA engine. + * It returns false if the capability is not supported. + */ +static bool smu_v13_0_6_reset_sdma_is_supported(struct smu_context *smu) +{ + bool ret = true; + + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET))) { + dev_info(smu->adev->dev, + "SDMA reset capability is not supported\n"); + ret = false; + } + + return ret; +} + +static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask) +{ + int ret = 0; + + if (!smu_v13_0_6_reset_sdma_is_supported(smu)) + return -EOPNOTSUPP; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_ResetSDMA, inst_mask, NULL); + if (ret) + dev_err(smu->adev->dev, + "failed to send ResetSDMA event with mask 0x%x\n", + inst_mask); + + return ret; +} + +static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask) +{ + int ret = 0; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL); + if (ret) + dev_err(smu->adev->dev, + "failed to send ResetVCN event with mask 0x%x\n", + inst_mask); + return ret; +} + + static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -2671,6 +3342,11 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct umc_v12_0_is_correctable_error(adev, status0)) *count = (ext_error_code == 0) ? odecc_err_cnt : 1; + amdgpu_umc_update_ecc_status(adev, + entry->regs[MCA_REG_IDX_STATUS], + entry->regs[MCA_REG_IDX_IPID], + entry->regs[MCA_REG_IDX_ADDR]); + return 0; } @@ -2684,7 +3360,8 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]); err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]); - if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0) + if (type == AMDGPU_MCA_ERROR_TYPE_UE && + (ext_error_code == 0 || ext_error_code == 9)) *count = err_cnt; else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6) *count = err_cnt; @@ -2787,7 +3464,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd if (instlo != 0x03b30400) return false; - if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) { + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND))) { errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]); errcode &= 0xff; } else { @@ -2804,6 +3481,16 @@ static int mmhub_err_codes[] = { CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE, }; +static int vcn_err_codes[] = { + CODE_VIDD, CODE_VIDV, +}; +static int jpeg_err_codes[] = { + CODE_JPEG0S, CODE_JPEG0D, CODE_JPEG1S, CODE_JPEG1D, + CODE_JPEG2S, CODE_JPEG2D, CODE_JPEG3S, CODE_JPEG3D, + CODE_JPEG4S, CODE_JPEG4D, CODE_JPEG5S, CODE_JPEG5D, + CODE_JPEG6S, CODE_JPEG6D, CODE_JPEG7S, CODE_JPEG7D, +}; + static const struct mca_ras_info mca_ras_table[] = { { .blkid = AMDGPU_RAS_BLOCK__UMC, @@ -2832,6 +3519,20 @@ static const struct mca_ras_info mca_ras_table[] = { .blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL, .ip = AMDGPU_MCA_IP_PCS_XGMI, .get_err_count = mca_pcs_xgmi_mca_get_err_count, + }, { + .blkid = AMDGPU_RAS_BLOCK__VCN, + .ip = AMDGPU_MCA_IP_SMU, + .err_code_array = vcn_err_codes, + .err_code_count = ARRAY_SIZE(vcn_err_codes), + .get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, + }, { + .blkid = AMDGPU_RAS_BLOCK__JPEG, + .ip = AMDGPU_MCA_IP_SMU, + .err_code_array = jpeg_err_codes, + .err_code_count = ARRAY_SIZE(jpeg_err_codes), + 
.get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, }, }; @@ -2877,55 +3578,6 @@ static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_i return true; } -static int __mca_smu_get_ras_mca_set(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, - enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) -{ - struct mca_bank_entry entry; - uint32_t mca_cnt; - int i, ret; - - ret = mca_get_valid_mca_count(adev, type, &mca_cnt); - if (ret) - return ret; - - /* if valid mca bank count is 0, the driver can return 0 directly */ - if (!mca_cnt) - return 0; - - for (i = 0; i < mca_cnt; i++) { - memset(&entry, 0, sizeof(entry)); - ret = mca_get_mca_entry(adev, type, i, &entry); - if (ret) - return ret; - - if (mca_ras && !mca_bank_is_valid(adev, mca_ras, type, &entry)) - continue; - - ret = amdgpu_mca_bank_set_add_entry(mca_set, &entry); - if (ret) - return ret; - } - - return 0; -} - -static int mca_smu_get_ras_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) -{ - const struct mca_ras_info *mca_ras = NULL; - - if (!mca_set) - return -EINVAL; - - if (blk != AMDGPU_RAS_BLOCK_COUNT) { - mca_ras = mca_get_mca_ras_info(adev, blk); - if (!mca_ras) - return -EOPNOTSUPP; - } - - return __mca_smu_get_ras_mca_set(adev, mca_ras, type, mca_set); -} - static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) { @@ -2962,7 +3614,6 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = { .max_ue_count = 12, .max_ce_count = 12, .mca_set_debug_mode = mca_smu_set_debug_mode, - .mca_get_ras_mca_set = mca_smu_get_ras_mca_set, .mca_parse_mca_error_count = mca_smu_parse_mca_error_count, .mca_get_mca_entry = mca_smu_get_mca_entry, .mca_get_valid_mca_count = mca_smu_get_valid_mca_count, @@ -2975,7 +3626,7 @@ static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) return smu_v13_0_6_mca_set_debug_mode(smu, enable); } -static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count) +static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_smu_type type, u32 *count) { uint32_t msg; int ret; @@ -2984,10 +3635,10 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err return -EINVAL; switch (type) { - case ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: msg = SMU_MSG_QueryValidMcaCount; break; - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_CE: msg = SMU_MSG_QueryValidMcaCeCount; break; default: @@ -3004,14 +3655,14 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err } static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, - enum aca_error_type type, u32 *count) + enum aca_smu_type type, u32 *count) { struct smu_context *smu = adev->powerplay.pp_handle; int ret; switch (type) { - case ACA_ERROR_TYPE_UE: - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_UE: + case ACA_SMU_TYPE_CE: ret = smu_v13_0_6_get_valid_aca_count(smu, type, count); break; default: @@ -3022,16 +3673,16 @@ static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, return ret; } -static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, +static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type, int idx, int offset, u32 *val) { uint32_t msg, param; switch (type) { - case 
ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: msg = SMU_MSG_McaBankDumpDW; break; - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_CE: msg = SMU_MSG_McaBankCeDumpDW; break; default: @@ -3043,7 +3694,7 @@ static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_t return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val); } -static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, +static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type, int idx, int offset, u32 *val, int count) { int ret, i; @@ -3060,7 +3711,7 @@ static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_typ return 0; } -static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type, +static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_smu_type type, int idx, int reg_idx, u64 *val) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -3077,13 +3728,13 @@ static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type typ *val = (u64)data[1] << 32 | data[0]; dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n", - type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val); + type == ACA_SMU_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val); return 0; } static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, - enum aca_error_type type, int idx, struct aca_bank *bank) + enum aca_smu_type type, int idx, struct aca_bank *bank) { int i, ret, count; @@ -3097,52 +3748,28 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, return 0; } +static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + int error_code; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND))) + error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); + else + error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); + + return error_code & 0xff; +} + static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { .max_ue_bank_count = 12, .max_ce_bank_count = 12, .set_debug_mode = aca_smu_set_debug_mode, .get_valid_aca_count = aca_smu_get_valid_aca_count, .get_valid_aca_bank = aca_smu_get_valid_aca_bank, + .parse_error_code = aca_smu_parse_error_code, }; -static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode) -{ - struct amdgpu_device *adev = smu->adev; - int ret, param; - - switch (mode) { - case XGMI_PLPD_DEFAULT: - param = PPSMC_PLPD_MODE_DEFAULT; - break; - case XGMI_PLPD_OPTIMIZED: - param = PPSMC_PLPD_MODE_OPTIMIZED; - break; - case XGMI_PLPD_DISALLOW: - param = 0; - break; - default: - return -EINVAL; - } - - if (mode == XGMI_PLPD_DISALLOW) - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - param, NULL); - else - /* change xgmi per-link power down policy */ - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SelectPLPDMode, - param, NULL); - - if (ret) - dev_err(adev->dev, - "select xgmi per-link power down policy %d failed\n", - mode); - - return ret; -} - static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -3164,7 +3791,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .fini_power = smu_v13_0_fini_power, .check_fw_status = smu_v13_0_6_check_fw_status, /* pptable related */ - .check_fw_version = smu_v13_0_check_fw_version, + 
.check_fw_version = smu_v13_0_6_check_fw_version, .set_driver_table_location = smu_v13_0_set_driver_table_location, .set_tool_table_location = smu_v13_0_set_tool_table_location, .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location, @@ -3179,35 +3806,42 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .enable_thermal_alert = smu_v13_0_enable_thermal_alert, .disable_thermal_alert = smu_v13_0_disable_thermal_alert, .setup_pptable = smu_v13_0_6_setup_pptable, - .baco_is_support = smu_v13_0_6_is_baco_supported, + .get_bamaco_support = smu_v13_0_6_get_bamaco_support, .get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range, .od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table, - .select_xgmi_plpd_policy = smu_v13_0_6_select_xgmi_plpd_policy, .log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics, .get_pm_metrics = smu_v13_0_6_get_pm_metrics, + .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics, .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, - .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported, + .link_reset_is_support = smu_v13_0_6_is_link_reset_supported, .mode1_reset = smu_v13_0_6_mode1_reset, .mode2_reset = smu_v13_0_6_mode2_reset, + .link_reset = smu_v13_0_6_link_reset, .wait_for_event = smu_v13_0_wait_for_event, .i2c_init = smu_v13_0_6_i2c_control_init, .i2c_fini = smu_v13_0_6_i2c_control_fini, .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num, .send_rma_reason = smu_v13_0_6_send_rma_reason, + .reset_sdma = smu_v13_0_6_reset_sdma, + .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported, + .dpm_reset_vcn = smu_v13_0_6_reset_vcn, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) { smu->ppt_funcs = &smu_v13_0_6_ppt_funcs; - smu->message_map = smu_v13_0_6_message_map; + smu->message_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ? + smu_v13_0_12_message_map : smu_v13_0_6_message_map; smu->clock_map = smu_v13_0_6_clk_map; - smu->feature_map = smu_v13_0_6_feature_mask_map; + smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ? 
+ smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map; smu->table_map = smu_v13_0_6_table_map; smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; + smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI; smu_v13_0_set_smu_mailbox_registers(smu); amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs); amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index f0fa42a645c0..d38d6d76b1e7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -26,7 +26,64 @@ #define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2 #define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4 #define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2 +#define SMU_CAP(x) SMU_13_0_6_CAPS_##x + +typedef enum { +/*0*/ METRICS_VERSION_V0 = 0, +/*1*/ METRICS_VERSION_V1 = 1, +/*2*/ METRICS_VERSION_V2 = 2, + +/*3*/ NUM_METRICS = 3 +} METRICS_LIST_e; + +struct PPTable_t { + uint32_t MaxSocketPowerLimit; + uint32_t MaxGfxclkFrequency; + uint32_t MinGfxclkFrequency; + uint32_t FclkFrequencyTable[4]; + uint32_t UclkFrequencyTable[4]; + uint32_t SocclkFrequencyTable[4]; + uint32_t VclkFrequencyTable[4]; + uint32_t DclkFrequencyTable[4]; + uint32_t LclkFrequencyTable[4]; + uint32_t MaxLclkDpmRange; + uint32_t MinLclkDpmRange; + uint64_t PublicSerialNumber_AID; + bool Init; +}; + +enum smu_v13_0_6_caps { + SMU_CAP(DPM), + SMU_CAP(DPM_POLICY), + SMU_CAP(OTHER_END_METRICS), + SMU_CAP(SET_UCLK_MAX), + SMU_CAP(PCIE_METRICS), + SMU_CAP(MCA_DEBUG_MODE), + SMU_CAP(PER_INST_METRICS), + SMU_CAP(CTF_LIMIT), + SMU_CAP(RMA_MSG), + SMU_CAP(ACA_SYND), + SMU_CAP(SDMA_RESET), + SMU_CAP(STATIC_METRICS), + SMU_CAP(HST_LIMIT_METRICS), + SMU_CAP(BOARD_VOLTAGE), + SMU_CAP(PLDM_VERSION), + SMU_CAP(ALL), +}; extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); +bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); +int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); +bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); +int smu_v13_0_12_get_max_metrics_size(void); +int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); +int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, uint32_t *value); +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics); +ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, + struct amdgpu_xcp *xcp, void *table, + void *smu_metrics); +extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; +extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 7318964f1f14..c8f4f6fb4083 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -83,6 +83,8 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 +#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12 #define LINK_SPEED_MAX 3 @@ -734,19 +736,6 @@ static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v13_0_7_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = 
table_context->driver_pptable; - SkuTable_t *skutable = &pptable->SkuTable; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); -} - static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics) { uint32_t throttler_status = 0; @@ -818,6 +807,10 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu, else *value = metrics->AverageMemclkFrequencyPreDs; break; + case METRICS_AVERAGE_VCNACTIVITY: + *value = max(metrics->Vcn0ActivityPercentage, + metrics->Vcn1ActivityPercentage); + break; case METRICS_AVERAGE_VCLK: *value = metrics->AverageVclk0Frequency; break; @@ -962,6 +955,12 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_7_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_7_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, @@ -1132,6 +1131,14 @@ static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP: + od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp; + od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1452,6 +1459,42 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmStopTemp); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1548,6 +1591,16 @@ static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + 
od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + od_table->OverDriveTable.FanZeroRpmStopTemp = + boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; default: dev_info(adev->dev, "Invalid table index: %ld\n", input); return -EINVAL; @@ -1841,6 +1894,48 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmStopTemp = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v13_0_7_od_restore_table_single(smu, input[0]); @@ -1964,7 +2059,8 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -2061,6 +2157,8 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1]; @@ -2106,7 +2204,11 @@ static void smu_v13_0_7_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET; } static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) @@ -2172,6 +2274,10 @@ static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) 
user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + user_od_table->OverDriveTable.FanZeroRpmStopTemp = + user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp; } smu_v13_0_7_set_supported_od_feature_mask(smu); @@ -2378,7 +2484,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf size += sysfs_emit_at(buf, size, " "); for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) - size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i], + size += sysfs_emit_at(buf, size, "%d %-14s%s", i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "* " : " "); size += sysfs_emit_at(buf, size, "\n"); @@ -2408,7 +2514,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf do { \ size += sysfs_emit_at(buf, size, "%-30s", #field); \ for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \ - size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \ + size += sysfs_emit_at(buf, size, "%-18d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \ size += sysfs_emit_at(buf, size, "\n"); \ } while (0) @@ -2434,69 +2540,110 @@ out: return result; } -static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +#define SMU_13_0_7_CUSTOM_PARAMS_COUNT 8 +#define SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_13_0_7_CUSTOM_PARAMS_SIZE (SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_7_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); - int workload_type, ret = 0; + int ret, idx; - smu->power_profile_mode = input[size]; + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; + idx = 0 * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_ActiveHystLimit = input[idx + 1]; + activity_monitor->Gfx_IdleHystLimit = input[idx + 2]; + activity_monitor->Gfx_FPS = input[idx + 3]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 5]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 6]; + activity_monitor->Gfx_BoosterFreq = input[idx + 7]; + } + idx = 1 * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_ActiveHystLimit = input[idx + 1]; + activity_monitor->Fclk_IdleHystLimit = input[idx + 2]; + activity_monitor->Fclk_FPS = input[idx + 3]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 4]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 5]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 6]; + activity_monitor->Fclk_BoosterFreq = input[idx + 7]; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, 
WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), false); - if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return ret; - } + return ret; +} - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor->Gfx_ActiveHystLimit = input[1]; - activity_monitor->Gfx_IdleHystLimit = input[2]; - activity_monitor->Gfx_FPS = input[3]; - activity_monitor->Gfx_MinActiveFreqType = input[4]; - activity_monitor->Gfx_BoosterFreqType = input[5]; - activity_monitor->Gfx_MinActiveFreq = input[6]; - activity_monitor->Gfx_BoosterFreq = input[7]; - break; - case 1: /* Fclk */ - activity_monitor->Fclk_ActiveHystLimit = input[1]; - activity_monitor->Fclk_IdleHystLimit = input[2]; - activity_monitor->Fclk_FPS = input[3]; - activity_monitor->Fclk_MinActiveFreqType = input[4]; - activity_monitor->Fclk_BoosterFreqType = input[5]; - activity_monitor->Fclk_MinActiveFreq = input[6]; - activity_monitor->Fclk_BoosterFreq = input[7]; - break; +static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; + + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SMU_13_0_7_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; } - - ret = smu_cmn_update_table(smu, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, - (void *)(&activity_monitor_external), true); + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SMU_13_0_7_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SMU_13_0_7_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = smu_v13_0_7_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); if (ret) { - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + if (idx != -1) + smu->custom_profile_params[idx] = 0; return ret; } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SMU_13_0_7_CUSTOM_PARAMS_SIZE); } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); - if (workload_type < 0) - return -EINVAL; - smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + backend_workload_mask, NULL); + + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } return ret; } @@ -2596,7 +2743,6 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, .is_dpm_running = smu_v13_0_7_is_dpm_running, - .dump_pptable = 
smu_v13_0_7_dump_pptable, .init_microcode = smu_v13_0_init_microcode, .load_microcode = smu_v13_0_load_microcode, .fini_microcode = smu_v13_0_fini_microcode, @@ -2650,7 +2796,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .set_tool_table_location = smu_v13_0_set_tool_table_location, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, - .baco_is_support = smu_v13_0_baco_is_support, + .get_bamaco_support = smu_v13_0_get_bamaco_support, .baco_enter = smu_v13_0_baco_enter, .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, @@ -2661,6 +2807,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check, .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow, .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges, + .interrupt_work = smu_v13_0_interrupt_work, }; void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 2d1736234b4a..73b4506ef5a8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -220,7 +220,9 @@ static int yellow_carp_system_features_control(struct smu_context *smu, bool en) return ret; } -static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -363,7 +365,7 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; @@ -423,6 +425,12 @@ static int yellow_carp_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = yellow_carp_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = yellow_carp_get_smu_metrics_data(smu, METRICS_CURR_SOCKETPOWER, @@ -771,7 +779,7 @@ static int yellow_carp_get_dpm_level_count(struct smu_context *smu, *count = clk_table->NumDfPstatesEnabled; break; default: - break; + return -EINVAL; } return 0; @@ -861,31 +869,9 @@ static int yellow_carp_get_dpm_ultimate_freq(struct smu_context *smu, int ret = 0; if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_FCLK: - clock_limit = smu->smu_table.boot_values.fclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - case SMU_VCLK: - clock_limit = smu->smu_table.boot_values.vclk; - break; - case SMU_DCLK: - clock_limit = smu->smu_table.boot_values.dclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) @@ -961,9 +947,10 @@ failed: } static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t 
max, + bool automatic) { enum smu_message_type msg_set_min, msg_set_max; uint32_t min_clk = min; @@ -1150,7 +1137,7 @@ static int yellow_carp_force_clk_levels(struct smu_context *smu, if (ret) goto force_level_out; - ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto force_level_out; break; @@ -1270,9 +1257,10 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, if (sclk_min && sclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_SCLK, - sclk_min, - sclk_max); + SMU_SCLK, + sclk_min, + sclk_max, + false); if (ret) return ret; @@ -1282,18 +1270,20 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, if (fclk_min && fclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_FCLK, - fclk_min, - fclk_max); + SMU_FCLK, + fclk_min, + fclk_max, + false); if (ret) return ret; } if (socclk_min && socclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_SOCCLK, - socclk_min, - socclk_max); + SMU_SOCCLK, + socclk_min, + socclk_max, + false); if (ret) return ret; } @@ -1302,7 +1292,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, ret = yellow_carp_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1311,7 +1302,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, ret = yellow_carp_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile index ddbac5c655f7..4593e29e8ff8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o +SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o smu_v14_0_2_ppt.o AMD_SWSMU_SMU14MGR = $(addprefix $(AMD_SWSMU_PATH)/smu14/,$(SMU14_MGR)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 07a65e005785..76c1adda83db 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -38,9 +38,18 @@ #include "amdgpu_ras.h" #include "smu_cmn.h" -#include "asic_reg/mp/mp_14_0_0_offset.h" -#include "asic_reg/mp/mp_14_0_0_sh_mask.h" - +#include "asic_reg/thm/thm_14_0_2_offset.h" +#include "asic_reg/thm/thm_14_0_2_sh_mask.h" +#include "asic_reg/mp/mp_14_0_2_offset.h" +#include "asic_reg/mp/mp_14_0_2_sh_mask.h" + +#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341 +#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0 +#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342 +#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0 + +const int decoded_link_speed[5] = {1, 2, 3, 4, 5}; +const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32}; /* * DO NOT use these for err/warn/info/debug messages. * Use dev_err, dev_warn, dev_info and dev_dbg instead. 
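/*
 * [Editor's sketch, not part of the commit] The decoded_link_speed and
 * decoded_link_width tables introduced above map the PMFW's encoded PCIe
 * link fields to a human-readable generation number and lane count
 * (index 3 in decoded_link_speed gives Gen 4; index 4 in
 * decoded_link_width gives a x8 link). A minimal decoder built on that
 * assumption could look like the helper below; the function name and the
 * range checks are invented here for illustration only.
 */
static inline int smu_v14_0_decode_pcie_link(uint32_t speed_idx,
					     uint32_t width_idx,
					     int *gen, int *lanes)
{
	/* 5 encoded speeds (Gen 1..5) and 8 encoded widths (x0..x32) */
	if (speed_idx >= ARRAY_SIZE(decoded_link_speed) ||
	    width_idx >= ARRAY_SIZE(decoded_link_width))
		return -EINVAL;

	*gen   = decoded_link_speed[speed_idx];  /* e.g. 3 -> Gen 4 */
	*lanes = decoded_link_width[width_idx];  /* e.g. 4 -> x8 */
	return 0;
}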
@@ -52,13 +61,13 @@ #undef pr_debug MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); +MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 int smu_v14_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char fw_name[30]; char ucode_prefix[15]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; @@ -70,10 +79,8 @@ int smu_v14_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - - err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; @@ -106,7 +113,6 @@ void smu_v14_0_fini_microcode(struct smu_context *smu) int smu_v14_0_load_microcode(struct smu_context *smu) { -#if 0 struct amdgpu_device *adev = smu->adev; const uint32_t *src; const struct smc_firmware_header_v1_0 *hdr; @@ -131,8 +137,12 @@ int smu_v14_0_load_microcode(struct smu_context *smu) 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK); for (i = 0; i < adev->usec_timeout; i++) { - mp1_fw_flags = RREG32_PCIE(MP1_Public | - (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); + if (smu->is_apu) + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); + else + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) break; @@ -142,9 +152,7 @@ int smu_v14_0_load_microcode(struct smu_context *smu) if (i == adev->usec_timeout) return -ETIME; -#endif return 0; - } int smu_v14_0_init_pptable_microcode(struct smu_context *smu) @@ -165,6 +173,10 @@ int smu_v14_0_init_pptable_microcode(struct smu_context *smu) if (!adev->scpm_enabled) return 0; + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) || + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3))) + return 0; + /* override pptable_id from driver parameter */ if (amdgpu_smu_pptable_id >= 0) { pptable_id = amdgpu_smu_pptable_id; @@ -198,7 +210,11 @@ int smu_v14_0_check_fw_status(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; uint32_t mp1_fw_flags; - mp1_fw_flags = RREG32_PCIE(MP1_Public | + if (smu->is_apu) + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); + else + mp1_fw_flags = RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> @@ -227,16 +243,18 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) adev->pm.fw_version = smu_version; switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(14, 0, 2): - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; - break; case IP_VERSION(14, 0, 0): + case IP_VERSION(14, 0, 4): + case IP_VERSION(14, 0, 5): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; case IP_VERSION(14, 0, 1): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; break; - + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; + break; default: dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", amdgpu_ip_version(adev, MP1_HWIP, 0)); @@ -436,17 +454,26 @@ int smu_v14_0_init_smc_tables(struct smu_context *smu) ret = -ENOMEM; goto err3_out; } + + 
smu_table->user_overdrive_table = + kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); + if (!smu_table->user_overdrive_table) { + ret = -ENOMEM; + goto err4_out; + } } smu_table->combo_pptable = kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL); if (!smu_table->combo_pptable) { ret = -ENOMEM; - goto err4_out; + goto err5_out; } return 0; +err5_out: + kfree(smu_table->user_overdrive_table); err4_out: kfree(smu_table->boot_overdrive_table); err3_out: @@ -738,9 +765,12 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) struct amdgpu_device *adev = smu->adev; switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + case IP_VERSION(14, 0, 4): + case IP_VERSION(14, 0, 5): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -833,32 +863,73 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, unsigned tyep, enum amdgpu_interrupt_state state) { + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t low, high; uint32_t val = 0; switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* For THM irqs */ - // TODO + val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val); + + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0); /* For MP1 SW irqs */ - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + if (smu->is_apu) { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); + } else { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + } break; case AMDGPU_IRQ_STATE_ENABLE: /* For THM irqs */ - // TODO + low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, + smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES); + high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, + smu->thermal_range.software_shutdown_temp); + val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); + val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val); + + val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); + val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); + val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val); /* For MP1 SW irqs */ - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); - - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); - val 
= REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + if (smu->is_apu) { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val); + + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); + } else { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); + + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + } break; default: @@ -868,11 +939,84 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, return 0; } +#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ +#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ + static int smu_v14_0_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - // TODO + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t client_id = entry->client_id; + uint32_t src_id = entry->src_id; + + /* + * ctxid is used to distinguish different + * events for SMCToHost interrupt. + */ + uint32_t ctxid = entry->src_data[0]; + uint32_t data; + uint32_t high; + + if (client_id == SOC15_IH_CLIENTID_THM) { + switch (src_id) { + case THM_11_0__SRCID__THM_DIG_THERM_L2H: + schedule_delayed_work(&smu->swctf_delayed_work, + msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); + break; + case THM_11_0__SRCID__THM_DIG_THERM_H2L: + dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); + break; + default: + dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n", + src_id); + break; + } + } else if (client_id == SOC15_IH_CLIENTID_MP1) { + if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) { + /* ACK SMUToHost interrupt */ + data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data); + + switch (ctxid) { + case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL: + high = smu->thermal_range.software_shutdown_temp + + smu->thermal_range.software_shutdown_temp_offset; + high = min_t(typeof(high), + SMU_THERMAL_MAXIMUM_ALERT_TEMP, + high); + dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n", + high, + smu->thermal_range.software_shutdown_temp_offset); + + data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); + data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL, + DIG_THERM_INTH, + (high & 0xff)); + data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data); + break; + case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY: + high = min_t(typeof(high), + SMU_THERMAL_MAXIMUM_ALERT_TEMP, + smu->thermal_range.software_shutdown_temp); + dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high); + + data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); + data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL, + DIG_THERM_INTH, + (high & 0xff)); + data = data & 
(~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data); + break; + default: + dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n", + ctxid, client_id); + break; + } + } + } return 0; } @@ -894,7 +1038,17 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu) irq_src->num_types = 1; irq_src->funcs = &smu_v14_0_irq_funcs; - // TODO: THM related + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + THM_11_0__SRCID__THM_DIG_THERM_L2H, + irq_src); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + THM_11_0__SRCID__THM_DIG_THERM_H2L, + irq_src); + if (ret) + return ret; ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, SMU_IH_INTERRUPT_ID_TO_DRIVER, @@ -1003,7 +1157,8 @@ failed: int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1018,7 +1173,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1026,7 +1184,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1103,6 +1264,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, uint32_t dclk_min = 0, dclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; int ret = 0, i; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1134,6 +1296,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, dclk_max = dclk_table->max; fclk_min = fclk_table->min; fclk_max = fclk_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1169,7 +1332,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + auto_level); if (ret) return ret; @@ -1181,7 +1345,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; @@ -1193,7 +1358,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; @@ -1208,7 +1374,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, i ? SMU_VCLK1 : SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + auto_level); if (ret) return ret; } @@ -1223,7 +1390,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, i ? 
SMU_DCLK1 : SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + auto_level); if (ret) return ret; } @@ -1235,7 +1403,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + auto_level); if (ret) return ret; @@ -1393,33 +1562,28 @@ int smu_v14_0_set_single_dpm_table(struct smu_context *smu, } int smu_v14_0_set_vcn_enable(struct smu_context *smu, - bool enable) + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; - int i, ret = 0; + int ret = 0; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; + if (adev->vcn.harvest_config & (1 << inst)) + return ret; - if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { - if (i == 0) - ret = smu_cmn_send_smc_msg_with_param(smu, enable ? - SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0, - i << 16U, NULL); - else if (i == 1) - ret = smu_cmn_send_smc_msg_with_param(smu, enable ? - SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1, - i << 16U, NULL); - } else { + if (smu->is_apu) { + if (inst == 0) ret = smu_cmn_send_smc_msg_with_param(smu, enable ? - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, - i << 16U, NULL); - } - - if (ret) - return ret; + SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0, + inst << 16U, NULL); + else if (inst == 1) + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1, + inst << 16U, NULL); + } else { + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, + inst << 16U, NULL); } return ret; @@ -1435,8 +1599,7 @@ int smu_v14_0_set_jpeg_enable(struct smu_context *smu, if (adev->jpeg.harvest_config & (1 << i)) continue; - if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { + if (smu->is_apu) { if (i == 0) ret = smu_cmn_send_smc_msg_with_param(smu, enable ? SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0, @@ -1590,23 +1753,27 @@ int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu, return 0; } -bool smu_v14_0_baco_is_support(struct smu_context *smu) +int smu_v14_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; + int bamaco_support = 0; if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) - return false; + return 0; + + if (smu_baco->maco_support) + bamaco_support |= MACO_SUPPORT; /* return true if ASIC is in BACO state already */ if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; + return (bamaco_support |= BACO_SUPPORT); if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; + return 0; - return true; + return (bamaco_support |= BACO_SUPPORT); } enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu) @@ -1629,7 +1796,7 @@ int smu_v14_0_baco_set_state(struct smu_context *smu, if (state == SMU_BACO_STATE_ENTER) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, - smu_baco->maco_support ? + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? 
BACO_SEQ_BAMACO : BACO_SEQ_BACO, NULL); } else { @@ -1775,3 +1942,31 @@ int smu_v14_0_od_edit_dpm_table(struct smu_context *smu, return ret; } +static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu) +{ + return smu_cmn_send_smc_msg(smu, + SMU_MSG_AllowIHHostInterrupt, + NULL); +} + +int smu_v14_0_enable_thermal_alert(struct smu_context *smu) +{ + int ret = 0; + + if (!smu->irq_source.num_types) + return 0; + + ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0); + if (ret) + return ret; + + return smu_v14_0_allow_ih_interrupt(smu); +} + +int smu_v14_0_disable_thermal_alert(struct smu_context *smu) +{ + if (!smu->irq_source.num_types) + return 0; + + return amdgpu_irq_put(smu->adev, &smu->irq_source, 0); +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 63399c00cc28..84f9b007b59f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -52,6 +52,26 @@ #define mmMP1_SMN_C2PMSG_90 0x029a #define mmMP1_SMN_C2PMSG_90_BASE_IDX 0 +/* MALLPowerController message arguments (Defines for the Cache mode control) */ +#define SMU_MALL_PMFW_CONTROL 0 +#define SMU_MALL_DRIVER_CONTROL 1 + +/* + * MALLPowerState message arguments + * (Defines for the Allocate/Release Cache mode if in driver mode) + */ +#define SMU_MALL_EXIT_PG 0 +#define SMU_MALL_ENTER_PG 1 + +#define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON + +#define SMU_14_0_0_UMD_PSTATE_GFXCLK 700 +#define SMU_14_0_0_UMD_PSTATE_SOCCLK 678 +#define SMU_14_0_0_UMD_PSTATE_FCLK 1800 + +#define SMU_14_0_4_UMD_PSTATE_GFXCLK 938 +#define SMU_14_0_4_UMD_PSTATE_SOCCLK 938 + #define FEATURE_MASK(feature) (1ULL << feature) #define SMC_DPM_FEATURE ( \ FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ @@ -66,6 +86,12 @@ FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \ FEATURE_MASK(FEATURE_VPE_DPM_BIT)) +enum smu_mall_pg_config { + SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0, + SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON = 1, + SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF = 2, +}; + static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1), @@ -113,6 +139,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownUmsch, PPSMC_MSG_PowerDownUmsch, 1), MSG_MAP(SetSoftMaxVpe, PPSMC_MSG_SetSoftMaxVpe, 1), MSG_MAP(SetSoftMinVpe, PPSMC_MSG_SetSoftMinVpe, 1), + MSG_MAP(MALLPowerController, PPSMC_MSG_MALLPowerController, 1), + MSG_MAP(MALLPowerState, PPSMC_MSG_MALLPowerState, 1), }; static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = { @@ -362,6 +390,12 @@ static int smu_v14_0_0_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v14_0_0_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v14_0_0_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, @@ -696,10 +730,10 @@ static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu, uint32_t dpm_level, uint32_t *freq) { - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) - smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); - else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 
0, 1)) smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1) + smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); return 0; } @@ -791,9 +825,11 @@ static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu, break; case SMU_MCLK: case SMU_UCLK: - case SMU_FCLK: max_dpm_level = 0; break; + case SMU_FCLK: + max_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + break; case SMU_SOCCLK: max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; break; @@ -828,7 +864,7 @@ static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu, min_dpm_level = clk_table->NumMemPstatesEnabled - 1; break; case SMU_FCLK: - min_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + min_dpm_level = 0; break; case SMU_SOCCLK: min_dpm_level = 0; @@ -909,9 +945,11 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, break; case SMU_MCLK: case SMU_UCLK: - case SMU_FCLK: max_dpm_level = 0; break; + case SMU_FCLK: + max_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + break; case SMU_SOCCLK: max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; break; @@ -942,7 +980,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, min_dpm_level = clk_table->NumMemPstatesEnabled - 1; break; case SMU_FCLK: - min_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + min_dpm_level = 0; break; case SMU_SOCCLK: min_dpm_level = 0; @@ -972,10 +1010,10 @@ static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu, uint32_t *min, uint32_t *max) { - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) - smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max); - else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max); + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1) + smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max); return 0; } @@ -993,9 +1031,15 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu, case SMU_VCLK: member_type = METRICS_AVERAGE_VCLK; break; + case SMU_VCLK1: + member_type = METRICS_AVERAGE_VCLK1; + break; case SMU_DCLK: member_type = METRICS_AVERAGE_DCLK; break; + case SMU_DCLK1: + member_type = METRICS_AVERAGE_DCLK1; + break; case SMU_MCLK: member_type = METRICS_AVERAGE_UCLK; break; @@ -1077,10 +1121,10 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *count) { - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) - smu_v14_0_0_get_dpm_level_count(smu, clk_type, count); - else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_v14_0_1_get_dpm_level_count(smu, clk_type, count); + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1) + smu_v14_0_0_get_dpm_level_count(smu, clk_type, count); return 0; } @@ -1088,7 +1132,7 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, ret = 0, size = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -1124,7 +1168,8 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, break; for (i = 0; i < count; i++) { - ret = 
smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) break; @@ -1223,6 +1268,8 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, case SMU_FCLK: case SMU_VCLK: case SMU_DCLK: + case SMU_VCLK1: + case SMU_DCLK1: ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); if (ret) break; @@ -1241,13 +1288,76 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, return ret; } -static int smu_v14_0_0_set_performance_level(struct smu_context *smu, +static int smu_v14_0_common_get_dpm_profile_freq(struct smu_context *smu, + enum amd_dpm_forced_level level, + enum smu_clk_type clk_type, + uint32_t *min_clk, + uint32_t *max_clk) +{ + uint32_t clk_limit = 0; + int ret = 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4)) + clk_limit = SMU_14_0_4_UMD_PSTATE_GFXCLK; + else + clk_limit = SMU_14_0_0_UMD_PSTATE_GFXCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL); + break; + case SMU_SOCCLK: + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4)) + clk_limit = SMU_14_0_4_UMD_PSTATE_SOCCLK; + else + clk_limit = SMU_14_0_0_UMD_PSTATE_SOCCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit); + break; + case SMU_FCLK: + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4)) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit); + else + clk_limit = SMU_14_0_0_UMD_PSTATE_FCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL); + break; + case SMU_VCLK: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit); + break; + case SMU_VCLK1: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, NULL, &clk_limit); + break; + case SMU_DCLK: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit); + break; + case SMU_DCLK1: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, NULL, &clk_limit); + break; + default: + ret = -EINVAL; + break; + } + *min_clk = *max_clk = clk_limit; + return ret; +} + +static int smu_v14_0_common_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { struct amdgpu_device *adev = smu->adev; uint32_t sclk_min = 0, sclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; uint32_t socclk_min = 0, socclk_max = 0; + uint32_t vclk_min = 0, vclk_max = 0; + uint32_t dclk_min = 0, dclk_max = 0; + uint32_t vclk1_min = 0, vclk1_max = 0; + uint32_t dclk1_min = 0, dclk1_max = 0; int ret = 0; switch (level) { @@ -1255,28 +1365,54 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu, smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max); + 
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, NULL, &vclk1_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, NULL, &dclk1_max); sclk_min = sclk_max; fclk_min = fclk_max; socclk_min = socclk_max; + vclk_min = vclk_max; + dclk_min = dclk_max; + vclk1_min = vclk1_max; + dclk1_min = dclk1_max; break; case AMD_DPM_FORCED_LEVEL_LOW: smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, &vclk1_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, &dclk1_min, NULL); sclk_max = sclk_min; fclk_max = fclk_min; socclk_max = socclk_min; + vclk_max = vclk_min; + dclk_max = dclk_min; + vclk1_max = vclk1_min; + dclk1_max = dclk1_min; break; case AMD_DPM_FORCED_LEVEL_AUTO: smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, &vclk1_min, &vclk1_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, &dclk1_min, &dclk1_max); break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - /* Temporarily do nothing since the optimal clocks haven't been provided yet */ + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_VCLK1, &vclk1_min, &vclk1_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_DCLK1, &dclk1_min, &dclk1_max); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: @@ -1316,6 +1452,42 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu, return ret; } + if (vclk_min && vclk_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_VCLK, + vclk_min, + vclk_max); + if (ret) + return ret; + } + + if (vclk1_min && vclk1_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_VCLK1, + vclk1_min, + vclk1_max); + if (ret) + return ret; + } + + if (dclk_min && dclk_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_DCLK, + dclk_min, + dclk_max); + if (ret) + return ret; + } + + if (dclk1_min && dclk1_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_DCLK1, + dclk1_min, + dclk1_max); + if (ret) + return ret; + } + return ret; } @@ -1345,10 +1517,10 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm static int 
smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) { - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) - smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu); - else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu); + else + smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu); return 0; } @@ -1409,14 +1581,65 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks * static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) { - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) - smu_14_0_0_get_dpm_table(smu, clock_table); - else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_14_0_1_get_dpm_table(smu, clock_table); + else + smu_14_0_0_get_dpm_table(smu, clock_table); return 0; } +static int smu_v14_0_1_init_mall_power_gating(struct smu_context *smu, enum smu_mall_pg_config pg_config) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + + if (pg_config == SMU_MALL_PG_CONFIG_PMFW_CONTROL) { + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController, + SMU_MALL_PMFW_CONTROL, NULL); + if (ret) { + dev_err(adev->dev, "Init MALL PMFW CONTROL Failure\n"); + return ret; + } + } else { + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController, + SMU_MALL_DRIVER_CONTROL, NULL); + if (ret) { + dev_err(adev->dev, "Init MALL Driver CONTROL Failure\n"); + return ret; + } + + if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON) { + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState, + SMU_MALL_EXIT_PG, NULL); + if (ret) { + dev_err(adev->dev, "EXIT MALL PG Failure\n"); + return ret; + } + } else if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF) { + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState, + SMU_MALL_ENTER_PG, NULL); + if (ret) { + dev_err(adev->dev, "Enter MALL PG Failure\n"); + return ret; + } + } + } + + return ret; +} + +static int smu_v14_0_common_set_mall_enable(struct smu_context *smu) +{ + enum smu_mall_pg_config pg_config = SMU_MALL_PG_CONFIG_DEFAULT; + int ret = 0; + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + ret = smu_v14_0_1_init_mall_power_gating(smu, pg_config); + + return ret; +} + static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .check_fw_status = smu_v14_0_check_fw_status, .check_fw_version = smu_v14_0_check_fw_version, @@ -1442,12 +1665,13 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table, .print_clk_levels = smu_v14_0_0_print_clk_levels, .force_clk_levels = smu_v14_0_0_force_clk_levels, - .set_performance_level = smu_v14_0_0_set_performance_level, + .set_performance_level = smu_v14_0_common_set_performance_level, .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters, .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, .get_dpm_clock_table = smu_v14_0_common_get_dpm_table, + .set_mall_enable = smu_v14_0_common_set_mall_enable, }; static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu) diff --git 
a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c new file mode 100644 index 000000000000..82c2db972491 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -0,0 +1,2918 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#define SWSMU_CODE_LAYER_L2 + +#include <linux/firmware.h> +#include <linux/pci.h> +#include <linux/i2c.h> +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "atomfirmware.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" +#include "smu_v14_0.h" +#include "smu14_driver_if_v14_0.h" +#include "soc15_common.h" +#include "atom.h" +#include "smu_v14_0_2_ppt.h" +#include "smu_v14_0_2_pptable.h" +#include "smu_v14_0_2_ppsmc.h" +#include "mp/mp_14_0_2_offset.h" +#include "mp/mp_14_0_2_sh_mask.h" + +#include "smu_cmn.h" +#include "amdgpu_ras.h" + +/* + * DO NOT use these for err/warn/info/debug messages. + * Use dev_err, dev_warn, dev_info and dev_dbg instead. + * They are more MGPU friendly. 
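+ * (The dev_* variants prefix each message with the device name, so on
+ * a multi-GPU system a line reads e.g. "amdgpu 0000:0b:00.0: ...";
+ * the PCI address shown here is illustrative.)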
+ */ +#undef pr_err +#undef pr_warn +#undef pr_info +#undef pr_debug + +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE ( \ + FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_FCLK_BIT)) + +#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 +#define DEBUGSMC_MSG_Mode1Reset 2 +#define LINK_SPEED_MAX 3 + +#define PP_OD_FEATURE_GFXCLK_FMIN 0 +#define PP_OD_FEATURE_GFXCLK_FMAX 1 +#define PP_OD_FEATURE_UCLK_FMIN 2 +#define PP_OD_FEATURE_UCLK_FMAX 3 +#define PP_OD_FEATURE_GFX_VF_CURVE 4 +#define PP_OD_FEATURE_FAN_CURVE_TEMP 5 +#define PP_OD_FEATURE_FAN_CURVE_PWM 6 +#define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7 +#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 +#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 +#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 + +static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), + MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0), + MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), + MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1), + MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1), + MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1), + MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), + MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), + MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), + MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), + MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), + MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), + MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), + MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), + MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), + MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), + MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), + MSG_MAP(DramLogSetDramAddrHigh, 
PPSMC_MSG_DramLogSetDramAddrHigh, 0), + MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), + MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), + MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), + MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), + MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), + MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0), + MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel, + PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0), + MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0), + MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), +}; + +static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = { + CLK_MAP(GFXCLK, PPCLK_GFXCLK), + CLK_MAP(SCLK, PPCLK_GFXCLK), + CLK_MAP(SOCCLK, PPCLK_SOCCLK), + CLK_MAP(FCLK, PPCLK_FCLK), + CLK_MAP(UCLK, PPCLK_UCLK), + CLK_MAP(MCLK, PPCLK_UCLK), + CLK_MAP(VCLK, PPCLK_VCLK_0), + CLK_MAP(DCLK, PPCLK_DCLK_0), + CLK_MAP(DCEFCLK, PPCLK_DCFCLK), +}; + +static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = { + FEA_MAP(FW_DATA_READ), + FEA_MAP(DPM_GFXCLK), + FEA_MAP(DPM_GFX_POWER_OPTIMIZER), + FEA_MAP(DPM_UCLK), + FEA_MAP(DPM_FCLK), + FEA_MAP(DPM_SOCCLK), + FEA_MAP(DPM_LINK), + FEA_MAP(DPM_DCN), + FEA_MAP(VMEMP_SCALING), + FEA_MAP(VDDIO_MEM_SCALING), + FEA_MAP(DS_GFXCLK), + FEA_MAP(DS_SOCCLK), + FEA_MAP(DS_FCLK), + FEA_MAP(DS_LCLK), + FEA_MAP(DS_DCFCLK), + FEA_MAP(DS_UCLK), + FEA_MAP(GFX_ULV), + FEA_MAP(FW_DSTATE), + FEA_MAP(GFXOFF), + FEA_MAP(BACO), + FEA_MAP(MM_DPM), + FEA_MAP(SOC_MPCLK_DS), + FEA_MAP(BACO_MPCLK_DS), + FEA_MAP(THROTTLERS), + FEA_MAP(SMARTSHIFT), + FEA_MAP(GTHR), + FEA_MAP(ACDC), + FEA_MAP(VR0HOT), + FEA_MAP(FW_CTF), + FEA_MAP(FAN_CONTROL), + FEA_MAP(GFX_DCS), + FEA_MAP(GFX_READ_MARGIN), + FEA_MAP(LED_DISPLAY), + FEA_MAP(GFXCLK_SPREAD_SPECTRUM), + FEA_MAP(OUT_OF_BAND_MONITOR), + FEA_MAP(OPTIMIZED_VMIN), + FEA_MAP(GFX_IMU), + FEA_MAP(BOOT_TIME_CAL), + FEA_MAP(GFX_PCC_DFLL), + FEA_MAP(SOC_CG), + FEA_MAP(DF_CSTATE), + FEA_MAP(GFX_EDC), + FEA_MAP(BOOT_POWER_OPT), + FEA_MAP(CLOCK_POWER_DOWN_BYPASS), + FEA_MAP(DS_VCN), + FEA_MAP(BACO_CG), + FEA_MAP(MEM_TEMP_READ), + FEA_MAP(ATHUB_MMHUB_PG), + FEA_MAP(SOC_PCC), + FEA_MAP(EDC_PWRBRK), + FEA_MAP(SOC_EDC_XVMIN), + FEA_MAP(GFX_PSM_DIDT), + FEA_MAP(APT_ALL_ENABLE), + FEA_MAP(APT_SQ_THROTTLE), + FEA_MAP(APT_PF_DCS), + FEA_MAP(GFX_EDC_XVMIN), + FEA_MAP(GFX_DIDT_XVMIN), + FEA_MAP(FAN_ABNORMAL), + [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, +}; + +static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = { + TAB_MAP(PPTABLE), + TAB_MAP(WATERMARKS), + TAB_MAP(AVFS_PSM_DEBUG), + TAB_MAP(PMSTATUSLOG), + TAB_MAP(SMU_METRICS), + TAB_MAP(DRIVER_SMU_CONFIG), + TAB_MAP(ACTIVITY_MONITOR_COEFF), + [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, + TAB_MAP(I2C_COMMANDS), + TAB_MAP(ECCINFO), + TAB_MAP(OVERDRIVE), +}; + +static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { + PWR_MAP(AC), + PWR_MAP(DC), +}; + +static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { + 
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), +}; + +static const uint8_t smu_v14_0_2_throttler_map[] = { + [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), + [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), + [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), + [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), + [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), + [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), + [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), + [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), + [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), + [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), + [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), + [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), + [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), + [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), + [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), + [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), + [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), +}; + +static int +smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num) +{ + struct amdgpu_device *adev = smu->adev; + /*u32 smu_version;*/ + + if (num > 2) + return -EINVAL; + + memset(feature_mask, 0xff, sizeof(uint32_t) * num); + + if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) { + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT); + } +#if 0 + if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) || + !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); + + if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); + + /* PMFW 78.58 contains a critical fix for gfxoff feature */ + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if ((smu_version < 0x004e3a00) || + !(adev->pm.pp_feature & PP_GFXOFF_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT); + + if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); + } + + if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); + + if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT); + } + + if (!(adev->pm.pp_feature & PP_ULV_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT); +#endif + + return 0; +} + +static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context 
= &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; + struct smu_baco_context *smu_baco = &smu->smu_baco; + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsBasicMin; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC) + smu->dc_controlled_by_gpio = true; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) { + smu_baco->platform_support = true; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO) + smu_baco->maco_support = true; + } + + if (!overdrive_lowerlimits->FeatureCtrlMask || + !overdrive_upperlimits->FeatureCtrlMask) + smu->od_enabled = false; + + table_context->thermal_controller_type = + powerplay_table->thermal_controller_type; + + /* + * Instead of having its own buffer space and getting overdrive_table copied, + * smu->od_settings just points to the actual overdrive_table + */ + smu->od_settings = &powerplay_table->overdrive_table; + + smu->adev->pm.no_fan = + !(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT)); + + return 0; +} + +static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; + + memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, + sizeof(PPTable_t)); + + return 0; +} + +static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, + void **table, + uint32_t *size) +{ + struct smu_table_context *smu_table = &smu->smu_table; + void *combo_pptable = smu_table->combo_pptable; + int ret = 0; + + ret = smu_cmn_get_combo_pptable(smu); + if (ret) + return ret; + + *table = combo_pptable; + *size = sizeof(struct smu_14_0_2_powerplay_table); + + return 0; +} + +static int smu_v14_0_2_setup_pptable(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + int ret = 0; + + if (amdgpu_sriov_vf(smu->adev)) + return 0; + + ret = smu_v14_0_2_get_pptable_from_pmfw(smu, + &smu_table->power_play_table, + &smu_table->power_play_table_size); + if (ret) + return ret; + + ret = smu_v14_0_2_store_powerplay_table(smu); + if (ret) + return ret; + + ret = smu_v14_0_2_check_powerplay_table(smu); + if (ret) + return ret; + + return ret; +} + +static int smu_v14_0_2_tables_init(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU14_TOOL_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, + sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables,
SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); + if (!smu_table->metrics_table) + goto err0_out; + smu_table->metrics_time = 0; + + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) + goto err1_out; + + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); + if (!smu_table->watermarks_table) + goto err2_out; + + smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); + if (!smu_table->ecc_table) + goto err3_out; + + return 0; + +err3_out: + kfree(smu_table->watermarks_table); +err2_out: + kfree(smu_table->gpu_metrics_table); +err1_out: + kfree(smu_table->metrics_table); +err0_out: + return -ENOMEM; +} + +static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context), + GFP_KERNEL); + if (!smu_dpm->dpm_context) + return -ENOMEM; + + smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context); + + return 0; +} + +static int smu_v14_0_2_init_smc_tables(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_v14_0_2_tables_init(smu); + if (ret) + return ret; + + ret = smu_v14_0_2_allocate_dpm_context(smu); + if (ret) + return ret; + + return smu_v14_0_init_smc_tables(smu); +} + +static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) +{ + struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + struct smu_14_0_dpm_table *dpm_table; + struct smu_14_0_pcie_table *pcie_table; + uint32_t link_level; + int ret = 0; + + /* socclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.soc_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_SOCCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* gfxclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.gfx_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_GFXCLK, + dpm_table); + if (ret) + return ret; + + /* + * Update the reported maximum shader clock to the value + * which can be guaranteed to be achieved on all cards. This + * is aligned with the Windows setting. And considering that value + * might not be the peak frequency the card can achieve, it + * is normal for a real-time clock frequency to overtake this + * labelled maximum clock frequency (for example, in the + * pp_dpm_sclk sysfs output).
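+ *
+ * A worked (illustrative) example: if the firmware DPM table tops
+ * out at 2600 MHz while DriverReportedClocks.GameClockAc reads
+ * 2480 MHz, the table maximum is clamped to 2480 MHz below, yet
+ * boost states above it may still show up as the live frequency.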
+ */ + if (skutable->DriverReportedClocks.GameClockAc && + (dpm_table->dpm_levels[dpm_table->count - 1].value > + skutable->DriverReportedClocks.GameClockAc)) { + dpm_table->dpm_levels[dpm_table->count - 1].value = + skutable->DriverReportedClocks.GameClockAc; + dpm_table->max = skutable->DriverReportedClocks.GameClockAc; + } + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* uclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.uclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_UCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* fclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.fclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_FCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* vclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.vclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_VCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* dclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.dclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_DCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* lclk dpm table setup */ + pcie_table = &dpm_context->dpm_tables.pcie_table; + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + + if (link_level == 0) + link_level++; + } + + /* dcefclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.dcef_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_DCEFCLK, + dpm_table); + if (ret) + return ret; + } else { + 
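/*
+ * DCN DPM is disabled: fall back to a single fixed level at the
+ * boot frequency. boot_values appear to be kept in 10 kHz units,
+ * hence the /100 to MHz, matching the other fallback branches above.
+ */ +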
dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + return 0; +} + +static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu) +{ + int ret = 0; + uint64_t feature_enabled; + + ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); + if (ret) + return false; + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics) +{ + uint32_t throttler_status = 0; + int i; + + for (i = 0; i < THROTTLER_COUNT; i++) + throttler_status |= + (metrics->ThrottlingPercentage[i] ? 1U << i : 0); + + return throttler_status; +} + +#define SMU_14_0_2_BUSY_THRESHOLD 5 +static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) + return ret; + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->CurrClock[PPCLK_GFXCLK]; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->CurrClock[PPCLK_SOCCLK]; + break; + case METRICS_CURR_UCLK: + *value = metrics->CurrClock[PPCLK_UCLK]; + break; + case METRICS_CURR_VCLK: + *value = metrics->CurrClock[PPCLK_VCLK_0]; + break; + case METRICS_CURR_DCLK: + *value = metrics->CurrClock[PPCLK_DCLK_0]; + break; + case METRICS_CURR_FCLK: + *value = metrics->CurrClock[PPCLK_FCLK]; + break; + case METRICS_CURR_DCEFCLK: + *value = metrics->CurrClock[PPCLK_DCFCLK]; + break; + case METRICS_AVERAGE_GFXCLK: + if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageGfxclkFrequencyPostDs; + else + *value = metrics->AverageGfxclkFrequencyPreDs; + break; + case METRICS_AVERAGE_FCLK: + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageFclkFrequencyPostDs; + else + *value = metrics->AverageFclkFrequencyPreDs; + break; + case METRICS_AVERAGE_UCLK: + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageMemclkFrequencyPostDs; + else + *value = metrics->AverageMemclkFrequencyPreDs; + break; + case METRICS_AVERAGE_VCLK: + *value = metrics->AverageVclk0Frequency; + break; + case METRICS_AVERAGE_DCLK: + *value = metrics->AverageDclk0Frequency; + break; + case METRICS_AVERAGE_VCLK1: + *value = metrics->AverageVclk1Frequency; + break; + case METRICS_AVERAGE_DCLK1: + *value = metrics->AverageDclk1Frequency; + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = metrics->AverageGfxActivity; + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = metrics->AverageUclkActivity; + break; + case METRICS_AVERAGE_VCNACTIVITY: + *value = max(metrics->AverageVcn0ActivityPercentage, + metrics->Vcn1ActivityPercentage); + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = metrics->AverageSocketPower << 8; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->AvgTemperature[TEMP_EDGE] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->AvgTemperature[TEMP_HOTSPOT] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = metrics->AvgTemperature[TEMP_MEM] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case 
METRICS_TEMPERATURE_VRGFX: + *value = metrics->AvgTemperature[TEMP_VR_GFX] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRSOC: + *value = metrics->AvgTemperature[TEMP_VR_SOC] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_THROTTLER_STATUS: + *value = smu_v14_0_2_get_throttler_status(metrics); + break; + case METRICS_CURR_FANSPEED: + *value = metrics->AvgFanRpm; + break; + case METRICS_CURR_FANPWM: + *value = metrics->AvgFanPwm; + break; + case METRICS_VOLTAGE_VDDGFX: + *value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX]; + break; + case METRICS_PCIE_RATE: + *value = metrics->PcieRate; + break; + case METRICS_PCIE_WIDTH: + *value = metrics->PcieWidth; + break; + default: + *value = UINT_MAX; + break; + } + + return ret; +} + +static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, + uint32_t *max) +{ + struct smu_14_0_dpm_context *dpm_context = + smu->smu_dpm.dpm_context; + struct smu_14_0_dpm_table *dpm_table; + + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + /* uclk dpm table */ + dpm_table = &dpm_context->dpm_tables.uclk_table; + break; + case SMU_GFXCLK: + case SMU_SCLK: + /* gfxclk dpm table */ + dpm_table = &dpm_context->dpm_tables.gfx_table; + break; + case SMU_SOCCLK: + /* socclk dpm table */ + dpm_table = &dpm_context->dpm_tables.soc_table; + break; + case SMU_FCLK: + /* fclk dpm table */ + dpm_table = &dpm_context->dpm_tables.fclk_table; + break; + case SMU_VCLK: + case SMU_VCLK1: + /* vclk dpm table */ + dpm_table = &dpm_context->dpm_tables.vclk_table; + break; + case SMU_DCLK: + case SMU_DCLK1: + /* dclk dpm table */ + dpm_table = &dpm_context->dpm_tables.dclk_table; + break; + default: + dev_err(smu->adev->dev, "Unsupported clock type!\n"); + return -EINVAL; + } + + if (min) + *min = dpm_table->min; + if (max) + *max = dpm_table->max; + + return 0; +} + +static int smu_v14_0_2_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, + uint32_t *size) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + int ret = 0; + + switch (sensor) { + case AMDGPU_PP_SENSOR_MAX_FAN_RPM: + *(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm; + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_MEMACTIVITY, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_GFXACTIVITY, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_SOCKETPOWER, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_HOTSPOT, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_EDGE, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_MEM, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_CURR_UCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; 
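+ /* the metrics table reports clocks in MHz while this sensor
+ * interface expects 10 kHz units, hence the *100 (mirroring the
+ * /100 used when boot values seed the fallback DPM tables) */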
+ *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_GFXCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDGFX: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_VOLTAGE_VDDGFX, + (uint32_t *)data); + *size = 4; + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + MetricsMember_t member_type; + int clk_id = 0; + + clk_id = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_CLK, + clk_type); + if (clk_id < 0) + return -EINVAL; + + switch (clk_id) { + case PPCLK_GFXCLK: + member_type = METRICS_AVERAGE_GFXCLK; + break; + case PPCLK_UCLK: + member_type = METRICS_CURR_UCLK; + break; + case PPCLK_FCLK: + member_type = METRICS_CURR_FCLK; + break; + case PPCLK_SOCCLK: + member_type = METRICS_CURR_SOCCLK; + break; + case PPCLK_VCLK_0: + member_type = METRICS_AVERAGE_VCLK; + break; + case PPCLK_DCLK_0: + member_type = METRICS_AVERAGE_DCLK; + break; + case PPCLK_DCFCLK: + member_type = METRICS_CURR_DCEFCLK; + break; + default: + return -EINVAL; + } + + return smu_v14_0_2_get_smu_metrics_data(smu, + member_type, + value); +} + +static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu, + int od_feature_bit) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + + return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit); +} + +static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu, + int od_feature_bit, + int32_t *min, + int32_t *max) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsBasicMin; + int32_t od_min_setting, od_max_setting; + + switch (od_feature_bit) { + case PP_OD_FEATURE_GFXCLK_FMIN: + case PP_OD_FEATURE_GFXCLK_FMAX: + od_min_setting = overdrive_lowerlimits->GfxclkFoffset; + od_max_setting = overdrive_upperlimits->GfxclkFoffset; + break; + case PP_OD_FEATURE_UCLK_FMIN: + od_min_setting = overdrive_lowerlimits->UclkFmin; + od_max_setting = overdrive_upperlimits->UclkFmin; + break; + case PP_OD_FEATURE_UCLK_FMAX: + od_min_setting = overdrive_lowerlimits->UclkFmax; + od_max_setting = overdrive_upperlimits->UclkFmax; + break; + case PP_OD_FEATURE_GFX_VF_CURVE: + od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary[0]; + od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary[0]; + break; + case PP_OD_FEATURE_FAN_CURVE_TEMP: + od_min_setting = overdrive_lowerlimits->FanLinearTempPoints[0]; + od_max_setting = overdrive_upperlimits->FanLinearTempPoints[0]; + break; + case PP_OD_FEATURE_FAN_CURVE_PWM: + od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints[0]; + od_max_setting = overdrive_upperlimits->FanLinearPwmPoints[0]; + break; + case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT: + od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold; + od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold; + break; + case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET: + od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold; + od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold; + break; + case 
PP_OD_FEATURE_FAN_TARGET_TEMPERATURE: + od_min_setting = overdrive_lowerlimits->FanTargetTemperature; + od_max_setting = overdrive_upperlimits->FanTargetTemperature; + break; + case PP_OD_FEATURE_FAN_MINIMUM_PWM: + od_min_setting = overdrive_lowerlimits->FanMinimumPwm; + od_max_setting = overdrive_upperlimits->FanMinimumPwm; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + default: + od_min_setting = od_max_setting = INT_MAX; + break; + } + + if (min) + *min = od_min_setting; + if (max) + *max = od_max_setting; +} + +static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + char *buf) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; + struct smu_14_0_dpm_table *single_dpm_table; + struct smu_14_0_pcie_table *pcie_table; + uint32_t gen_speed, lane_width; + int i, curr_freq, size = 0; + int32_t min_value, max_value; + int ret = 0; + + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } + + switch (clk_type) { + case SMU_SCLK: + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + break; + case SMU_MCLK: + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); + break; + case SMU_SOCCLK: + single_dpm_table = &(dpm_context->dpm_tables.soc_table); + break; + case SMU_FCLK: + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); + break; + case SMU_VCLK: + case SMU_VCLK1: + single_dpm_table = &(dpm_context->dpm_tables.vclk_table); + break; + case SMU_DCLK: + case SMU_DCLK1: + single_dpm_table = &(dpm_context->dpm_tables.dclk_table); + break; + case SMU_DCEFCLK: + single_dpm_table = &(dpm_context->dpm_tables.dcef_table); + break; + default: + break; + } + + switch (clk_type) { + case SMU_SCLK: + case SMU_MCLK: + case SMU_SOCCLK: + case SMU_FCLK: + case SMU_VCLK: + case SMU_VCLK1: + case SMU_DCLK: + case SMU_DCLK1: + case SMU_DCEFCLK: + ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); + if (ret) { + dev_err(smu->adev->dev, "Failed to get current clock freq!"); + return ret; + } + + if (single_dpm_table->is_fine_grained) { + /* + * For fine grained dpms, there are only two dpm levels: + * - level 0 -> min clock freq + * - level 1 -> max clock freq + * And the current clock frequency can be any value between them. + * So, if the current clock frequency is not at level 0 or level 1, + * we will fake it as three dpm levels: + * - level 0 -> min clock freq + * - level 1 -> current actual clock freq + * - level 2 -> max clock freq + */ + if ((single_dpm_table->dpm_levels[0].value != curr_freq) && + (single_dpm_table->dpm_levels[1].value != curr_freq)) { + size += sysfs_emit_at(buf, size, "0: %uMhz\n", + single_dpm_table->dpm_levels[0].value); + size += sysfs_emit_at(buf, size, "1: %uMhz *\n", + curr_freq); + size += sysfs_emit_at(buf, size, "2: %uMhz\n", + single_dpm_table->dpm_levels[1].value); + } else { + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", + single_dpm_table->dpm_levels[0].value, + single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + single_dpm_table->dpm_levels[1].value, + single_dpm_table->dpm_levels[1].value == curr_freq ? 
"*" : ""); + } + } else { + for (i = 0; i < single_dpm_table->count; i++) + size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + i, single_dpm_table->dpm_levels[i].value, + single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : ""); + } + break; + case SMU_PCIE: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_PCIE_RATE, + &gen_speed); + if (ret) + return ret; + + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_PCIE_WIDTH, + &lane_width); + if (ret) + return ret; + + pcie_table = &(dpm_context->dpm_tables.pcie_table); + for (i = 0; i < pcie_table->num_of_link_levels; i++) + size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, + (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," : + (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," : + (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," : + (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : + (pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," : "", + (pcie_table->pcie_lane[i] == 1) ? "x1" : + (pcie_table->pcie_lane[i] == 2) ? "x2" : + (pcie_table->pcie_lane[i] == 3) ? "x4" : + (pcie_table->pcie_lane[i] == 4) ? "x8" : + (pcie_table->pcie_lane[i] == 5) ? "x12" : + (pcie_table->pcie_lane[i] == 6) ? "x16" : + (pcie_table->pcie_lane[i] == 7) ? "x32" : "", + pcie_table->clk_freq[i], + (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) && + (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ? + "*" : ""); + break; + + case SMU_OD_SCLK: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_GFXCLK_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n"); + size += sysfs_emit_at(buf, size, "%dMhz\n", + od_table->OverDriveTable.GfxclkFoffset); + break; + + case SMU_OD_MCLK: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_UCLK_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_MCLK:\n"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n", + od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); + break; + + case SMU_OD_VDDGFX_OFFSET: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_GFX_VF_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n"); + size += sysfs_emit_at(buf, size, "%dmV\n", + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]); + break; + + case SMU_OD_FAN_CURVE: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n"); + for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) + size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n", + i, + (int)od_table->OverDriveTable.FanLinearTempPoints[i], + (int)od_table->OverDriveTable.FanLinearPwmPoints[i]); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n", + min_value, max_value); + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_PWM, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n", + min_value, max_value); + + break; + + case SMU_OD_ACOUSTIC_LIMIT: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.AcousticLimitRpmThreshold); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT, + &min_value, + 
&max_value); + size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_ACOUSTIC_TARGET: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.AcousticTargetRpmThreshold); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_TARGET, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_TARGET_TEMPERATURE: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanTargetTemperature); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_TARGET_TEMPERATURE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_MINIMUM_PWM: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanMinimumPwm); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_MINIMUM_PWM, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_RANGE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && + !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && + !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + + if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMAX, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n", + min_value, max_value); + } + + if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMIN, + &min_value, + NULL); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMAX, + NULL, + &max_value); + size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFX_VF_CURVE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n", + min_value, max_value); + } + break; + + default: + break; + } + 
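+ /* every branch above appends into the same sysfs buffer through
+ * sysfs_emit_at(), which returns the number of bytes written, so
+ * 'size' doubles as the running offset and the return value */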
+ return size; +} + +static int smu_v14_0_2_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t mask) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_14_0_dpm_table *single_dpm_table; + uint32_t soft_min_level, soft_max_level; + uint32_t min_freq, max_freq; + int ret = 0; + + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + break; + case SMU_MCLK: + case SMU_UCLK: + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); + break; + case SMU_SOCCLK: + single_dpm_table = &(dpm_context->dpm_tables.soc_table); + break; + case SMU_FCLK: + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); + break; + case SMU_VCLK: + case SMU_VCLK1: + single_dpm_table = &(dpm_context->dpm_tables.vclk_table); + break; + case SMU_DCLK: + case SMU_DCLK1: + single_dpm_table = &(dpm_context->dpm_tables.dclk_table); + break; + default: + break; + } + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + case SMU_MCLK: + case SMU_UCLK: + case SMU_SOCCLK: + case SMU_FCLK: + case SMU_VCLK: + case SMU_VCLK1: + case SMU_DCLK: + case SMU_DCLK1: + if (single_dpm_table->is_fine_grained) { + /* There are only two levels for fine-grained DPM */ + soft_max_level = (soft_max_level >= 1 ? 1 : 0); + soft_min_level = (soft_min_level >= 1 ? 1 : 0); + } else { + if ((soft_max_level >= single_dpm_table->count) || + (soft_min_level >= single_dpm_table->count)) + return -EINVAL; + } + + min_freq = single_dpm_table->dpm_levels[soft_min_level].value; + max_freq = single_dpm_table->dpm_levels[soft_max_level].value; + + ret = smu_v14_0_set_soft_freq_limited_range(smu, + clk_type, + min_freq, + max_freq, + false); + break; + case SMU_DCEFCLK: + case SMU_PCIE: + default: + break; + } + + return ret; +} + +static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_14_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + int num_of_levels = pcie_table->num_of_link_levels; + uint32_t smu_pcie_arg; + int ret, i; + + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + for (i = 0; i < num_of_levels; i++) { + pcie_table->pcie_gen[i] = pcie_gen_cap; + pcie_table->pcie_lane[i] = pcie_width_cap; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap) + pcie_table->pcie_gen[i] = pcie_gen_cap; + if (pcie_table->pcie_lane[i] > pcie_width_cap) + pcie_table->pcie_lane[i] = pcie_width_cap; + } + } + + for (i = 0; i < num_of_levels; i++) { + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + return ret; + } + + return 0; +} + +static const struct smu_temperature_range smu14_thermal_policy[] = { + {-273150, 99000, 99000, -273150, 99000, 99000, -273150,
99000, 99000}, + { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, +}; + +static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; + PPTable_t *pptable = smu->smu_table.driver_pptable; + + if (amdgpu_sriov_vf(smu->adev)) + return 0; + + if (!range) + return -EINVAL; + + memcpy(range, &smu14_thermal_policy[0], sizeof(struct smu_temperature_range)); + + range->max = pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->edge_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->hotspot_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->hotspot_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)* + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->software_shutdown_temp = powerplay_table->software_shutdown_temp; + range->software_shutdown_temp_offset = pptable->CustomSkuTable.FanAbnormalTempLimitOffset; + + return 0; +} + +static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu) +{ + struct smu_14_0_dpm_context *dpm_context = + smu->smu_dpm.dpm_context; + struct smu_14_0_dpm_table *gfx_table = + &dpm_context->dpm_tables.gfx_table; + struct smu_14_0_dpm_table *mem_table = + &dpm_context->dpm_tables.uclk_table; + struct smu_14_0_dpm_table *soc_table = + &dpm_context->dpm_tables.soc_table; + struct smu_14_0_dpm_table *vclk_table = + &dpm_context->dpm_tables.vclk_table; + struct smu_14_0_dpm_table *dclk_table = + &dpm_context->dpm_tables.dclk_table; + struct smu_14_0_dpm_table *fclk_table = + &dpm_context->dpm_tables.fclk_table; + struct smu_umd_pstate_table *pstate_table = + &smu->pstate_table; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + DriverReportedClocks_t driver_clocks = + pptable->SkuTable.DriverReportedClocks; + + pstate_table->gfxclk_pstate.min = gfx_table->min; + if (driver_clocks.GameClockAc && + (driver_clocks.GameClockAc < gfx_table->max)) + pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc; + else + pstate_table->gfxclk_pstate.peak = gfx_table->max; + + pstate_table->uclk_pstate.min = mem_table->min; + pstate_table->uclk_pstate.peak = mem_table->max; + + pstate_table->socclk_pstate.min = soc_table->min; + pstate_table->socclk_pstate.peak = soc_table->max; + + pstate_table->vclk_pstate.min = vclk_table->min; + pstate_table->vclk_pstate.peak = vclk_table->max; + + pstate_table->dclk_pstate.min = dclk_table->min; + pstate_table->dclk_pstate.peak = dclk_table->max; + + pstate_table->fclk_pstate.min = fclk_table->min; + pstate_table->fclk_pstate.peak = fclk_table->max; + + if (driver_clocks.BaseClockAc && + driver_clocks.BaseClockAc < gfx_table->max) + pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc; + else + pstate_table->gfxclk_pstate.standard = gfx_table->max; + pstate_table->uclk_pstate.standard = mem_table->max; + 
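/*
+ * Note the asymmetry: the 'standard' UMD pstate keeps memory at its
+ * peak level while socclk/vclk/dclk/fclk sit at their minimums (a
+ * reading of the surrounding assignments; the rationale is not
+ * spelled out here).
+ */ +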
pstate_table->socclk_pstate.standard = soc_table->min; + pstate_table->vclk_pstate.standard = vclk_table->min; + pstate_table->dclk_pstate.standard = dclk_table->min; + pstate_table->fclk_pstate.standard = fclk_table->min; + + return 0; +} + +static void smu_v14_0_2_get_unique_id(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); + struct amdgpu_device *adev = smu->adev; + uint32_t upper32 = 0, lower32 = 0; + int ret; + + ret = smu_cmn_get_metrics_table(smu, NULL, false); + if (ret) + goto out; + + upper32 = metrics->PublicSerialNumberUpper; + lower32 = metrics->PublicSerialNumberLower; + +out: + adev->unique_id = ((uint64_t)upper32 << 32) | lower32; +} + +static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu, + uint32_t *speed) +{ + int ret; + + if (!speed) + return -EINVAL; + + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_CURR_FANPWM, + speed); + if (ret) { + dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!"); + return ret; + } + + /* Convert the PMFW output, which is in percent, to a 0-255 PWM value */ + *speed = min(*speed * 255 / 100, (uint32_t)255); + + return 0; +} + +static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) +{ + if (!speed) + return -EINVAL; + + return smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_CURR_FANSPEED, + speed); +} + +static int smu_v14_0_2_get_power_limit(struct smu_context *smu, + uint32_t *current_power_limit, + uint32_t *default_power_limit, + uint32_t *max_power_limit, + uint32_t *min_power_limit) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + CustomSkuTable_t *skutable = &pptable->CustomSkuTable; + uint32_t power_limit; + uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; + + if (smu_v14_0_get_current_power_limit(smu, &power_limit)) + power_limit = smu->adev->pm.ac_power ?
+ skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] : + skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0]; + + if (current_power_limit) + *current_power_limit = power_limit; + if (default_power_limit) + *default_power_limit = power_limit; + + if (max_power_limit) + *max_power_limit = msg_limit; + + if (min_power_limit) + *min_power_limit = 0; + + return 0; +} + +static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu, + char *buf) +{ + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; + DpmActivityMonitorCoeffInt_t *activity_monitor = + &(activity_monitor_external.DpmActivityMonitorCoeffInt); + static const char *title[] = { + "PROFILE_INDEX(NAME)", + "CLOCK_TYPE(NAME)", + "FPS", + "MinActiveFreqType", + "MinActiveFreq", + "BoosterFreqType", + "BoosterFreq", + "PD_Data_limit_c", + "PD_Data_error_coeff", + "PD_Data_error_rate_coeff"}; + int16_t workload_type = 0; + uint32_t i, size = 0; + int result = 0; + + if (!buf) + return -EINVAL; + + size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n", + title[0], title[1], title[2], title[3], title[4], title[5], + title[6], title[7], title[8], title[9]); + + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + i); + if (workload_type == -ENOTSUPP) + continue; + else if (workload_type < 0) + return -EINVAL; + + result = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + workload_type, + (void *)(&activity_monitor_external), + false); + if (result) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return result; + } + + size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); + + size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 0, + "GFXCLK", + activity_monitor->Gfx_FPS, + activity_monitor->Gfx_MinActiveFreqType, + activity_monitor->Gfx_MinActiveFreq, + activity_monitor->Gfx_BoosterFreqType, + activity_monitor->Gfx_BoosterFreq, + activity_monitor->Gfx_PD_Data_limit_c, + activity_monitor->Gfx_PD_Data_error_coeff, + activity_monitor->Gfx_PD_Data_error_rate_coeff); + + size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 1, + "FCLK", + activity_monitor->Fclk_FPS, + activity_monitor->Fclk_MinActiveFreqType, + activity_monitor->Fclk_MinActiveFreq, + activity_monitor->Fclk_BoosterFreqType, + activity_monitor->Fclk_BoosterFreq, + activity_monitor->Fclk_PD_Data_limit_c, + activity_monitor->Fclk_PD_Data_error_coeff, + activity_monitor->Fclk_PD_Data_error_rate_coeff); + } + + return size; +} + +#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9 +#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2 +#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long)) + +static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu, + long *input) +{ + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; + DpmActivityMonitorCoeffInt_t *activity_monitor = + &(activity_monitor_external.DpmActivityMonitorCoeffInt); + int ret, idx; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } + + idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Gfxclk */ + activity_monitor->Gfx_FPS = input[idx + 1]; + activity_monitor->Gfx_MinActiveFreqType = input[idx + 2]; + activity_monitor->Gfx_MinActiveFreq = input[idx + 3]; + activity_monitor->Gfx_BoosterFreqType = input[idx + 4]; + activity_monitor->Gfx_BoosterFreq = input[idx + 5]; + activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8]; + } + idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT; + if (input[idx]) { + /* Fclk */ + activity_monitor->Fclk_FPS = input[idx + 1]; + activity_monitor->Fclk_MinActiveFreqType = input[idx + 2]; + activity_monitor->Fclk_MinActiveFreq = input[idx + 3]; + activity_monitor->Fclk_BoosterFreqType = input[idx + 4]; + activity_monitor->Fclk_BoosterFreq = input[idx + 5]; + activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6]; + activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8]; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } + + return ret; +} + +static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, + u32 workload_mask, + long *custom_params, + u32 custom_params_max_idx) +{ + u32 backend_workload_mask = 0; + int ret, idx = -1, i; + + smu_cmn_get_backend_workload_mask(smu, workload_mask, + &backend_workload_mask); + + /* disable deep sleep if compute is enabled */ + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) + smu_v14_0_deep_sleep_control(smu, false); + else + smu_v14_0_deep_sleep_control(smu, true); 
+ + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) { + if (!smu->custom_profile_params) { + smu->custom_profile_params = + kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL); + if (!smu->custom_profile_params) + return -ENOMEM; + } + if (custom_params && custom_params_max_idx) { + if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT) + return -EINVAL; + if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT) + return -EINVAL; + idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT; + smu->custom_profile_params[idx] = 1; + for (i = 1; i < custom_params_max_idx; i++) + smu->custom_profile_params[idx + i] = custom_params[i]; + } + ret = smu_v14_0_2_set_power_profile_mode_coeff(smu, + smu->custom_profile_params); + if (ret) { + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } + } else if (smu->custom_profile_params) { + memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE); + } + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + backend_workload_mask, NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", + workload_mask); + if (idx != -1) + smu->custom_profile_params[idx] = 0; + return ret; + } + + return ret; +} + +static int smu_v14_0_2_baco_enter(struct smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) + return smu_v14_0_baco_set_armd3_sequence(smu, + smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); + else + return smu_v14_0_baco_enter(smu); +} + +static int smu_v14_0_2_baco_exit(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + return smu_v14_0_baco_exit(smu); + } +} + +static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu) +{ + // TODO + + return true; +} + +static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msg, int num_msgs) +{ + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); + struct amdgpu_device *adev = smu_i2c->adev; + struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = &smu_table->driver_table; + SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; + int i, j, r, c; + u16 dir; + + if (!adev->pm.dpm_enabled) + return -EBUSY; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->I2CcontrollerPort = smu_i2c->port; + req->I2CSpeed = I2C_SPEED_FAST_400K; + req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ + dir = msg[0].flags & I2C_M_RD; + + for (c = i = 0; i < num_msgs; i++) { + for (j = 0; j < msg[i].len; j++, c++) { + SwI2cCmd_t *cmd = &req->SwI2cCmds[c]; + + if (!(msg[i].flags & I2C_M_RD)) { + /* write */ + cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK; + cmd->ReadWriteData = msg[i].buf[j]; + } + + if ((dir ^ msg[i].flags) & I2C_M_RD) { + /* The direction changes. + */ + dir = msg[i].flags & I2C_M_RD; + cmd->CmdConfig |= CMDCONFIG_RESTART_MASK; + } + + req->NumCmds++; + + /* + * Insert STOP if we are at the last byte of either last + * message for the transaction or the client explicitly + * requires a STOP at this particular message. 
+ */ + if ((j == msg[i].len - 1) && + ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) { + cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK; + cmd->CmdConfig |= CMDCONFIG_STOP_MASK; + } + } + } + mutex_lock(&adev->pm.mutex); + r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); + mutex_unlock(&adev->pm.mutex); + if (r) + goto fail; + + for (c = i = 0; i < num_msgs; i++) { + if (!(msg[i].flags & I2C_M_RD)) { + c += msg[i].len; + continue; + } + for (j = 0; j < msg[i].len; j++, c++) { + SwI2cCmd_t *cmd = &res->SwI2cCmds[c]; + + msg[i].buf[j] = cmd->ReadWriteData; + } + } + r = num_msgs; +fail: + kfree(req); + return r; +} + +static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm smu_v14_0_2_i2c_algo = { + .master_xfer = smu_v14_0_2_i2c_xfer, + .functionality = smu_v14_0_2_i2c_func, +}; + +static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = { + .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN, + .max_read_len = MAX_SW_I2C_COMMANDS, + .max_write_len = MAX_SW_I2C_COMMANDS, + .max_comb_1st_msg_len = 2, + .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, +}; + +static int smu_v14_0_2_i2c_control_init(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int res, i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + smu_i2c->adev = adev; + smu_i2c->port = i; + mutex_init(&smu_i2c->mutex); + control->owner = THIS_MODULE; + control->dev.parent = &adev->pdev->dev; + control->algo = &smu_v14_0_2_i2c_algo; + snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); + control->quirks = &smu_v14_0_2_i2c_control_quirks; + i2c_set_adapdata(control, smu_i2c); + + res = i2c_add_adapter(control); + if (res) { + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + goto Out_err; + } + } + + /* assign the buses used for the FRU EEPROM and RAS EEPROM */ + /* XXX ideally this would be something in a vbios data table */ + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; + adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; + + return 0; +Out_err: + for ( ; i >= 0; i--) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + return res; +} + +static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + adev->pm.ras_eeprom_i2c_bus = NULL; + adev->pm.fru_eeprom_i2c_bus = NULL; +} + +static int smu_v14_0_2_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state) +{ + int ret; + + switch (mp1_state) { + case PP_MP1_STATE_UNLOAD: + ret = smu_cmn_set_mp1_state(smu, mp1_state); + break; + default: + /* Ignore others */ + ret = 0; + } + + return ret; +} + +static int smu_v14_0_2_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_DFCstateControl, + state, + NULL); +} + +static int smu_v14_0_2_mode1_reset(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset); + if (!ret) { + if (amdgpu_emu_mode == 1) + msleep(50000); + else + msleep(1000); + } + 
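/*
 * Editor's aside -- standalone sketch, not part of the patch, of the SW-I2C
 * command packing used by the transfer hook above: each byte becomes one
 * command slot, a RESTART is flagged where the transfer direction flips,
 * and a STOP is flagged on the last byte of the final message. Flag values
 * here are placeholders, not the PMFW CMDCONFIG_* encoding.
 */
#include <stdio.h>

#define F_READ    0x1
#define F_RESTART 0x2
#define F_STOP    0x4

struct xfer_msg { int read; int len; };

static void pack(const struct xfer_msg *msg, int num, unsigned char *flags)
{
	int dir = msg[0].read, c = 0, i, j;

	for (i = 0; i < num; i++)
		for (j = 0; j < msg[i].len; j++, c++) {
			flags[c] = msg[i].read ? F_READ : 0;
			if (dir != msg[i].read) { /* direction changed */
				dir = msg[i].read;
				flags[c] |= F_RESTART;
			}
			if (j == msg[i].len - 1 && i == num - 1)
				flags[c] |= F_STOP; /* close the transaction */
		}
}

int main(void)
{
	struct xfer_msg msgs[2] = { { 0, 2 }, { 1, 4 } }; /* write 2 bytes, read 4 */
	unsigned char flags[6];
	int i;

	pack(msgs, 2, flags);
	for (i = 0; i < 6; i++)
		printf("cmd[%d] flags=0x%x\n", i, (unsigned)flags[i]);
	return 0;
}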
+ return ret; +} + +static int smu_v14_0_2_mode2_reset(struct smu_context *smu) +{ + int ret = 0; + + // TODO + + return ret; +} + +static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures, + FEATURE_PWR_GFX, NULL); + else + return -EOPNOTSUPP; +} + +static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82); + smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66); + smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90); + + smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_53); + smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_75); + smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54); +} + +static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_3 *gpu_metrics = + (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; + SmuMetricsExternal_t metrics_ext; + SmuMetrics_t *metrics = &metrics_ext.SmuMetrics; + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, + &metrics_ext, + true); + if (ret) + return ret; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); + + gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE]; + gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT]; + gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM]; + gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX]; + gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC]; + gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0], + metrics->AvgTemperature[TEMP_VR_MEM1]); + + gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; + gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; + gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage, + metrics->Vcn1ActivityPercentage); + + gpu_metrics->average_socket_power = metrics->AverageSocketPower; + gpu_metrics->energy_accumulator = metrics->EnergyAccumulator; + + if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD) + gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs; + else + gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs; + + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) + gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs; + else + gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs; + + gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency; + gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency; + gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; + gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; + + gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency; + gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; + gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; + gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0]; + gpu_metrics->current_dclk1 = 
metrics->CurrClock[PPCLK_DCLK_0]; + + gpu_metrics->throttle_status = + smu_v14_0_2_get_throttler_status(metrics); + gpu_metrics->indep_throttle_status = + smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status, + smu_v14_0_2_throttler_map); + + gpu_metrics->current_fan_speed = metrics->AvgFanRpm; + + gpu_metrics->pcie_link_width = metrics->PcieWidth; + if ((metrics->PcieRate - 1) > LINK_SPEED_MAX) + gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1); + else + gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate); + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX]; + gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC]; + gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM]; + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_3); +} + +static void smu_v14_0_2_dump_od_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + struct amdgpu_device *adev = smu->adev; + + dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset); + dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); +} + +static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret; + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + true); + if (ret) + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + + return ret; +} + +static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE | + OD_OPS_SUPPORT_FAN_CURVE_SET | + OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE | + OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET | + OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE | + OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET | + OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | + OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET; +} + +static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret; + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + false); + if (ret) + dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); + + return ret; +} + +static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu) +{ + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; + OverDriveTableExternal_t *boot_od_table = + (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table; + OverDriveTableExternal_t *user_od_table = + (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table; + OverDriveTableExternal_t user_od_table_bak; + int ret; + int i; + + ret = smu_v14_0_2_get_overdrive_table(smu, boot_od_table); + if (ret) + return ret; + + smu_v14_0_2_dump_od_table(smu, boot_od_table); + + memcpy(od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + + /* + * For S3/S4/Runpm resume, we need to setup those overdrive tables again, + * but we have to preserve user defined values in "user_od_table". 
+ */ + if (!smu->adev->in_suspend) { + memcpy(user_od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + smu->user_dpm_profile.user_od = false; + } else if (smu->user_dpm_profile.user_od) { + memcpy(&user_od_table_bak, + user_od_table, + sizeof(OverDriveTableExternal_t)); + memcpy(user_od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + user_od_table->OverDriveTable.GfxclkFoffset = + user_od_table_bak.OverDriveTable.GfxclkFoffset; + user_od_table->OverDriveTable.UclkFmin = + user_od_table_bak.OverDriveTable.UclkFmin; + user_od_table->OverDriveTable.UclkFmax = + user_od_table_bak.OverDriveTable.UclkFmax; + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = + user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i]; + for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) { + user_od_table->OverDriveTable.FanLinearTempPoints[i] = + user_od_table_bak.OverDriveTable.FanLinearTempPoints[i]; + user_od_table->OverDriveTable.FanLinearPwmPoints[i] = + user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i]; + } + user_od_table->OverDriveTable.AcousticLimitRpmThreshold = + user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold; + user_od_table->OverDriveTable.AcousticTargetRpmThreshold = + user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold; + user_od_table->OverDriveTable.FanTargetTemperature = + user_od_table_bak.OverDriveTable.FanTargetTemperature; + user_od_table->OverDriveTable.FanMinimumPwm = + user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + } + + smu_v14_0_2_set_supported_od_feature_mask(smu); + + return 0; +} + +static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = table_context->overdrive_table; + OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table; + int res; + + user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) | + BIT(PP_OD_FEATURE_UCLK_BIT) | + BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) | + BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table); + user_od_table->OverDriveTable.FeatureCtrlMask = 0; + if (res == 0) + memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t)); + + return res; +} + +static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long input) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *boot_overdrive_table = + (OverDriveTableExternal_t *)table_context->boot_overdrive_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + struct amdgpu_device *adev = smu->adev; + int i; + + switch (input) { + case PP_OD_EDIT_FAN_CURVE: + for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) { + od_table->OverDriveTable.FanLinearTempPoints[i] = + boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i]; + od_table->OverDriveTable.FanLinearPwmPoints[i] = + boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i]; + } + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= 
BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_ACOUSTIC_LIMIT: + od_table->OverDriveTable.AcousticLimitRpmThreshold = + boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_ACOUSTIC_TARGET: + od_table->OverDriveTable.AcousticTargetRpmThreshold = + boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_FAN_TARGET_TEMPERATURE: + od_table->OverDriveTable.FanTargetTemperature = + boot_overdrive_table->OverDriveTable.FanTargetTemperature; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_FAN_MINIMUM_PWM: + od_table->OverDriveTable.FanMinimumPwm = + boot_overdrive_table->OverDriveTable.FanMinimumPwm; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + default: + dev_info(adev->dev, "Invalid table index: %ld\n", input); + return -EINVAL; + } + + return 0; +} + +static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long input[], + uint32_t size) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + struct amdgpu_device *adev = smu->adev; + uint32_t offset_of_voltageoffset; + int32_t minimum, maximum; + uint32_t feature_ctrlmask; + int i, ret = 0; + + switch (type) { + case PP_OD_EDIT_SCLK_VDDC_TABLE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { + dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n"); + return -ENOTSUPP; + } + + if (size != 1) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMAX, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n", + minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.GfxclkFoffset = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; + break; + + case PP_OD_EDIT_MCLK_VDDC_TABLE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { + dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n"); + return -ENOTSUPP; + } + + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } + + switch (input[i]) { + case 0: + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMIN, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.UclkFmin = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; + break; + + case 1: + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMAX, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n", + input[i + 
1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.UclkFmax = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; + break; + + default: + dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]); + dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); + return -EINVAL; + } + } + + if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) { + dev_err(adev->dev, + "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n", + (uint32_t)od_table->OverDriveTable.UclkFmin, + (uint32_t)od_table->OverDriveTable.UclkFmax); + return -EINVAL; + } + break; + + case PP_OD_EDIT_VDDGFX_OFFSET: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { + dev_warn(adev->dev, "Gfx offset setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFX_VF_CURVE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT); + break; + + case PP_OD_EDIT_FAN_CURVE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 || + input[0] < 0) + return -EINVAL; + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_TEMP, + &minimum, + &maximum); + if (input[1] < minimum || + input[1] > maximum) { + dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n", + input[1], minimum, maximum); + return -EINVAL; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_PWM, + &minimum, + &maximum); + if (input[2] < minimum || + input[2] > maximum) { + dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n", + input[2], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1]; + od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2]; + od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_ACOUSTIC_LIMIT: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_ACOUSTIC_TARGET: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_TARGET, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + 
dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_FAN_TARGET_TEMPERATURE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_TARGET_TEMPERATURE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanTargetTemperature = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_FAN_MINIMUM_PWM: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_MINIMUM_PWM, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanMinimumPwm = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_RESTORE_DEFAULT_TABLE: + if (size == 1) { + ret = smu_v14_0_2_od_restore_table_single(smu, input[0]); + if (ret) + return ret; + } else { + feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask; + memcpy(od_table, + table_context->boot_overdrive_table, + sizeof(OverDriveTableExternal_t)); + od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask; + } + fallthrough; + case PP_OD_COMMIT_DPM_TABLE: + /* + * The member below instructs PMFW the settings focused in + * this single operation. + * `uint32_t FeatureCtrlMask;` + * It does not contain actual informations about user's custom + * settings. Thus we do not cache it. 
+ */ + offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary); + if (memcmp((u8 *)od_table + offset_of_voltageoffset, + table_context->user_overdrive_table + offset_of_voltageoffset, + sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) { + smu_v14_0_2_dump_od_table(smu, od_table); + + ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + + od_table->OverDriveTable.FeatureCtrlMask = 0; + memcpy(table_context->user_overdrive_table + offset_of_voltageoffset, + (u8 *)od_table + offset_of_voltageoffset, + sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset); + + if (!memcmp(table_context->user_overdrive_table, + table_context->boot_overdrive_table, + sizeof(OverDriveTableExternal_t))) + smu->user_dpm_profile.user_od = false; + else + smu->user_dpm_profile.user_od = true; + } + break; + + default: + return -ENOSYS; + } + + return ret; +} + +static int smu_v14_0_2_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + int ret = 0; + + if (limit_type != SMU_DEFAULT_PPT_LIMIT) + return -EINVAL; + + if (limit <= msg_limit) { + if (smu->current_power_limit > msg_limit) { + od_table->OverDriveTable.Ppt = 0; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT; + + ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + } + return smu_v14_0_set_power_limit(smu, limit_type, limit); + } else if (smu->od_enabled) { + ret = smu_v14_0_set_power_limit(smu, limit_type, msg_limit); + if (ret) + return ret; + + od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT; + + ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + + smu->current_power_limit = limit; + } else { + return -EINVAL; + } + + return 0; +} + +static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { + .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask, + .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table, + .i2c_init = smu_v14_0_2_i2c_control_init, + .i2c_fini = smu_v14_0_2_i2c_control_fini, + .is_dpm_running = smu_v14_0_2_is_dpm_running, + .init_microcode = smu_v14_0_init_microcode, + .load_microcode = smu_v14_0_load_microcode, + .fini_microcode = smu_v14_0_fini_microcode, + .init_smc_tables = smu_v14_0_2_init_smc_tables, + .fini_smc_tables = smu_v14_0_fini_smc_tables, + .init_power = smu_v14_0_init_power, + .fini_power = smu_v14_0_fini_power, + .check_fw_status = smu_v14_0_check_fw_status, + .setup_pptable = smu_v14_0_2_setup_pptable, + .check_fw_version = smu_v14_0_check_fw_version, + .set_driver_table_location = smu_v14_0_set_driver_table_location, + .system_features_control = smu_v14_0_system_features_control, + .set_allowed_mask = smu_v14_0_set_allowed_mask, + .get_enabled_mask = smu_cmn_get_enabled_mask, + .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable, + .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable, + 
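/*
 * Editor's aside -- small sketch, not part of the patch, of the power-limit
 * split implemented by smu_v14_0_2_set_power_limit() above: requests at or
 * below the PMFW message limit are sent directly, while higher requests
 * clamp the message to that limit and express the excess as a percent-over
 * value destined for the overdrive table's Ppt field. Type and function
 * names are hypothetical.
 */
#include <stdio.h>

struct ppt_request {
	unsigned int msg_limit_w; /* watts sent via the SMU message */
	int od_ppt_percent;       /* percent over default, 0 if unused */
};

static struct ppt_request split_limit(unsigned int limit, unsigned int msg_limit)
{
	struct ppt_request r = { limit, 0 };

	if (limit > msg_limit) {
		r.msg_limit_w = msg_limit;
		/* same arithmetic as the patch: (limit * 100) / msg_limit - 100 */
		r.od_ppt_percent = (int)((limit * 100) / msg_limit) - 100;
	}
	return r;
}

int main(void)
{
	/* e.g. asking for 330 W against a 300 W message limit -> +10% OD */
	struct ppt_request r = split_limit(330, 300);

	printf("msg=%uW od=+%d%%\n", r.msg_limit_w, r.od_ppt_percent);
	return 0;
}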
.get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq, + .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values, + .read_sensor = smu_v14_0_2_read_sensor, + .feature_is_enabled = smu_cmn_feature_is_enabled, + .print_clk_levels = smu_v14_0_2_print_clk_levels, + .force_clk_levels = smu_v14_0_2_force_clk_levels, + .update_pcie_parameters = smu_v14_0_2_update_pcie_parameters, + .get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range, + .register_irq_handler = smu_v14_0_register_irq_handler, + .enable_thermal_alert = smu_v14_0_enable_thermal_alert, + .disable_thermal_alert = smu_v14_0_disable_thermal_alert, + .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location, + .get_gpu_metrics = smu_v14_0_2_get_gpu_metrics, + .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range, + .set_default_od_settings = smu_v14_0_2_set_default_od_settings, + .restore_user_od_settings = smu_v14_0_2_restore_user_od_settings, + .od_edit_dpm_table = smu_v14_0_2_od_edit_dpm_table, + .init_pptable_microcode = smu_v14_0_init_pptable_microcode, + .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk, + .set_performance_level = smu_v14_0_set_performance_level, + .gfx_off_control = smu_v14_0_gfx_off_control, + .get_unique_id = smu_v14_0_2_get_unique_id, + .get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm, + .get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm, + .get_power_limit = smu_v14_0_2_get_power_limit, + .set_power_limit = smu_v14_0_2_set_power_limit, + .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, + .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode, + .run_btc = smu_v14_0_run_btc, + .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, + .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, + .set_tool_table_location = smu_v14_0_set_tool_table_location, + .deep_sleep_control = smu_v14_0_deep_sleep_control, + .gfx_ulv_control = smu_v14_0_gfx_ulv_control, + .get_bamaco_support = smu_v14_0_get_bamaco_support, + .baco_get_state = smu_v14_0_baco_get_state, + .baco_set_state = smu_v14_0_baco_set_state, + .baco_enter = smu_v14_0_2_baco_enter, + .baco_exit = smu_v14_0_2_baco_exit, + .mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported, + .mode1_reset = smu_v14_0_2_mode1_reset, + .mode2_reset = smu_v14_0_2_mode2_reset, + .enable_gfx_features = smu_v14_0_2_enable_gfx_features, + .set_mp1_state = smu_v14_0_2_set_mp1_state, + .set_df_cstate = smu_v14_0_2_set_df_cstate, +#if 0 + .gpo_control = smu_v14_0_gpo_control, +#endif +}; + +void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu) +{ + smu->ppt_funcs = &smu_v14_0_2_ppt_funcs; + smu->message_map = smu_v14_0_2_message_map; + smu->clock_map = smu_v14_0_2_clk_map; + smu->feature_map = smu_v14_0_2_feature_mask_map; + smu->table_map = smu_v14_0_2_table_map; + smu->pwr_src_map = smu_v14_0_2_pwr_src_map; + smu->workload_map = smu_v14_0_2_workload_map; + smu_v14_0_2_set_smu_mailbox_registers(smu); +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h new file mode 100644 index 000000000000..b83729e5d6f9 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h @@ -0,0 +1,28 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMU_V14_0_2_PPT_H__ +#define __SMU_V14_0_2_PPT_H__ + +extern void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu); + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index b8dbd4e25348..7eaf58fd7f9a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -56,7 +56,7 @@ static const char * const __smu_message_names[] = { static const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type) { - if (type < 0 || type >= SMU_MSG_MAX_COUNT) + if (type >= SMU_MSG_MAX_COUNT) return "unknown smu message"; return __smu_message_names[type]; @@ -235,6 +235,50 @@ static void __smu_cmn_send_msg(struct smu_context *smu, WREG32(smu->msg_reg, msg); } +static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu, + enum smu_message_type msg) +{ + return smu->message_map[msg].flags; +} + +static int __smu_cmn_ras_filter_msg(struct smu_context *smu, + enum smu_message_type msg, bool *poll) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t flags, resp; + bool fed_status; + + flags = __smu_cmn_get_msg_flags(smu, msg); + *poll = true; + + /* When there is RAS fatal error, FW won't process non-RAS priority + * messages. Don't allow any messages other than RAS priority messages. + */ + fed_status = amdgpu_ras_get_fed_status(adev); + if (fed_status) { + if (!(flags & SMU_MSG_RAS_PRI)) { + dev_dbg(adev->dev, + "RAS error detected, skip sending %s", + smu_get_message_name(smu, msg)); + return -EACCES; + } + + /* FW will ignore non-priority messages when a RAS fatal error + * is detected. Hence it is possible that a previous message + * wouldn't have got response. Allow to continue without polling + * for response status for priority messages. 
+ */
+ resp = RREG32(smu->resp_reg);
+ dev_dbg(adev->dev,
+ "Sending RAS priority message %s response status: %x",
+ smu_get_message_name(smu, msg), resp);
+ if (resp == 0)
+ *poll = false;
+ }
+
+ return 0;
+}
+
static int __smu_cmn_send_debug_msg(struct smu_context *smu,
u32 msg,
u32 param)
@@ -271,11 +315,21 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
if (adev->no_hw_access)
return 0;

- reg = __smu_cmn_poll_stat(smu);
- res = __smu_cmn_reg2errno(smu, reg);
- if (reg == SMU_RESP_NONE ||
- res == -EREMOTEIO)
+ if (smu->smc_fw_state == SMU_FW_HANG) {
+ dev_err(adev->dev, "SMU is in a hung state, failed to send smu message!\n");
+ res = -EREMOTEIO;
goto Out;
+ }
+
+ if (smu->smc_fw_state == SMU_FW_INIT) {
+ smu->smc_fw_state = SMU_FW_RUNTIME;
+ } else {
+ reg = __smu_cmn_poll_stat(smu);
+ res = __smu_cmn_reg2errno(smu, reg);
+ if (reg == SMU_RESP_NONE || res == -EREMOTEIO)
+ goto Out;
+ }
+
__smu_cmn_send_msg(smu, msg_index, param);
res = 0;
Out:
@@ -306,6 +360,9 @@ int smu_cmn_wait_for_response(struct smu_context *smu)
reg = __smu_cmn_poll_stat(smu);
res = __smu_cmn_reg2errno(smu, reg);

+ if (res == -EREMOTEIO)
+ smu->smc_fw_state = SMU_FW_HANG;
+
if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
res && (res != -ETIME)) {
amdgpu_device_halt(smu->adev);
@@ -354,6 +411,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
int res, index;
+ bool poll = true;
u32 reg;

if (adev->no_hw_access)
@@ -366,22 +424,42 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
return index == -EACCES ? 0 : index;

mutex_lock(&smu->message_lock);
- reg = __smu_cmn_poll_stat(smu);
- res = __smu_cmn_reg2errno(smu, reg);
- if (reg == SMU_RESP_NONE ||
- res == -EREMOTEIO) {
- __smu_cmn_reg_print_error(smu, reg, index, param, msg);
+
+ if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) {
+ res = __smu_cmn_ras_filter_msg(smu, msg, &poll);
+ if (res)
+ goto Out;
+ }
+
+ if (smu->smc_fw_state == SMU_FW_HANG) {
+ dev_err(adev->dev, "SMU is in a hung state, failed to send smu message!\n");
+ res = -EREMOTEIO;
goto Out;
+ } else if (smu->smc_fw_state == SMU_FW_INIT) {
+ /* Ignore initial smu response register value */
+ poll = false;
+ smu->smc_fw_state = SMU_FW_RUNTIME;
+ }
+
+ if (poll) {
+ reg = __smu_cmn_poll_stat(smu);
+ res = __smu_cmn_reg2errno(smu, reg);
+ if (reg == SMU_RESP_NONE || res == -EREMOTEIO) {
+ __smu_cmn_reg_print_error(smu, reg, index, param, msg);
+ goto Out;
+ }
}
__smu_cmn_send_msg(smu, (uint16_t) index, param);
reg = __smu_cmn_poll_stat(smu);
res = __smu_cmn_reg2errno(smu, reg);
- if (res != 0)
+ if (res != 0) {
+ if (res == -EREMOTEIO)
+ smu->smc_fw_state = SMU_FW_HANG;
__smu_cmn_reg_print_error(smu, reg, index, param, msg);
+ }
if (read_arg) {
smu_cmn_read_arg(smu, read_arg);
- dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\
- readval: 0x%08x\n",
+ dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
smu_get_message_name(smu, msg), index, param, reg, *read_arg);
} else {
dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
@@ -437,7 +515,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return -EINVAL;

if (amdgpu_sriov_vf(smu->adev) &&
- !msg_mapping.valid_in_vf)
+ !(msg_mapping.flags & SMU_MSG_VF_FLAG))
return -EACCES;

return msg_mapping.map_to;
@@ -707,7 +785,7 @@ static const char *__smu_feature_names[] = {
static const char *smu_get_feature_name(struct smu_context *smu, enum
smu_feature_mask feature) { - if (feature < 0 || feature >= SMU_FEATURE_COUNT) + if (feature >= SMU_FEATURE_COUNT) return "unknown smu feature"; return __smu_feature_names[feature]; } @@ -715,7 +793,7 @@ static const char *smu_get_feature_name(struct smu_context *smu, size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu, char *buf) { - int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)]; + int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)]; uint64_t feature_mask; int i, feature_index; uint32_t count = 0; @@ -973,64 +1051,6 @@ int smu_cmn_get_combo_pptable(struct smu_context *smu) false); } -void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) -{ - struct metrics_table_header *header = (struct metrics_table_header *)table; - uint16_t structure_size; - -#define METRICS_VERSION(a, b) ((a << 16) | b) - - switch (METRICS_VERSION(frev, crev)) { - case METRICS_VERSION(1, 0): - structure_size = sizeof(struct gpu_metrics_v1_0); - break; - case METRICS_VERSION(1, 1): - structure_size = sizeof(struct gpu_metrics_v1_1); - break; - case METRICS_VERSION(1, 2): - structure_size = sizeof(struct gpu_metrics_v1_2); - break; - case METRICS_VERSION(1, 3): - structure_size = sizeof(struct gpu_metrics_v1_3); - break; - case METRICS_VERSION(1, 4): - structure_size = sizeof(struct gpu_metrics_v1_4); - break; - case METRICS_VERSION(1, 5): - structure_size = sizeof(struct gpu_metrics_v1_5); - break; - case METRICS_VERSION(2, 0): - structure_size = sizeof(struct gpu_metrics_v2_0); - break; - case METRICS_VERSION(2, 1): - structure_size = sizeof(struct gpu_metrics_v2_1); - break; - case METRICS_VERSION(2, 2): - structure_size = sizeof(struct gpu_metrics_v2_2); - break; - case METRICS_VERSION(2, 3): - structure_size = sizeof(struct gpu_metrics_v2_3); - break; - case METRICS_VERSION(2, 4): - structure_size = sizeof(struct gpu_metrics_v2_4); - break; - case METRICS_VERSION(3, 0): - structure_size = sizeof(struct gpu_metrics_v3_0); - break; - default: - return; - } - -#undef METRICS_VERSION - - memset(header, 0xFF, structure_size); - - header->format_revision = frev; - header->content_revision = crev; - header->structure_size = structure_size; - -} - int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state) { @@ -1079,3 +1099,85 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) return snd_driver_loaded; } + +static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level) +{ + if (level < 0 || !(policy->level_mask & BIT(level))) + return "Invalid"; + + switch (level) { + case SOC_PSTATE_DEFAULT: + return "soc_pstate_default"; + case SOC_PSTATE_0: + return "soc_pstate_0"; + case SOC_PSTATE_1: + return "soc_pstate_1"; + case SOC_PSTATE_2: + return "soc_pstate_2"; + } + + return "Invalid"; +} + +static struct smu_dpm_policy_desc pstate_policy_desc = { + .name = STR_SOC_PSTATE_POLICY, + .get_desc = smu_soc_policy_get_desc, +}; + +void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy) +{ + policy->desc = &pstate_policy_desc; +} + +static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy, + int level) +{ + if (level < 0 || !(policy->level_mask & BIT(level))) + return "Invalid"; + + switch (level) { + case XGMI_PLPD_DISALLOW: + return "plpd_disallow"; + case XGMI_PLPD_DEFAULT: + return "plpd_default"; + case XGMI_PLPD_OPTIMIZED: + return "plpd_optimized"; + } + + return "Invalid"; +} + +static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = { + .name = STR_XGMI_PLPD_POLICY, + .get_desc = 
smu_xgmi_plpd_policy_get_desc, +}; + +void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy) +{ + policy->desc = &xgmi_plpd_policy_desc; +} + +void smu_cmn_get_backend_workload_mask(struct smu_context *smu, + u32 workload_mask, + u32 *backend_workload_mask) +{ + int workload_type; + u32 profile_mode; + + *backend_workload_mask = 0; + + for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) { + if (!(workload_mask & (1 << profile_mode))) + continue; + + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + profile_mode); + + if (workload_type < 0) + continue; + + *backend_workload_mask |= 1 << workload_type; + } +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 81bfce1406e5..7473672abd2a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -40,6 +40,30 @@ #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 +#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \ + do { \ + typecheck(struct gpu_metrics_v##frev##_##crev, \ + typeof(*(ptr))); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)(ptr); \ + memset(header, 0xFF, sizeof(*(ptr))); \ + header->format_revision = frev; \ + header->content_revision = crev; \ + header->structure_size = sizeof(*(ptr)); \ + } while (0) + +#define smu_cmn_init_partition_metrics(ptr, frev, crev) \ + do { \ + typecheck(struct amdgpu_partition_metrics_v##frev##_##crev, \ + typeof(*(ptr))); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)(ptr); \ + memset(header, 0xFF, sizeof(*(ptr))); \ + header->format_revision = frev; \ + header->content_revision = crev; \ + header->structure_size = sizeof(*(ptr)); \ + } while (0) + extern const int link_speed[]; /* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */ @@ -125,8 +149,6 @@ int smu_cmn_get_metrics_table(struct smu_context *smu, int smu_cmn_get_combo_pptable(struct smu_context *smu); -void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); - int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state); @@ -144,6 +166,12 @@ static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset) } bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev); +void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy); +void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy); + +void smu_cmn_get_backend_workload_mask(struct smu_context *smu, + u32 workload_mask, + u32 *backend_workload_mask); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index 6f4d212607d7..c09ecf1a68a0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -78,7 +78,6 @@ #define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu) #define smu_get_dpm_ultimate_freq(smu, param, min, max) smu_ppt_funcs(get_dpm_ultimate_freq, 0, smu, param, min, max) #define smu_asic_set_performance_level(smu, level) smu_ppt_funcs(set_performance_level, -EINVAL, smu, level) -#define smu_dump_pptable(smu) smu_ppt_funcs(dump_pptable, 0, smu) #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap) #define smu_set_power_source(smu, 
power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src)
#define smu_i2c_init(smu) smu_ppt_funcs(i2c_init, 0, smu)
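/*
 * Editor's aside -- userspace sketch, not part of the patch, of the pattern
 * behind the new smu_cmn_init_soft_gpu_metrics() macro in smu_cmn.h above:
 * derive the structure size from the pointed-to type instead of the old
 * frev/crev switch, poison the body with 0xFF so unpopulated fields read as
 * "invalid", and stamp the header last. The struct layout below is a
 * stand-in, not the real gpu_metrics_v1_3; the kernel macro also enforces
 * the type with typecheck(), omitted here for portability.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct metrics_table_header {
	uint16_t structure_size;
	uint8_t format_revision;
	uint8_t content_revision;
};

struct gpu_metrics_v1_3 { /* stand-in layout */
	struct metrics_table_header header;
	uint16_t temperature_edge;
};

#define init_soft_gpu_metrics(ptr, frev, crev)                 \
	do {                                                   \
		struct metrics_table_header *h = &(ptr)->header; \
		memset((ptr), 0xFF, sizeof(*(ptr)));           \
		h->format_revision = (frev);                   \
		h->content_revision = (crev);                  \
		h->structure_size = sizeof(*(ptr));            \
	} while (0)

int main(void)
{
	struct gpu_metrics_v1_3 m;

	init_soft_gpu_metrics(&m, 1, 3);
	printf("size=%u rev=%u.%u edge=0x%x\n",
	       (unsigned)m.header.structure_size,
	       (unsigned)m.header.format_revision,
	       (unsigned)m.header.content_revision,
	       (unsigned)m.temperature_edge);
	return 0;
}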