Diffstat (limited to 'drivers/gpu/drm/amd/pm/amdgpu_pm.c')
-rw-r--r--	drivers/gpu/drm/amd/pm/amdgpu_pm.c | 988
1 file changed, 638 insertions(+), 350 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 39c5e1dfa275..77b1f061bbf0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -38,6 +38,8 @@
 #define MAX_NUM_OF_FEATURES_PER_SUBSET		8
 #define MAX_NUM_OF_SUBSETS			8
 
+#define DEVICE_ATTR_IS(_name)	(attr_id == device_attr_id__##_name)
+
 struct od_attribute {
 	struct kobj_attribute	attribute;
 	struct list_head	entry;
@@ -143,15 +145,12 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	amdgpu_dpm_get_current_power_state(adev, &pm);
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return sysfs_emit(buf, "%s\n",
@@ -183,11 +182,9 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
 	else
 		return -EINVAL;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	amdgpu_dpm_set_power_state(adev, state);
 
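The two hunks above set the pattern repeated throughout this patch. Read-side handlers switch to pm_runtime_get_if_active(), which takes a usage reference only when the GPU is already runtime-active (it returns 0 when the device is suspended, and the `ret ?: -EPERM` idiom maps that 0 to -EPERM), so a sysfs read no longer wakes a sleeping GPU. Write-side handlers switch to pm_runtime_resume_and_get(), which resumes the device and, unlike pm_runtime_get_sync(), drops its reference by itself on failure. The read paths also stop calling pm_runtime_mark_last_busy(), presumably because a read that never resumed the device has no need to push back the autosuspend timer. A minimal sketch of the read-side shape, using a hypothetical attribute that is not part of this patch:

  /* Hypothetical show() handler illustrating the read-side pattern. */
  static ssize_t foo_show(struct device *dev,
  			struct device_attribute *attr, char *buf)
  {
  	struct drm_device *ddev = dev_get_drvdata(dev);
  	unsigned int val = 0;
  	int ret;
  
  	/* Takes a reference only if the device is already powered (>0). */
  	ret = pm_runtime_get_if_active(ddev->dev);
  	if (ret <= 0)
  		return ret ?: -EPERM;	/* 0 (suspended) becomes -EPERM */
  
  	/* ... query the hardware into val ... */
  
  	pm_runtime_put_autosuspend(ddev->dev);
  
  	return sysfs_emit(buf, "%u\n", val);
  }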
@@ -271,15 +268,12 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	level = amdgpu_dpm_get_performance_level(adev);
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return sysfs_emit(buf, "%s\n",
@@ -334,11 +328,9 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 		return -EINVAL;
 	}
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
 	if (amdgpu_dpm_force_performance_level(adev, level)) {
@@ -372,16 +364,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	if (amdgpu_dpm_get_pp_num_states(adev, &data))
 		memset(&data, 0, sizeof(data));
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
@@ -410,17 +399,14 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	amdgpu_dpm_get_current_power_state(adev, &pm);
 
 	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	if (ret)
@@ -483,11 +469,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 
 	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
 	if (ret)
@@ -542,15 +526,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	size = amdgpu_dpm_get_pp_table(adev, &table);
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	if (size <= 0)
@@ -578,11 +559,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
 
@@ -806,11 +785,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 			tmp_str++;
 	}
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
 					      type,
@@ -863,11 +840,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
 		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
@@ -886,7 +861,6 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	if (size == 0)
 		size = sysfs_emit(buf, "\n");
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return size;
@@ -927,11 +901,9 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
 	if (ret)
 		return -EINVAL;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
 
@@ -958,17 +930,14 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
 	if (size <= 0)
 		size = sysfs_emit(buf, "\n");
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return size;
@@ -1027,11 +996,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
 	if (ret == -ENOENT)
@@ -1040,7 +1007,6 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
 	if (size == 0)
 		size = sysfs_emit(buf, "\n");
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return size;
@@ -1100,11 +1066,9 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
 	if (ret)
 		return ret;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_force_clock_level(adev, type, mask);
 
@@ -1281,15 +1245,12 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	value = amdgpu_dpm_get_sclk_od(adev);
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return sysfs_emit(buf, "%d\n", value);
@@ -1315,11 +1276,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 	if (ret)
 		return -EINVAL;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 
@@ -1343,15 +1302,12 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	value = amdgpu_dpm_get_mclk_od(adev);
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return sysfs_emit(buf, "%d\n", value);
@@ -1377,11 +1333,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 	if (ret)
 		return -EINVAL;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
@@ -1407,7 +1361,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
  * create a custom set of heuristics, write a string of numbers to the file
  * starting with the number of the custom profile along with a setting
  * for each heuristic parameter.  Due to differences across asic families
- * the heuristic parameters vary from family to family.
+ * the heuristic parameters vary from family to family. Additionally,
+ * you can apply the custom heuristics to different clock domains. Each
+ * clock domain is considered a distinct operation so if you modify the
+ * gfxclk heuristics and then the memclk heuristics, all of the
+ * custom heuristics will be retained until you switch to another profile.
  *
 */
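Because the parameter count and meaning differ per ASIC family, a concrete write is inherently device-specific. An illustrative session (the profile index and heuristic values below are hypothetical, not taken from this patch):

  $ cat /sys/bus/pci/devices/.../pp_power_profile_mode    # list profiles; '*' marks the active one
  $ echo "1" > /sys/bus/pci/devices/.../pp_power_profile_mode    # select a predefined profile by number
  $ echo "6 55 75 30 60" > /sys/bus/pci/devices/.../pp_power_profile_mode
        # hypothetical: CUSTOM profile index followed by its heuristic parameters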
@@ -1425,17 +1383,14 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
 	if (size <= 0)
 		size = sysfs_emit(buf, "\n");
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return size;
@@ -1490,11 +1445,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	}
 	parameter[parameter_size] = profile_mode;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
 
@@ -1518,16 +1471,13 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (r < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-		return r;
-	}
+	r = pm_runtime_get_if_active(adev->dev);
+	if (r <= 0)
+		return r ?: -EPERM;
 
 	/* get the sensor value */
 	r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
 
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
 	return r;
@@ -1582,6 +1532,30 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 }
 
 /**
+ * DOC: vcn_busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the VCN
+ * is as a percentage.  The file vcn_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	unsigned int value;
+	int r;
+
+	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
+	if (r)
+		return r;
+
+	return sysfs_emit(buf, "%d\n", value);
+}
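The new attribute mirrors the existing gpu_busy_percent and mem_busy_percent files, so reading it is a plain sysfs query (path elided as in the DOC text, output illustrative):

  $ cat /sys/bus/pci/devices/.../vcn_busy_percent
  7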
+
+/**
  * DOC: pcie_bw
  *
  * The amdgpu driver provides a sysfs API for estimating how much data
@@ -1613,15 +1587,12 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 	if (!adev->asic_funcs->get_pcie_usage)
 		return -ENODATA;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return sysfs_emit(buf, "%llu %llu %i\n",
@@ -1744,11 +1715,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
 	if (!ret)
@@ -1756,7 +1725,6 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
 	else
 		size = sysfs_emit(buf, "failed to get thermal limit\n");
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return size;
@@ -1781,14 +1749,14 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
 		return -EINVAL;
 	}
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
 	if (ret) {
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
 		dev_err(dev, "failed to update thermal limit\n");
 		return ret;
 	}
@@ -1823,15 +1791,12 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
 
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return size;
@@ -1864,11 +1829,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(ddev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
 	if (size <= 0)
@@ -1880,7 +1843,6 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 	memcpy(buf, gpu_metrics, size);
 
 out:
-	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
 	return size;
@@ -1982,11 +1944,9 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	r = pm_runtime_get_sync(ddev->dev);
-	if (r < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	r = pm_runtime_resume_and_get(ddev->dev);
+	if (r < 0)
 		return r;
-	}
 
 	r = kstrtoint(buf, 10, &bias);
 	if (r)
@@ -2034,60 +1994,286 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	return 0;
 }
 
-/* Following items will be read out to indicate current plpd policy:
- *  - -1: none
- *  - 0: disallow
- *  - 1: default
- *  - 2: optimized
+static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+					 uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+
+	*states = ATTR_STATE_SUPPORTED;
+
+	if (!amdgpu_dpm_is_overdrive_supported(adev)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+	/* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
+	if (gc_ver == IP_VERSION(9, 4, 3) ||
+	    gc_ver == IP_VERSION(9, 4, 4)) {
+		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+			*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+	if (!(attr->flags & mask))
+		*states = ATTR_STATE_UNSUPPORTED;
+
+	return 0;
+}
+
+static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+				      uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+	struct device_attribute *dev_attr = &attr->dev_attr;
+	uint32_t gc_ver;
+
+	*states = ATTR_STATE_SUPPORTED;
+
+	if (!(attr->flags & mask)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+	gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+	/* dcefclk node is not available on gfx 11.0.3 sriov */
+	if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
+	    gc_ver < IP_VERSION(9, 0, 0) ||
+	    !amdgpu_device_has_display_hardware(adev))
+		*states = ATTR_STATE_UNSUPPORTED;
+
+	/* SMU MP1 does not support dcefclk level setting;
+	 * setting should also not be allowed from VF if not in one VF mode.
+	 */
+	if (gc_ver >= IP_VERSION(10, 0, 0) ||
+	    (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
+		dev_attr->attr.mode &= ~S_IWUGO;
+		dev_attr->store = NULL;
+	}
+
+	return 0;
+}
+
+static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+					  uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+	struct device_attribute *dev_attr = &attr->dev_attr;
+	enum amdgpu_device_attr_id attr_id = attr->attr_id;
+	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+
+	*states = ATTR_STATE_SUPPORTED;
+
+	if (!(attr->flags & mask)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+		if (gc_ver < IP_VERSION(9, 0, 0))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+		if (mp1_ver < IP_VERSION(10, 0, 0))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
+		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+		      gc_ver == IP_VERSION(10, 3, 3) ||
+		      gc_ver == IP_VERSION(10, 3, 6) ||
+		      gc_ver == IP_VERSION(10, 3, 7) ||
+		      gc_ver == IP_VERSION(10, 3, 0) ||
+		      gc_ver == IP_VERSION(10, 1, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 1) ||
+		      gc_ver == IP_VERSION(11, 0, 4) ||
+		      gc_ver == IP_VERSION(11, 5, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 3) ||
+		      gc_ver == IP_VERSION(9, 4, 3) ||
+		      gc_ver == IP_VERSION(9, 4, 4)))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
+		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+		       gc_ver == IP_VERSION(10, 3, 0) ||
+		       gc_ver == IP_VERSION(11, 0, 2) ||
+		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+		      gc_ver == IP_VERSION(10, 3, 3) ||
+		      gc_ver == IP_VERSION(10, 3, 6) ||
+		      gc_ver == IP_VERSION(10, 3, 7) ||
+		      gc_ver == IP_VERSION(10, 3, 0) ||
+		      gc_ver == IP_VERSION(10, 1, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 1) ||
+		      gc_ver == IP_VERSION(11, 0, 4) ||
+		      gc_ver == IP_VERSION(11, 5, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 3) ||
+		      gc_ver == IP_VERSION(9, 4, 3) ||
+		      gc_ver == IP_VERSION(9, 4, 4)))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
+		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+		       gc_ver == IP_VERSION(10, 3, 0) ||
+		       gc_ver == IP_VERSION(11, 0, 2) ||
+		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+		if (gc_ver == IP_VERSION(9, 4, 2) ||
+		    gc_ver == IP_VERSION(9, 4, 3) ||
+		    gc_ver == IP_VERSION(9, 4, 4))
+			*states = ATTR_STATE_UNSUPPORTED;
+	}
+
+	switch (gc_ver) {
+	case IP_VERSION(9, 4, 1):
+	case IP_VERSION(9, 4, 2):
+		/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
+		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
+		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
+			dev_attr->attr.mode &= ~S_IWUGO;
+			dev_attr->store = NULL;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* setting should not be allowed from VF if not in one VF mode */
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
+		dev_attr->attr.mode &= ~S_IWUGO;
+		dev_attr->store = NULL;
+	}
+
+	return 0;
+}
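The DEVICE_ATTR_IS() helper hoisted to the top of the file changes how an attribute is identified inside these *_attr_update() callbacks: an integer compare against a generated enum instead of a strcmp() on the sysfs name, which is what allows the former default_attr_update() logic to be split into the per-attribute callbacks above. Both definitions, taken from this diff, side by side:

  /* Before (local to default_attr_update, removed below): */
  #define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
  
  /* After (file scope, keyed on attr->attr_id): */
  #define DEVICE_ATTR_IS(_name)	(attr_id == device_attr_id__##_name)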
+
+/* pm policy attributes */
+struct amdgpu_pm_policy_attr {
+	struct device_attribute dev_attr;
+	enum pp_pm_policy id;
+};
+
+/**
+ * DOC: pm_policy
+ *
+ * Certain SOCs can support different power policies to optimize application
+ * performance. However, this policy is provided only at SOC level and not at a
+ * per-process level. This is especially useful when the entire SOC is utilized
+ * for a dedicated workload.
+ *
+ * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
+ * only two types of policies are supported through this interface.
+ *
+ * Pstate Policy Selection - This is to select different Pstate profiles which
+ * decide clock/throttling preferences.
+ *
+ * XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
+ * this helps to select the policy to be applied for per-link power down.
+ *
+ * The list of available policies and policy levels varies between SOCs. They
+ * can be viewed under the pm_policy node directory. If the SOC doesn't support
+ * any policy, this node won't be available. The different policies supported
+ * will be available as separate nodes under pm_policy.
+ *
+ *   cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
+ *
+ * Reading the policy file shows the different levels supported. The level
+ * which is applied presently is denoted by * (asterisk). E.g.,
+ *
+ * .. code-block:: console
+ *
+ *	cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
+ *	0 : soc_pstate_default
+ *	1 : soc_pstate_0
+ *	2 : soc_pstate_1*
+ *	3 : soc_pstate_2
+ *
+ *	cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
+ *	0 : plpd_disallow
+ *	1 : plpd_default
+ *	2 : plpd_optimized*
+ *
+ * To apply a specific policy
+ *
+ * "echo  <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>"
+ *
+ * For the levels listed in the example above, to select "plpd_optimized" for
+ * XGMI and "soc_pstate_2" for soc pstate policy -
+ *
+ * .. code-block:: console
+ *
+ *	echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
+ *	echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
+ *
 */
-static ssize_t amdgpu_get_xgmi_plpd_policy(struct device *dev,
-					   struct device_attribute *attr,
-					   char *buf)
+static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
-	char *mode_desc = "none";
-	int mode;
+	struct amdgpu_pm_policy_attr *policy_attr;
+
+	policy_attr =
+		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	mode = amdgpu_dpm_get_xgmi_plpd_mode(adev, &mode_desc);
-
-	return sysfs_emit(buf, "%d: %s\n", mode, mode_desc);
+	return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
 }
 
-/* Following argument value is expected from user to change plpd policy
- *  - arg 0: disallow plpd
- *  - arg 1: default policy
- *  - arg 2: optimized policy
- */
-static ssize_t amdgpu_set_xgmi_plpd_policy(struct device *dev,
-					   struct device_attribute *attr,
-					   const char *buf, size_t count)
+static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
-	int mode, ret;
+	struct amdgpu_pm_policy_attr *policy_attr;
+	int ret, num_params = 0;
+	char delimiter[] = " \n\t";
+	char tmp_buf[128];
+	char *tmp, *param;
+	long val;
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = kstrtos32(buf, 0, &mode);
-	if (ret)
+	count = min(count, sizeof(tmp_buf));
+	memcpy(tmp_buf, buf, count);
+	tmp_buf[count - 1] = '\0';
+	tmp = tmp_buf;
+
+	tmp = skip_spaces(tmp);
+	while ((param = strsep(&tmp, delimiter))) {
+		if (!strlen(param)) {
+			tmp = skip_spaces(tmp);
+			continue;
+		}
+		ret = kstrtol(param, 0, &val);
+		if (ret)
+			return -EINVAL;
+		num_params++;
+		if (num_params > 1)
+			return -EINVAL;
+	}
+
+	if (num_params != 1)
 		return -EINVAL;
 
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
+	policy_attr =
+		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
+
+	ret = pm_runtime_resume_and_get(ddev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
-	ret = amdgpu_dpm_set_xgmi_plpd_mode(adev, mode);
+	ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
@@ -2098,6 +2284,48 @@ static ssize_t amdgpu_set_xgmi_plpd_policy(struct device *dev,
 	return count;
 }
 
+#define AMDGPU_PM_POLICY_ATTR(_name, _id)                                  \
+	static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = {     \
+		.dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
+				   amdgpu_set_pm_policy_attr),             \
+		.id = PP_PM_POLICY_##_id,                                  \
+	};
+
+#define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr
+
+AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
+AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)
+
+static struct attribute *pm_policy_attrs[] = {
+	&AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
+	&AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
+	NULL
+};
+
+static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
+					     struct attribute *attr, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	struct amdgpu_pm_policy_attr *policy_attr;
+
+	policy_attr =
+		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
+
+	if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
+	    -ENOENT)
+		return 0;
+
+	return attr->mode;
+}
+
+const struct attribute_group amdgpu_pm_policy_attr_group = {
+	.name = "pm_policy",
+	.attrs = pm_policy_attrs,
+	.is_visible = amdgpu_pm_policy_attr_visible,
+};
+
 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2105,22 +2333,34 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_dcefclk_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
 	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC,
+			      .attr_update = pp_od_clk_voltage_attr_update),
 	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2133,7 +2373,6 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 			      .attr_update = ss_power_attr_update),
 	AMDGPU_DEVICE_ATTR_RW(smartshift_bias,				ATTR_FLAG_BASIC,
 			      .attr_update = ss_bias_attr_update),
-	AMDGPU_DEVICE_ATTR_RW(xgmi_plpd_policy,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RO(pm_metrics,				ATTR_FLAG_BASIC,
 			      .attr_update = amdgpu_pm_metrics_attr_update),
 };
@@ -2142,39 +2381,32 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
			       uint32_t mask, enum amdgpu_device_attr_states *states)
 {
 	struct device_attribute *dev_attr = &attr->dev_attr;
-	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+	enum amdgpu_device_attr_id attr_id = attr->attr_id;
 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
-	const char *attr_name = dev_attr->attr.name;
 
 	if (!(attr->flags & mask)) {
 		*states = ATTR_STATE_UNSUPPORTED;
 		return 0;
 	}
 
-#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
-
-	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
-		if (gc_ver < IP_VERSION(9, 0, 0))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
-		if (gc_ver < IP_VERSION(9, 0, 0) ||
-		    !amdgpu_device_has_display_hardware(adev))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
-		if (mp1_ver < IP_VERSION(10, 0, 0))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
-		*states = ATTR_STATE_UNSUPPORTED;
-		if (amdgpu_dpm_is_overdrive_supported(adev))
-			*states = ATTR_STATE_SUPPORTED;
-	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+	if (DEVICE_ATTR_IS(mem_busy_percent)) {
 		if ((adev->flags & AMD_IS_APU &&
		     gc_ver != IP_VERSION(9, 4, 3)) ||
		    gc_ver == IP_VERSION(9, 0, 1))
 			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
+		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+		      gc_ver == IP_VERSION(10, 3, 3) ||
+		      gc_ver == IP_VERSION(10, 3, 6) ||
+		      gc_ver == IP_VERSION(10, 3, 7) ||
+		      gc_ver == IP_VERSION(11, 0, 1) ||
+		      gc_ver == IP_VERSION(11, 0, 4) ||
+		      gc_ver == IP_VERSION(11, 5, 0)))
+			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pcie_bw)) {
 		/* PCIe Perf counters won't work on APU nodes */
-		if (adev->flags & AMD_IS_APU)
+		if (adev->flags & AMD_IS_APU ||
+		    !adev->asic_funcs->get_pcie_usage)
 			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(unique_id)) {
 		switch (gc_ver) {
@@ -2183,11 +2415,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		case IP_VERSION(9, 4, 1):
 		case IP_VERSION(9, 4, 2):
 		case IP_VERSION(9, 4, 3):
+		case IP_VERSION(9, 4, 4):
 		case IP_VERSION(10, 3, 0):
 		case IP_VERSION(11, 0, 0):
 		case IP_VERSION(11, 0, 1):
 		case IP_VERSION(11, 0, 2):
 		case IP_VERSION(11, 0, 3):
+		case IP_VERSION(12, 0, 0):
+		case IP_VERSION(12, 0, 1):
 			*states = ATTR_STATE_SUPPORTED;
 			break;
 		default:
@@ -2201,45 +2436,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
 		if (gc_ver < IP_VERSION(9, 1, 0))
 			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
-		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
-		      gc_ver == IP_VERSION(10, 3, 0) ||
-		      gc_ver == IP_VERSION(10, 1, 2) ||
-		      gc_ver == IP_VERSION(11, 0, 0) ||
-		      gc_ver == IP_VERSION(11, 0, 2) ||
-		      gc_ver == IP_VERSION(11, 0, 3) ||
-		      gc_ver == IP_VERSION(9, 4, 3)))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
-		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
-		       gc_ver == IP_VERSION(10, 3, 0) ||
-		       gc_ver == IP_VERSION(11, 0, 2) ||
-		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
-		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
-		      gc_ver == IP_VERSION(10, 3, 0) ||
-		      gc_ver == IP_VERSION(10, 1, 2) ||
-		      gc_ver == IP_VERSION(11, 0, 0) ||
-		      gc_ver == IP_VERSION(11, 0, 2) ||
-		      gc_ver == IP_VERSION(11, 0, 3) ||
-		      gc_ver == IP_VERSION(9, 4, 3)))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
-		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
-		       gc_ver == IP_VERSION(10, 3, 0) ||
-		       gc_ver == IP_VERSION(11, 0, 2) ||
-		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
-			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
 		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
 			*states = ATTR_STATE_UNSUPPORTED;
 		else if ((gc_ver == IP_VERSION(10, 3, 0) ||
			  gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
 			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
-		if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
-			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_mclk_od)) {
 		if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
 			*states = ATTR_STATE_UNSUPPORTED;
@@ -2252,23 +2454,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
		    -EOPNOTSUPP)
 			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
-		if (gc_ver == IP_VERSION(9, 4, 2) ||
-		    gc_ver == IP_VERSION(9, 4, 3))
-			*states = ATTR_STATE_UNSUPPORTED;
 	}
 
 	switch (gc_ver) {
-	case IP_VERSION(9, 4, 1):
-	case IP_VERSION(9, 4, 2):
-		/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
-		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
-		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
-		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
-			dev_attr->attr.mode &= ~S_IWUGO;
-			dev_attr->store = NULL;
-		}
-		break;
 	case IP_VERSION(10, 3, 0):
 		if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
 		    amdgpu_sriov_vf(adev)) {
@@ -2280,22 +2468,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		break;
 	}
 
-	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
-		/* SMU MP1 does not support dcefclk level setting */
-		if (gc_ver >= IP_VERSION(10, 0, 0)) {
-			dev_attr->attr.mode &= ~S_IWUGO;
-			dev_attr->store = NULL;
-		}
-	}
-
-	/* setting should not be allowed from VF if not in one VF mode */
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
-		dev_attr->attr.mode &= ~S_IWUGO;
-		dev_attr->store = NULL;
-	}
-
-#undef DEVICE_ATTR_IS
-
 	return 0;
 }
 
@@ -2534,15 +2706,12 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(adev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
 
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
 	if (ret)
@@ -2579,11 +2748,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 	else
 		return -EINVAL;
 
-	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	ret = pm_runtime_resume_and_get(adev->dev);
+	if (ret < 0)
 		return ret;
-	}
 
 	ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
 
@@ -2628,11 +2795,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 	if (err)
 		return err;
 
-	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (err < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	err = pm_runtime_resume_and_get(adev->dev);
+	if (err < 0)
 		return err;
-	}
 
 	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
 	if (err)
@@ -2669,15 +2834,12 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (err < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-		return err;
-	}
+	err = pm_runtime_get_if_active(adev->dev);
+	if (err <= 0)
+		return err ?: -EPERM;
 
 	err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
 
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
 	if (err)
@@ -2699,15 +2861,12 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (err < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-		return err;
-	}
+	err = pm_runtime_get_if_active(adev->dev);
+	if (err <= 0)
+		return err ?: -EPERM;
 
 	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
 
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
 	if (err)
@@ -2763,15 +2922,12 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (err < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-		return err;
-	}
+	err = pm_runtime_get_if_active(adev->dev);
+	if (err <= 0)
+		return err ?: -EPERM;
 
 	err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
 
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
 	if (err)
@@ -2798,11 +2954,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 	if (err)
 		return err;
 
-	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (err < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	err = pm_runtime_resume_and_get(adev->dev);
+	if (err < 0)
 		return err;
-	}
 
 	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
 	if (err)
@@ -2838,15 +2992,12 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(adev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
 
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
 	if (ret)
@@ -2881,11 +3032,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 	else
 		return -EINVAL;
 
-	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (err < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	err = pm_runtime_resume_and_get(adev->dev);
+	if (err < 0)
 		return err;
-	}
 
 	err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
 
@@ -3010,11 +3159,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (r < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-		return r;
-	}
+	r = pm_runtime_get_if_active(adev->dev);
+	if (r <= 0)
+		return r ?: -EPERM;
 
 	r = amdgpu_dpm_get_power_limit(adev, &limit,
				       pp_limit_level, power_type);
@@ -3024,7 +3171,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
 	else
 		size = sysfs_emit(buf, "\n");
 
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
 	return size;
@@ -3101,11 +3247,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 	value = value / 1000000; /* convert to Watt */
 	value |= limit_type << 24;
 
-	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (err < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	err = pm_runtime_resume_and_get(adev->dev);
+	if (err < 0)
 		return err;
-	}
 
 	err = amdgpu_dpm_set_power_limit(adev, value);
 
@@ -3388,7 +3532,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 
 	/* Skip crit temp on APU */
 	if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
-	     (gc_ver == IP_VERSION(9, 4, 3))) &&
+	     (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4))) &&
 	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
 	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
 		return 0;
@@ -3424,7 +3568,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
 	if (((adev->family == AMDGPU_FAMILY_SI) ||
	     ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
-	      (gc_ver != IP_VERSION(9, 4, 3)))) &&
+	      (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
 	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
@@ -3462,13 +3606,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 
 	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
 	     adev->family == AMDGPU_FAMILY_KV ||	/* not implemented yet */
-	     (gc_ver == IP_VERSION(9, 4, 3))) &&
+	     (gc_ver == IP_VERSION(9, 4, 3) ||
+	      gc_ver == IP_VERSION(9, 4, 4))) &&
 	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
 	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
 		return 0;
 
 	/* only APUs other than gc 9,4,3 have vddnb */
-	if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) &&
+	if ((!(adev->flags & AMD_IS_APU) ||
+	     (gc_ver == IP_VERSION(9, 4, 3) ||
+	      gc_ver == IP_VERSION(9, 4, 4))) &&
 	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
 	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
 		return 0;
@@ -3480,7 +3627,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 		return 0;
 
 	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
-	    (gc_ver != IP_VERSION(9, 4, 3)) &&
+	    (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) &&
 	    (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
 	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
 	     attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
@@ -3490,7 +3637,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 		return 0;
 
 	/* hotspot temperature for gc 9,4,3*/
-	if (gc_ver == IP_VERSION(9, 4, 3)) {
+	if (gc_ver == IP_VERSION(9, 4, 3) ||
+	    gc_ver == IP_VERSION(9, 4, 4)) {
 		if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
 		    attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
 		    attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
@@ -3545,17 +3693,14 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	ret = pm_runtime_get_sync(adev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(adev->dev);
-		return ret;
-	}
+	ret = pm_runtime_get_if_active(adev->dev);
+	if (ret <= 0)
+		return ret ?: -EPERM;
 
 	size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
 	if (size == 0)
 		size = sysfs_emit(buf, "\n");
 
-	pm_runtime_mark_last_busy(adev->dev);
 	pm_runtime_put_autosuspend(adev->dev);
 
 	return size;
@@ -3637,23 +3782,23 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
 	if (ret)
 		return ret;
 
-	ret = pm_runtime_get_sync(adev->dev);
+	ret = pm_runtime_resume_and_get(adev->dev);
 	if (ret < 0)
-		goto err_out0;
+		return ret;
 
 	ret = amdgpu_dpm_odn_edit_dpm_table(adev,
					    cmd_type,
					    parameter,
					    parameter_size);
 	if (ret)
-		goto err_out1;
+		goto err_out;
 
 	if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
 		ret = amdgpu_dpm_dispatch_task(adev,
					       AMD_PP_TASK_READJUST_POWER_STATE,
					       NULL);
 		if (ret)
-			goto err_out1;
+			goto err_out;
 	}
 
 	pm_runtime_mark_last_busy(adev->dev);
@@ -3661,9 +3806,8 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
 
 	return count;
 
-err_out1:
+err_out:
 	pm_runtime_mark_last_busy(adev->dev);
-err_out0:
 	pm_runtime_put_autosuspend(adev->dev);
 
 	return ret;
@@ -3971,6 +4115,117 @@ static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
 	return umode;
}
 
+/**
+ * DOC: fan_zero_rpm_enable
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * zero RPM feature.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file to
+ * reset them.
+ */
+static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					char *buf)
+{
+	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
+}
+
+static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 const char *buf,
+					 size_t count)
+{
+	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+							     PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
+							     buf,
+							     count);
+}
+
+static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
+{
+	umode_t umode = 0000;
+
+	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
+		umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
+		umode |= S_IWUSR;
+
+	return umode;
+}
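As with the other OD interfaces in this file, changes are staged and then committed. An illustrative session; the gpu_od/fan_ctrl location is an assumption based on the driver's existing fan OD attributes and is not shown in this hunk:

  $ cd /sys/bus/pci/devices/.../gpu_od/fan_ctrl    # path assumed, not shown in this patch
  $ cat fan_zero_rpm_enable
  $ echo "0" > fan_zero_rpm_enable                 # stage: disable zero-RPM
  $ echo "c" > fan_zero_rpm_enable                 # commit the staged change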
+
+/**
+ * DOC: fan_zero_rpm_stop_temperature
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * zero RPM stop temperature feature.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file to
+ * reset them.
+ *
+ * This setting works only if the Zero RPM setting is enabled. It adjusts the
+ * temperature below which the fan can stop.
+ */
+static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
+					   struct kobj_attribute *attr,
+					   char *buf)
+{
+	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
+}
+
+static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
+					    struct kobj_attribute *attr,
+					    const char *buf,
+					    size_t count)
+{
+	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+							     PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
+							     buf,
+							     count);
+}
+
+static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
+{
+	umode_t umode = 0000;
+
+	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
+		umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
+		umode |= S_IWUSR;
+
+	return umode;
+}
+
 static struct od_feature_set amdgpu_od_set = {
 	.containers = {
 		[0] = {
@@ -4016,6 +4271,22 @@ static struct od_feature_set amdgpu_od_set = {
 					.store = fan_minimum_pwm_store,
 				},
 			},
+			[5] = {
+				.name = "fan_zero_rpm_enable",
+				.ops = {
+					.is_visible = fan_zero_rpm_enable_visible,
+					.show = fan_zero_rpm_enable_show,
+					.store = fan_zero_rpm_enable_store,
+				},
+			},
+			[6] = {
+				.name = "fan_zero_rpm_stop_temperature",
+				.ops = {
+					.is_visible = fan_zero_rpm_stop_temp_visible,
+					.show = fan_zero_rpm_stop_temp_show,
+					.store = fan_zero_rpm_stop_temp_store,
+				},
+			},
 		},
 	},
 };
@@ -4217,6 +4488,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
 		}
 	}
 
+	/*
+	 * If gpu_od is the only member in the list, that means gpu_od is an
+	 * empty directory, so remove it.
+	 */
+	if (list_is_singular(&adev->pm.od_kobj_list))
+		goto err_out;
+
 	return 0;
 
 err_out:
@@ -4244,8 +4522,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 	/* under multi-vf mode, the hwmon attributes are all not supported */
 	if (mode != SRIOV_VF_MODE_MULTI_VF) {
 		adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
-														DRIVER_NAME, adev,
-														hwmon_groups);
+									   DRIVER_NAME, adev,
+									   hwmon_groups);
 		if (IS_ERR(adev->pm.int_hwmon_dev)) {
 			ret = PTR_ERR(adev->pm.int_hwmon_dev);
 			dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
@@ -4278,6 +4556,16 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		ret = amdgpu_od_set_init(adev);
 		if (ret)
 			goto err_out1;
+	} else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
+		dev_info(adev->dev, "overdrive feature is not supported\n");
+	}
+
+	if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
+	    -EOPNOTSUPP) {
+		ret = devm_device_add_group(adev->dev,
+					    &amdgpu_pm_policy_attr_group);
+		if (ret)
+			goto err_out0;
 	}
 
 	adev->pm.sysfs_initialized = true;
@@ -4385,6 +4673,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
 	/* MEM Load */
 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
 		seq_printf(m, "MEM Load: %u %%\n", value);
+	/* VCN Load */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
+		seq_printf(m, "VCN Load: %u %%\n", value);
 
 	seq_printf(m, "\n");
 
@@ -4496,11 +4787,9 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
 	if (adev->in_suspend && !adev->in_runpm)
 		return -EPERM;
 
-	r = pm_runtime_get_sync(dev->dev);
-	if (r < 0) {
-		pm_runtime_put_autosuspend(dev->dev);
+	r = pm_runtime_resume_and_get(dev->dev);
+	if (r < 0)
 		return r;
-	}
 
 	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
 		r = amdgpu_debugfs_pm_info_pp(m, adev);
@@ -4515,7 +4804,6 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
 	seq_printf(m, "\n");
 
 out:
-	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
 
 	return r;
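The same VCN load sensor added for vcn_busy_percent also feeds the debugfs summary above, so the new line appears alongside the existing load figures (values illustrative):

  $ sudo grep Load /sys/kernel/debug/dri/0/amdgpu_pm_info
  GPU Load: 2 %
  MEM Load: 1 %
  VCN Load: 0 %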