Diffstat (limited to 'drivers/gpu/drm/amd/pm/amdgpu_pm.c')
 drivers/gpu/drm/amd/pm/amdgpu_pm.c | 1359 +++++++++++++++++------------
 1 file changed, 742 insertions(+), 617 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f09b9d49297e..edd9895b46c0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -38,6 +38,8 @@
#define MAX_NUM_OF_FEATURES_PER_SUBSET 8
#define MAX_NUM_OF_SUBSETS 8
+#define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name)
+
struct od_attribute {
struct kobj_attribute attribute;
struct list_head entry;
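
The DEVICE_ATTR_IS() macro added above replaces the strcmp()-based version that default_attr_update() drops further down: it compares enum attribute ids instead of attribute-name strings, and it assumes a local attr_id variable is in scope. A minimal sketch of the calling pattern (the callback name is illustrative; the socclk check mirrors pp_dpm_clk_default_attr_update() below):

static int example_attr_update(struct amdgpu_device *adev,
			       struct amdgpu_device_attr *attr, uint32_t mask,
			       enum amdgpu_device_attr_states *states)
{
	/* DEVICE_ATTR_IS() expects this local variable to be in scope */
	enum amdgpu_device_attr_id attr_id = attr->attr_id;

	*states = ATTR_STATE_SUPPORTED;

	/* enum comparison, no strcmp() on attribute names */
	if (DEVICE_ATTR_IS(pp_dpm_socclk) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}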
@@ -96,6 +98,85 @@ const char * const amdgpu_pp_profile_name[] = {
};
/**
+ * amdgpu_pm_dev_state_check - Check if device can be accessed.
+ * @adev: Target device.
+ * @runpm: Take runtime pm status into account for the suspend state check.
+ *
+ * Checks the state of the @adev for access. Return 0 if the device is
+ * accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
+{
+ bool runpm_check = runpm ? adev->in_runpm : false;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !runpm_check)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
+ * @adev: Target device.
+ *
+ * Checks the state of the @adev for access. Use runtime pm API to resume if
+ * needed. Return 0 if the device is accessible or a negative error code
+ * otherwise.
+ */
+static int amdgpu_pm_get_access(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = amdgpu_pm_dev_state_check(adev, true);
+ if (ret)
+ return ret;
+
+ return pm_runtime_resume_and_get(adev->dev);
+}
+
+/**
+ * amdgpu_pm_get_access_if_active - Check if device is active for access.
+ * @adev: Target device.
+ *
+ * Checks the state of the @adev for access. Use runtime pm API to determine
+ * if device is active. Allow access only if device is active. Return 0 if
+ * the device is accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
+{
+ int ret;
+
+ /* Ignore runpm status. If device is in suspended state, deny access */
+ ret = amdgpu_pm_dev_state_check(adev, false);
+ if (ret)
+ return ret;
+
+ /*
+ * Allow only if the device is active. If runtime PM is disabled, as in
+ * kernels without CONFIG_PM, access is also allowed.
+ */
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (!ret)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_put_access - Put the device back to autosuspend mode after access.
+ * @adev: Target device.
+ *
+ * Should be paired with amdgpu_pm_get_access* calls.
+ */
+static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
+{
+ pm_runtime_mark_last_busy(adev->dev);
+ pm_runtime_put_autosuspend(adev->dev);
+}
+
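+
Taken together, these helpers give every sysfs handler the same shape: show() paths use amdgpu_pm_get_access_if_active() so a read never wakes a suspended GPU, store() paths use amdgpu_pm_get_access() so a write resumes the device when needed, and both drop the reference with amdgpu_pm_put_access(). A minimal sketch of a converted show handler (the handler name and the GPU_LOAD query are illustrative, mirroring the conversions in this patch):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int size = sizeof(value);
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);	/* reads never resume */
	if (ret)
		return ret;

	ret = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				     (void *)&value, &size);

	amdgpu_pm_put_access(adev);	/* pair with every successful get */

	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", value);
}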
+/**
* DOC: power_dpm_state
*
* The power_dpm_state file is a legacy interface and is only provided for
@@ -138,21 +219,13 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
enum amd_pm_state_type pm;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
amdgpu_dpm_get_current_power_state(adev, &pm);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -169,11 +242,6 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
enum amd_pm_state_type state;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -183,16 +251,13 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
amdgpu_dpm_set_power_state(adev, state);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -266,21 +331,13 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level = 0xff;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
level = amdgpu_dpm_get_performance_level(adev);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
@@ -305,11 +362,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -334,16 +386,13 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
if (amdgpu_dpm_force_performance_level(adev, level)) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
return -EINVAL;
}
@@ -351,8 +400,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
adev->pm.stable_pstate_ctx = NULL;
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -367,22 +415,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
uint32_t i;
int buf_len, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
@@ -405,23 +445,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
amdgpu_dpm_get_current_power_state(adev, &pm);
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -444,11 +476,6 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->pm.pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
@@ -467,11 +494,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
adev->pm.pp_force_state_enabled = false;
if (strlen(buf) == 1)
@@ -483,11 +505,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
if (ret)
@@ -506,14 +526,13 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
adev->pm.pp_force_state_enabled = true;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return ret;
}
@@ -537,21 +556,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (size <= 0)
return size;
@@ -573,21 +584,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -756,11 +759,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (count > 127 || count == 0)
return -EINVAL;
@@ -806,11 +804,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
type,
@@ -829,14 +825,13 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
goto err_out;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return -EINVAL;
}
@@ -858,16 +853,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
};
uint clk_index;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
@@ -886,8 +874,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -918,25 +905,17 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
uint64_t featuremask;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -953,23 +932,15 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1022,16 +993,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
int size = 0;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
if (ret == -ENOENT)
@@ -1040,8 +1004,7 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1091,25 +1054,17 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_force_clock_level(adev, type, mask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -1276,21 +1231,13 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
value = amdgpu_dpm_get_sclk_od(adev);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
@@ -1305,26 +1252,18 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1338,21 +1277,13 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
value = amdgpu_dpm_get_mclk_od(adev);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
@@ -1367,26 +1298,18 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1407,7 +1330,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
* create a custom set of heuristics, write a string of numbers to the file
* starting with the number of the custom profile along with a setting
* for each heuristic parameter. Due to differences across asic families
- * the heuristic parameters vary from family to family.
+ * the heuristic parameters vary from family to family. Additionally,
+ * you can apply the custom heuristics to different clock domains. Each
+ * clock domain is considered a distinct operation, so if you modify the
+ * gfxclk heuristics and then the memclk heuristics, all of the
+ * custom heuristics will be retained until you switch to another profile.
*
*/
@@ -1420,23 +1347,15 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1459,11 +1378,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
@@ -1490,16 +1404,13 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
parameter[parameter_size] = profile_mode;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (!ret)
return count;
@@ -1513,22 +1424,14 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
{
int r, size = sizeof(uint32_t);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
return r;
- }
/* get the sensor value */
r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
@@ -1582,6 +1485,30 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
}
/**
+ * DOC: vcn_busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the VCN
+ * is as a percentage. The file vcn_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ unsigned int value;
+ int r;
+
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
+/**
* DOC: pcie_bw
*
* The amdgpu driver provides a sysfs API for estimating how much data
@@ -1602,27 +1529,19 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
uint64_t count0 = 0, count1 = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->flags & AMD_IS_APU)
return -ENODATA;
if (!adev->asic_funcs->get_pcie_usage)
return -ENODATA;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%llu %llu %i\n",
count0, count1, pcie_get_mps(adev->pdev));
@@ -1645,11 +1564,6 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->unique_id)
return sysfs_emit(buf, "%016llx\n", adev->unique_id);
@@ -1692,7 +1606,6 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
long throttling_logging_interval;
- unsigned long flags;
int ret = 0;
ret = kstrtol(buf, 0, &throttling_logging_interval);
@@ -1703,18 +1616,12 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
return -EINVAL;
if (throttling_logging_interval > 0) {
- raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
/*
* Reset the ratelimit timer internals.
* This can effectively restart the timer.
*/
- adev->throttling_logging_rs.interval =
- (throttling_logging_interval - 1) * HZ;
- adev->throttling_logging_rs.begin = 0;
- adev->throttling_logging_rs.printed = 0;
- adev->throttling_logging_rs.missed = 0;
- raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
-
+ ratelimit_state_reset_interval(&adev->throttling_logging_rs,
+ (throttling_logging_interval - 1) * HZ);
atomic_set(&adev->throttling_logging_enabled, 1);
} else {
atomic_set(&adev->throttling_logging_enabled, 0);
@@ -1744,11 +1651,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
if (!ret)
@@ -1756,8 +1661,7 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
else
size = sysfs_emit(buf, "failed to get thermal limit\n");
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1781,20 +1685,18 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
if (ret) {
+ amdgpu_pm_put_access(adev);
dev_err(dev, "failed to update thermal limit\n");
return ret;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1818,21 +1720,13 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev,
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1859,16 +1753,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
@@ -1880,8 +1767,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
memcpy(buf, gpu_metrics, size);
out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1977,21 +1863,14 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
int r = 0;
int bias = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return r;
- }
-
r = kstrtoint(buf, 10, &bias);
if (r)
-		goto out;
+		return r;
+ r = amdgpu_pm_get_access(adev);
+ if (r < 0)
+ return r;
+
if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
@@ -2003,8 +1882,8 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
/* TODO: update bias level with SMU message */
-out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return r;
}
@@ -2046,9 +1925,11 @@ static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdg
return 0;
}
- /* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
- if (gc_ver == IP_VERSION(9, 4, 3)) {
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
+ if (gc_ver == IP_VERSION(9, 4, 3) ||
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)) {
+ if (amdgpu_sriov_multi_vf_mode(adev))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
}
@@ -2083,7 +1964,7 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_
* setting should not be allowed from VF if not in one VF mode.
*/
if (gc_ver >= IP_VERSION(10, 0, 0) ||
- (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
+ (amdgpu_sriov_multi_vf_mode(adev))) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
@@ -2091,63 +1972,223 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_
return 0;
}
-/* Following items will be read out to indicate current plpd policy:
- * - -1: none
- * - 0: disallow
- * - 1: default
- * - 2: optimized
- */
-static ssize_t amdgpu_get_xgmi_plpd_policy(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- char *mode_desc = "none";
- int mode;
+ struct device_attribute *dev_attr = &attr->dev_attr;
+ enum amdgpu_device_attr_id attr_id = attr->attr_id;
+ uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ *states = ATTR_STATE_SUPPORTED;
+
+ if (!(attr->flags & mask)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+ if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+ if (gc_ver < IP_VERSION(9, 0, 0))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ if (mp1_ver < IP_VERSION(10, 0, 0))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
+ if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(10, 1, 2) ||
+ gc_ver == IP_VERSION(11, 0, 0) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(9, 4, 3) ||
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
+ if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+ if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(10, 1, 2) ||
+ gc_ver == IP_VERSION(11, 0, 0) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(9, 4, 3) ||
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
+ if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+ if (gc_ver == IP_VERSION(9, 4, 2) ||
+ gc_ver == IP_VERSION(9, 4, 3) ||
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))
+ *states = ATTR_STATE_UNSUPPORTED;
+ }
+
+ switch (gc_ver) {
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+		/* the MI series cards do not support standalone mclk/socclk/fclk level setting */
+ if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+ DEVICE_ATTR_IS(pp_dpm_socclk) ||
+ DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+ break;
+ default:
+ break;
+ }
- mode = amdgpu_dpm_get_xgmi_plpd_mode(adev, &mode_desc);
+ /* setting should not be allowed from VF if not in one VF mode */
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
- return sysfs_emit(buf, "%d: %s\n", mode, mode_desc);
+ return 0;
}
-/* Following argument value is expected from user to change plpd policy
- * - arg 0: disallow plpd
- * - arg 1: default policy
- * - arg 2: optimized policy
+/* pm policy attributes */
+struct amdgpu_pm_policy_attr {
+ struct device_attribute dev_attr;
+ enum pp_pm_policy id;
+};
+
+/**
+ * DOC: pm_policy
+ *
+ * Certain SOCs can support different power policies to optimize application
+ * performance. However, this policy is provided only at SOC level and not at a
+ * per-process level. This is especially useful when the entire SOC is
+ * utilized for a dedicated workload.
+ *
+ * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
+ * only two types of policies are supported through this interface.
+ *
+ * Pstate Policy Selection - This is to select different Pstate profiles which
+ * decides clock/throttling preferences.
+ *
+ * XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
+ * this helps to select policy to be applied for per link power down.
+ *
+ * The list of available policies and policy levels varies between SOCs. They
+ * can be viewed under the pm_policy node directory. If a SOC doesn't support
+ * any policy, this node won't be available. The different policies supported
+ * will be available as separate nodes under pm_policy.
+ *
+ * cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
+ *
+ * Reading the policy file shows the different levels supported. The level
+ * that is currently applied is denoted by * (asterisk). E.g.,
+ *
+ * .. code-block:: console
+ *
+ * cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
+ * 0 : soc_pstate_default
+ * 1 : soc_pstate_0
+ * 2 : soc_pstate_1*
+ * 3 : soc_pstate_2
+ *
+ * cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
+ * 0 : plpd_disallow
+ * 1 : plpd_default
+ * 2 : plpd_optimized*
+ *
+ * To apply a specific policy
+ *
+ * "echo <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>"
+ *
+ * For the levels listed in the example above, to select "plpd_optimized" for
+ * XGMI and "soc_pstate_2" for soc pstate policy -
+ *
+ * .. code-block:: console
+ *
+ * echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
+ * echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
+ *
*/
-static ssize_t amdgpu_set_xgmi_plpd_policy(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- int mode, ret;
+ struct amdgpu_pm_policy_attr *policy_attr;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ policy_attr =
+ container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- ret = kstrtos32(buf, 0, &mode);
- if (ret)
+ return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
+}
+
+static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_pm_policy_attr *policy_attr;
+ int ret, num_params = 0;
+ char delimiter[] = " \n\t";
+ char tmp_buf[128];
+ char *tmp, *param;
+ long val;
+
+ count = min(count, sizeof(tmp_buf));
+ memcpy(tmp_buf, buf, count);
+ tmp_buf[count - 1] = '\0';
+ tmp = tmp_buf;
+
+ tmp = skip_spaces(tmp);
+ while ((param = strsep(&tmp, delimiter))) {
+ if (!strlen(param)) {
+ tmp = skip_spaces(tmp);
+ continue;
+ }
+ ret = kstrtol(param, 0, &val);
+ if (ret)
+ return -EINVAL;
+ num_params++;
+ if (num_params > 1)
+ return -EINVAL;
+ }
+
+ if (num_params != 1)
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ policy_attr =
+ container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
+
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
- ret = amdgpu_dpm_set_xgmi_plpd_mode(adev, mode);
+ ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -2155,6 +2196,48 @@ static ssize_t amdgpu_set_xgmi_plpd_policy(struct device *dev,
return count;
}
+#define AMDGPU_PM_POLICY_ATTR(_name, _id) \
+ static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = { \
+ .dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
+ amdgpu_set_pm_policy_attr), \
+ .id = PP_PM_POLICY_##_id, \
+ };
+
+#define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr
+
+AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
+AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)
+
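For reference, AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE) above expands to roughly the following definition, and AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate) then names its embedded struct attribute for the array below (a sketch of the expansion, not literal preprocessor output):

static struct amdgpu_pm_policy_attr pm_policy_attr_soc_pstate = {
	.dev_attr = __ATTR(soc_pstate, 0644, amdgpu_get_pm_policy_attr,
			   amdgpu_set_pm_policy_attr),
	.id = PP_PM_POLICY_SOC_PSTATE,
};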
+static struct attribute *pm_policy_attrs[] = {
+ &AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
+ &AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
+ NULL
+};
+
+static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_pm_policy_attr *policy_attr;
+
+ policy_attr =
+ container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
+
+ if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
+ -ENOENT)
+ return 0;
+
+ return attr->mode;
+}
+
+const struct attribute_group amdgpu_pm_policy_attr_group = {
+ .name = "pm_policy",
+ .attrs = pm_policy_attrs,
+ .is_visible = amdgpu_pm_policy_attr_visible,
+};
+
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2162,17 +2245,26 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
.attr_update = pp_dpm_dcefclk_attr_update),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+ .attr_update = pp_dpm_clk_default_attr_update),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2180,6 +2272,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
.attr_update = pp_od_clk_voltage_attr_update),
AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2192,7 +2285,6 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
.attr_update = ss_power_attr_update),
AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
.attr_update = ss_bias_attr_update),
- AMDGPU_DEVICE_ATTR_RW(xgmi_plpd_policy, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC,
.attr_update = amdgpu_pm_metrics_attr_update),
};
@@ -2201,28 +2293,37 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
uint32_t mask, enum amdgpu_device_attr_states *states)
{
struct device_attribute *dev_attr = &attr->dev_attr;
- uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ enum amdgpu_device_attr_id attr_id = attr->attr_id;
uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
- const char *attr_name = dev_attr->attr.name;
if (!(attr->flags & mask)) {
*states = ATTR_STATE_UNSUPPORTED;
return 0;
}
-#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
-
- if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
- if (gc_ver < IP_VERSION(9, 0, 0))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
- if (mp1_ver < IP_VERSION(10, 0, 0))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+ if (DEVICE_ATTR_IS(mem_busy_percent)) {
if ((adev->flags & AMD_IS_APU &&
gc_ver != IP_VERSION(9, 4, 3)) ||
gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
+ if (!(gc_ver == IP_VERSION(9, 3, 0) ||
+ gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(11, 0, 0) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0) ||
+ gc_ver == IP_VERSION(11, 5, 1) ||
+ gc_ver == IP_VERSION(11, 5, 2) ||
+ gc_ver == IP_VERSION(11, 5, 3) ||
+ gc_ver == IP_VERSION(12, 0, 0) ||
+ gc_ver == IP_VERSION(12, 0, 1)))
+ *states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
if (adev->flags & AMD_IS_APU ||
@@ -2235,11 +2336,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
*states = ATTR_STATE_SUPPORTED;
break;
default:
@@ -2253,45 +2358,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(gpu_metrics)) {
if (gc_ver < IP_VERSION(9, 1, 0))
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
- if (!(gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(10, 1, 2) ||
- gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3) ||
- gc_ver == IP_VERSION(9, 4, 3)))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
- if (!((gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
- if (!(gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(10, 1, 2) ||
- gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3) ||
- gc_ver == IP_VERSION(9, 4, 3)))
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
- if (!((gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 0) ||
- gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
- *states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
else if ((gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
- if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
- *states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_mclk_od)) {
if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
@@ -2304,23 +2376,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
-EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
- if (gc_ver == IP_VERSION(9, 4, 2) ||
- gc_ver == IP_VERSION(9, 4, 3))
- *states = ATTR_STATE_UNSUPPORTED;
}
switch (gc_ver) {
- case IP_VERSION(9, 4, 1):
- case IP_VERSION(9, 4, 2):
- /* the Mi series card does not support standalone mclk/socclk/fclk level setting */
- if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
- DEVICE_ATTR_IS(pp_dpm_socclk) ||
- DEVICE_ATTR_IS(pp_dpm_fclk)) {
- dev_attr->attr.mode &= ~S_IWUGO;
- dev_attr->store = NULL;
- }
- break;
case IP_VERSION(10, 3, 0):
if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
amdgpu_sriov_vf(adev)) {
@@ -2332,14 +2390,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
break;
}
- /* setting should not be allowed from VF if not in one VF mode */
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
- dev_attr->attr.mode &= ~S_IWUGO;
- dev_attr->store = NULL;
- }
-
-#undef DEVICE_ATTR_IS
-
return 0;
}
@@ -2573,21 +2623,13 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2605,11 +2647,6 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
u32 pwm_mode;
int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2623,16 +2660,13 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ ret = amdgpu_pm_get_access(adev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2663,20 +2697,13 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = amdgpu_pm_get_access(adev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
if (err)
@@ -2691,8 +2718,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2708,21 +2734,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
return err;
- }
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2738,21 +2756,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
return err;
- }
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2802,21 +2812,13 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
return err;
- }
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2833,20 +2835,13 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = amdgpu_pm_get_access(adev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
if (err)
@@ -2860,8 +2855,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2877,21 +2871,13 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2909,11 +2895,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2925,16 +2906,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = amdgpu_pm_get_access(adev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return -EINVAL;
@@ -2959,6 +2937,23 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
return sysfs_emit(buf, "%d\n", vddgfx);
}
+static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ u32 vddboard;
+ int r;
+
+ /* get the voltage */
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&vddboard);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%d\n", vddboard);
+}
+
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -2966,6 +2961,12 @@ static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
return sysfs_emit(buf, "vddgfx\n");
}
+static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "vddboard\n");
+}
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3049,16 +3050,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
ssize_t size;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
return r;
- }
r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
@@ -3068,8 +3062,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
else
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -3130,11 +3123,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -3145,16 +3133,13 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
value = value / 1000000; /* convert to Watt */
value |= limit_type << 24;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = amdgpu_pm_get_access(adev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_set_power_limit(adev, value);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -3325,6 +3310,8 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0)
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
@@ -3372,6 +3359,8 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_label.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_label.dev_attr.attr,
&sensor_dev_attr_power1_average.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
@@ -3432,7 +3421,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* Skip crit temp on APU */
if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
- (gc_ver == IP_VERSION(9, 4, 3))) &&
+ (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;
@@ -3468,7 +3458,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
- (gc_ver != IP_VERSION(9, 4, 3)))) &&
+ (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
@@ -3506,17 +3496,29 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
- (gc_ver == IP_VERSION(9, 4, 3))) &&
+ (gc_ver == IP_VERSION(9, 4, 3) ||
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
attr == &sensor_dev_attr_in0_label.dev_attr.attr))
return 0;
/* only APUs other than gc 9.4.3/9.4.4/9.5.0 have vddnb */
- if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) &&
+ if ((!(adev->flags & AMD_IS_APU) ||
+ (gc_ver == IP_VERSION(9, 4, 3) ||
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
+ /* only a few boards support vddboard */
+ if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
+ amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&tmp) == -EOPNOTSUPP)
+ return 0;
+
/* no mclk on APUs other than gc 9.4.3 */
if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
@@ -3524,7 +3526,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
- (gc_ver != IP_VERSION(9, 4, 3)) &&
+ (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) &&
(attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
@@ -3534,7 +3536,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* hotspot temperature for gc 9.4.3, 9.4.4 and 9.5.0 */
- if (gc_ver == IP_VERSION(9, 4, 3)) {
+ if (gc_ver == IP_VERSION(9, 4, 3) ||
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)) {
if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
@@ -3584,23 +3588,15 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
int size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_sync(adev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev->dev);
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
return ret;
- }
size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -3668,11 +3664,6 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
long parameter[64];
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = parse_input_od_command_lines(in_buf,
count,
&cmd_type,
@@ -3681,34 +3672,31 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = pm_runtime_get_sync(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
- goto err_out0;
+ return ret;
ret = amdgpu_dpm_odn_edit_dpm_table(adev,
cmd_type,
parameter,
parameter_size);
if (ret)
- goto err_out1;
+ goto err_out;
if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
ret = amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_READJUST_POWER_STATE,
NULL);
if (ret)
- goto err_out1;
+ goto err_out;
}
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return count;
-err_out1:
- pm_runtime_mark_last_busy(adev->dev);
-err_out0:
- pm_runtime_put_autosuspend(adev->dev);
+err_out:
+ amdgpu_pm_put_access(adev);
return ret;
}
@@ -4015,6 +4003,117 @@ static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
return umode;
}
+/**
+ * DOC: fan_zero_rpm_enable
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * zero RPM feature.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file to
+ * reset it.
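+ *
+ * A minimal usage sketch follows; the sysfs path is an assumption and may
+ * differ per card:
+ *
+ * # disable zero RPM, then commit:
+ * echo "0" > /sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_zero_rpm_enable
+ * echo "c" > /sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_zero_rpm_enable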
+ */
+static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
+}
+
+static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+ PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
+ buf,
+ count);
+}
+
+static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
+{
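+ /*
+ * Hidden by default; expose read and/or write access only when the
+ * corresponding bits are set in adev->pm.od_feature_mask.
+ */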
+ umode_t umode = 0000;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
+ umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
+ umode |= S_IWUSR;
+
+ return umode;
+}
+
+/**
+ * DOC: fan_zero_rpm_stop_temperature
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * zero RPM stop temperature feature.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file to
+ * reset it.
+ *
+ * This setting takes effect only when zero RPM is enabled. It adjusts the
+ * temperature below which the fan can stop.
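+ *
+ * For example (sysfs path assumed, as for fan_zero_rpm_enable), to let the
+ * fan stop below 40 degrees Celsius and commit the change:
+ *
+ * echo "40" > /sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_zero_rpm_stop_temperature
+ * echo "c" > /sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_zero_rpm_stop_temperature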
+ */
+static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
+}
+
+static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+ PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
+ buf,
+ count);
+}
+
+static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
+{
+ umode_t umode = 0000;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
+ umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
+ umode |= S_IWUSR;
+
+ return umode;
+}
+
static struct od_feature_set amdgpu_od_set = {
.containers = {
[0] = {
@@ -4060,6 +4159,22 @@ static struct od_feature_set amdgpu_od_set = {
.store = fan_minimum_pwm_store,
},
},
+ [5] = {
+ .name = "fan_zero_rpm_enable",
+ .ops = {
+ .is_visible = fan_zero_rpm_enable_visible,
+ .show = fan_zero_rpm_enable_show,
+ .store = fan_zero_rpm_enable_store,
+ },
+ },
+ [6] = {
+ .name = "fan_zero_rpm_stop_temperature",
+ .ops = {
+ .is_visible = fan_zero_rpm_stop_temp_visible,
+ .show = fan_zero_rpm_stop_temp_show,
+ .store = fan_zero_rpm_stop_temp_store,
+ },
+ },
},
},
},
@@ -4261,6 +4376,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
}
}
+ /*
+ * If gpu_od is the only member in the list, that means gpu_od is an
+ * empty directory, so remove it.
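+ * (gpu_od itself is the first entry on od_kobj_list, so a singular list
+ * means no feature directory was created under it.)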
+ */
+ if (list_is_singular(&adev->pm.od_kobj_list))
+ goto err_out;
+
return 0;
err_out:
@@ -4288,8 +4410,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
/* under multi-vf mode, the hwmon attributes are all not supported */
if (mode != SRIOV_VF_MODE_MULTI_VF) {
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
- DRIVER_NAME, adev,
- hwmon_groups);
+ DRIVER_NAME, adev,
+ hwmon_groups);
if (IS_ERR(adev->pm.int_hwmon_dev)) {
ret = PTR_ERR(adev->pm.int_hwmon_dev);
dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
@@ -4322,6 +4444,16 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
ret = amdgpu_od_set_init(adev);
if (ret)
goto err_out1;
+ } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
+ dev_info(adev->dev, "overdrive feature is not supported\n");
+ }
+
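+ /*
+ * Expose the pm_policy attribute group only when the implementation
+ * reports policy support, i.e. anything other than -EOPNOTSUPP.
+ */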
+ if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
+ -EOPNOTSUPP) {
+ ret = devm_device_add_group(adev->dev,
+ &amdgpu_pm_policy_attr_group);
+ if (ret)
+ goto err_out0;
}
adev->pm.sysfs_initialized = true;
@@ -4429,6 +4561,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* MEM Load */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
seq_printf(m, "MEM Load: %u %%\n", value);
+ /* VCN Load */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
+ seq_printf(m, "VCN Load: %u %%\n", value);
seq_printf(m, "\n");
@@ -4531,20 +4666,12 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct drm_device *dev = adev_to_drm(adev);
u64 flags = 0;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(dev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(dev->dev);
+ r = amdgpu_pm_get_access(adev);
+ if (r < 0)
return r;
- }
if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
r = amdgpu_debugfs_pm_info_pp(m, adev);
@@ -4559,8 +4686,7 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
seq_printf(m, "\n");
out:
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
@@ -4580,10 +4706,9 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
void *smu_prv_buf;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
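+ /* state check only; this read path does not take a runtime PM reference */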
+ ret = amdgpu_pm_dev_state_check(adev, true);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
if (ret)