Diffstat (limited to 'drivers/cpufreq/amd-pstate.c')
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 1767
1 file changed, 1489 insertions(+), 278 deletions(-)
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index ace7d50cf2ac..c45bc98721d2 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -36,7 +37,7 @@
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
-#include <linux/amd-pstate.h>
+#include <linux/topology.h>
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
@@ -45,141 +46,490 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
+
+#include "amd-pstate.h"
#include "amd-pstate-trace.h"
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
+#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600
+
+#define AMD_CPPC_EPP_PERFORMANCE 0x00
+#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
+#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
+#define AMD_CPPC_EPP_POWERSAVE 0xFF
+
+static const char * const amd_pstate_mode_string[] = {
+ [AMD_PSTATE_UNDEFINED] = "undefined",
+ [AMD_PSTATE_DISABLE] = "disable",
+ [AMD_PSTATE_PASSIVE] = "passive",
+ [AMD_PSTATE_ACTIVE] = "active",
+ [AMD_PSTATE_GUIDED] = "guided",
+};
+static_assert(ARRAY_SIZE(amd_pstate_mode_string) == AMD_PSTATE_MAX);
+
+const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
+{
+ if (mode < AMD_PSTATE_UNDEFINED || mode >= AMD_PSTATE_MAX)
+ mode = AMD_PSTATE_UNDEFINED;
+ return amd_pstate_mode_string[mode];
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);
+
+struct quirk_entry {
+ u32 nominal_freq;
+ u32 lowest_freq;
+};
+
+static struct cpufreq_driver *current_pstate_driver;
+static struct cpufreq_driver amd_pstate_driver;
+static struct cpufreq_driver amd_pstate_epp_driver;
+static int cppc_state = AMD_PSTATE_UNDEFINED;
+static bool amd_pstate_prefcore = true;
+static struct quirk_entry *quirks;
/*
- * TODO: We need more time to fine tune processors with shared memory solution
- * with community together.
- *
- * There are some performance drops on the CPU benchmarks which reports from
- * Suse. We are co-working with them to fine tune the shared memory solution. So
- * we disable it by default to go acpi-cpufreq on these processors and add a
- * module parameter to be able to enable it manually for debugging.
+ * AMD Energy Preference Performance (EPP)
+ * The EPP is used in the CCLK DPM controller to drive
+ * the frequency at which a core operates during
+ * short periods of activity. EPP values are utilized for
+ * different OS profiles (balanced, performance, power savings).
+ * The display strings corresponding to each EPP index are held in
+ * energy_perf_strings[]:
+ * index String
+ *-------------------------------------
+ * 0 default
+ * 1 performance
+ * 2 balance_performance
+ * 3 balance_power
+ * 4 power
*/
-static bool shared_mem = false;
-module_param(shared_mem, bool, 0444);
-MODULE_PARM_DESC(shared_mem,
- "enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)");
+enum energy_perf_value_index {
+ EPP_INDEX_DEFAULT = 0,
+ EPP_INDEX_PERFORMANCE,
+ EPP_INDEX_BALANCE_PERFORMANCE,
+ EPP_INDEX_BALANCE_POWERSAVE,
+ EPP_INDEX_POWERSAVE,
+ EPP_INDEX_MAX,
+};
-static struct cpufreq_driver amd_pstate_driver;
+static const char * const energy_perf_strings[] = {
+ [EPP_INDEX_DEFAULT] = "default",
+ [EPP_INDEX_PERFORMANCE] = "performance",
+ [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+ [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+ [EPP_INDEX_POWERSAVE] = "power",
+};
+static_assert(ARRAY_SIZE(energy_perf_strings) == EPP_INDEX_MAX);
+
+static unsigned int epp_values[] = {
+ [EPP_INDEX_DEFAULT] = 0,
+ [EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
+ [EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
+ [EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
+ [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
+};
+static_assert(ARRAY_SIZE(epp_values) == EPP_INDEX_MAX);
+
+typedef int (*cppc_mode_transition_fn)(int);
+
+static struct quirk_entry quirk_amd_7k62 = {
+ .nominal_freq = 2600,
+ .lowest_freq = 550,
+};
+
+static inline u8 freq_to_perf(union perf_cached perf, u32 nominal_freq, unsigned int freq_val)
+{
+ u32 perf_val = DIV_ROUND_UP_ULL((u64)freq_val * perf.nominal_perf, nominal_freq);
+
+ return (u8)clamp(perf_val, perf.lowest_perf, perf.highest_perf);
+}
+
+static inline u32 perf_to_freq(union perf_cached perf, u32 nominal_freq, u8 perf_val)
+{
+ return DIV_ROUND_UP_ULL((u64)nominal_freq * perf_val,
+ perf.nominal_perf);
+}
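
The two helpers above are plain linear scaling around the nominal operating point: perf levels and frequencies are proportional, with nominal_perf pinned to nominal_freq, and freq_to_perf() additionally clamps the result into [lowest_perf, highest_perf]. A minimal userspace sketch of the same arithmetic, using made-up capability values rather than anything read from real CPPC registers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical CPPC capabilities: nominal 2800 MHz <-> perf level 166 */
	uint32_t nominal_freq = 2800;	/* MHz */
	uint32_t lowest_perf = 20, nominal_perf = 166, highest_perf = 228;

	/* freq_to_perf(): scale a target frequency to a perf level, then clamp */
	uint32_t target_freq = 3500;	/* MHz */
	uint64_t perf = DIV_ROUND_UP((uint64_t)target_freq * nominal_perf, nominal_freq);
	if (perf < lowest_perf)
		perf = lowest_perf;
	if (perf > highest_perf)
		perf = highest_perf;

	/* perf_to_freq(): the inverse scaling */
	uint64_t freq = DIV_ROUND_UP((uint64_t)nominal_freq * perf, nominal_perf);

	printf("perf=%" PRIu64 " freq=%" PRIu64 " MHz\n", perf, freq);
	return 0;
}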
+
+static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
+{
+ /*
+ * Match the broken BIOS for Family 17h processors that support CPPC V2.
+ * These broken BIOSes lack the nominal_freq and lowest_freq capability
+ * definitions in the ACPI tables.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+ quirks = dmi->driver_data;
+ pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
+ {
+ .callback = dmi_matched_7k62_bios_bug,
+ .ident = "AMD EPYC 7K62",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
+ DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
+ },
+ .driver_data = &quirk_amd_7k62,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);
+
+static inline int get_mode_idx_from_str(const char *str, size_t size)
+{
+ int i;
+
+ for (i = 0; i < AMD_PSTATE_MAX; i++) {
+ if (!strncmp(str, amd_pstate_mode_string[i], size))
+ return i;
+ }
+ return -EINVAL;
+}
+
+static DEFINE_MUTEX(amd_pstate_driver_lock);
+
+static u8 msr_get_epp(struct amd_cpudata *cpudata)
+{
+ u64 value;
+ int ret;
+
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
+ }
-static inline int pstate_enable(bool enable)
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value);
+}
+
+DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
+
+static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
{
- return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
+ return static_call(amd_pstate_get_epp)(cpudata);
}
-static int cppc_enable(bool enable)
+static u8 shmem_get_epp(struct amd_cpudata *cpudata)
{
- int cpu, ret = 0;
+ u64 epp;
+ int ret;
+
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
+ }
+
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
+}
+
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u64 value, prev;
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
+ if (value == prev)
+ return 0;
+
+ if (fast_switch) {
+ wrmsrq(MSR_AMD_CPPC_REQ, value);
+ return 0;
+ } else {
+ int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- for_each_present_cpu(cpu) {
- ret = cppc_set_enable(cpu, enable);
if (ret)
return ret;
}
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ return 0;
+}
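
msr_update_perf() recomposes the 64-bit CPPC request from four 8-bit fields and skips the MSR write when the composed value matches the cached one. Below is a standalone sketch of the FIELD_PREP()/FIELD_GET() round-trip it relies on; the mask positions (max_perf in bits 7:0, min_perf in 15:8, des_perf in 23:16, EPP in 31:24) follow the conventional CPPC_REQ layout and are shown here for illustration only; the authoritative definitions live in the kernel headers:

#include <stdint.h>
#include <stdio.h>

/* illustrative CPPC_REQ field masks; the real ones come from kernel headers */
#define CPPC_MAX_PERF_MASK 0x000000ffULL	/* bits  7:0  */
#define CPPC_MIN_PERF_MASK 0x0000ff00ULL	/* bits 15:8  */
#define CPPC_DES_PERF_MASK 0x00ff0000ULL	/* bits 23:16 */
#define CPPC_EPP_PERF_MASK 0xff000000ULL	/* bits 31:24 */

/* minimal stand-ins for the kernel's FIELD_PREP()/FIELD_GET() */
static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	uint64_t value = 0;

	value |= field_prep(CPPC_MAX_PERF_MASK, 228);
	value |= field_prep(CPPC_MIN_PERF_MASK, 20);
	value |= field_prep(CPPC_DES_PERF_MASK, 0);
	value |= field_prep(CPPC_EPP_PERF_MASK, 0x80);

	printf("req=%#llx epp=%#llx\n", (unsigned long long)value,
	       (unsigned long long)field_get(CPPC_EPP_PERF_MASK, value));
	return 0;
}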
+
+DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
+
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
+ u8 min_perf, u8 des_perf,
+ u8 max_perf, u8 epp,
+ bool fast_switch)
+{
+ return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
+ max_perf, epp, fast_switch);
+}
+
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u64 value, prev;
+ int ret;
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ value != prev);
+ }
+
+ if (value == prev)
+ return 0;
+
+ ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (ret) {
+ pr_err("failed to set energy perf value (%d)\n", ret);
+ return ret;
+ }
+
+ /* update the cached request value so that msr_update_perf() can check against it */
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
+DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
-static inline int amd_pstate_enable(bool enable)
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- return static_call(amd_pstate_enable)(enable);
+ return static_call(amd_pstate_set_epp)(policy, epp);
}
-static int pstate_init_perf(struct amd_cpudata *cpudata)
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- u64 cap1;
- u32 highest_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ struct cppc_perf_ctrls perf_ctrls;
+ u8 epp_cached;
+ u64 value;
+ int ret;
+
- int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
+ epp_cached = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ epp != epp_cached);
+ }
+
+ if (epp == epp_cached)
+ return 0;
+
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
+ }
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ return ret;
+}
+
+static inline int msr_cppc_enable(struct cpufreq_policy *policy)
+{
+ return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
+}
+
+static int shmem_cppc_enable(struct cpufreq_policy *policy)
+{
+ return cppc_set_enable(policy->cpu, 1);
+}
+
+DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);
+
+static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
+{
+ return static_call(amd_pstate_cppc_enable)(policy);
+}
+
+static int msr_init_perf(struct amd_cpudata *cpudata)
+{
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ u64 cap1, numerator, cppc_req;
+ u8 min_perf;
+
+ int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, cppc_req);
+ min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);
+
/*
- * TODO: Introduce AMD specific power feature.
- *
- * CPPC entry doesn't indicate the highest performance in some ASICs.
+ * Clear out the min_perf part to check whether the rest of the MSR is 0. If it is,
+ * the min_perf value is the one specified through the BIOS option.
*/
- highest_perf = amd_get_highest_perf();
- if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
- highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+ cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
+ if (!cppc_req)
+ perf.bios_min_perf = min_perf;
- WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ perf.nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ perf.lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ perf.lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ WRITE_ONCE(cpudata->perf, perf);
+ WRITE_ONCE(cpudata->prefcore_ranking, FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1));
return 0;
}
-static int cppc_init_perf(struct amd_cpudata *cpudata)
+static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
- u32 highest_perf;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ u64 numerator;
+ bool auto_sel;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
- highest_perf = amd_get_highest_perf();
- if (highest_perf > cppc_perf.highest_perf)
- highest_perf = cppc_perf.highest_perf;
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = cppc_perf.lowest_perf;
+ perf.nominal_perf = cppc_perf.nominal_perf;
+ perf.lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ perf.lowest_perf = cppc_perf.lowest_perf;
+ WRITE_ONCE(cpudata->perf, perf);
+ WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
- WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
- WRITE_ONCE(cpudata->lowest_nonlinear_perf,
- cppc_perf.lowest_nonlinear_perf);
- WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ return 0;
- return 0;
+ ret = cppc_get_auto_sel(cpudata->cpu, &auto_sel);
+ if (ret) {
+ pr_warn("failed to get auto_sel, ret: %d\n", ret);
+ return 0;
+ }
+
+ ret = cppc_set_auto_sel(cpudata->cpu,
+ (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
+
+ if (ret)
+ pr_warn("failed to set auto_sel, ret: %d\n", ret);
+
+ return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
+DEFINE_STATIC_CALL(amd_pstate_init_perf, msr_init_perf);
static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
-{
- if (fast_switch)
- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
- else
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
-}
-
-static void cppc_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ u64 value, prev;
+ int ret;
+
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ int ret = shmem_set_epp(policy, epp);
+
+ if (ret)
+ return ret;
+ }
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
+ if (value == prev)
+ return 0;
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
-}
+ ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ if (ret)
+ return ret;
-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
-{
- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ return 0;
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
@@ -188,8 +538,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
unsigned long flags;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
@@ -215,221 +565,417 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
return true;
}
-static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
+static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
+ u8 des_perf, u8 max_perf, bool fast_switch, int gov_flags)
{
- u64 prev = READ_ONCE(cpudata->cppc_req_cached);
- u64 value = prev;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ if (!policy)
+ return;
+
+ /* limit the max perf when core performance boost feature is disabled */
+ if (!cpudata->boost_supported)
+ max_perf = min_t(u8, perf.nominal_perf, max_perf);
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
+ des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(des_perf);
+ policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
+ if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
+ min_perf = des_perf;
+ des_perf = 0;
+ }
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
- cpudata->cpu, (value != prev), fast_switch);
+ cpudata->cpu, fast_switch);
}
- if (value == prev)
- return;
+ amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
+}
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
+{
+ /*
+ * Initialize lower frequency limit (i.e.policy->min) with
+ * lowest_nonlinear_frequency or the min frequency (if) specified in BIOS,
+ * Override the initial value set by cpufreq core and amd-pstate qos_requests.
+ */
+ if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) =
+ cpufreq_cpu_get(policy_data->cpu);
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+
+ if (!policy)
+ return -EINVAL;
+
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
+
+ if (perf.bios_min_perf)
+ policy_data->min = perf_to_freq(perf, cpudata->nominal_freq,
+ perf.bios_min_perf);
+ else
+ policy_data->min = cpudata->lowest_nonlinear_freq;
+ }
+
+ cpufreq_verify_within_cpu_limits(policy_data);
- amd_pstate_update_perf(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ return 0;
}
-static int amd_pstate_verify(struct cpufreq_policy_data *policy)
+static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- cpufreq_verify_within_cpu_limits(policy);
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
- return 0;
+ perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
+ WRITE_ONCE(cpudata->max_limit_freq, policy->max);
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq));
+ } else {
+ perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
+ WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+ }
+
+ WRITE_ONCE(cpudata->perf, perf);
}
-static int amd_pstate_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq, bool fast_switch)
{
struct cpufreq_freqs freqs;
- struct amd_cpudata *cpudata = policy->driver_data;
- unsigned long max_perf, min_perf, des_perf, cap_perf;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ u8 des_perf;
- if (!cpudata->max_freq)
- return -ENODEV;
+ cpudata = policy->driver_data;
+
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
- cap_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
- max_perf = cap_perf;
+ perf = READ_ONCE(cpudata->perf);
freqs.old = policy->cur;
freqs.new = target_freq;
- des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
- cpudata->max_freq);
+ des_perf = freq_to_perf(perf, cpudata->nominal_freq, target_freq);
- cpufreq_freq_transition_begin(policy, &freqs);
- amd_pstate_update(cpudata, min_perf, des_perf,
- max_perf, false);
- cpufreq_freq_transition_end(policy, &freqs, false);
+ WARN_ON(fast_switch && !policy->fast_switch_enabled);
+ /*
+ * If fast_switch is desired, then there aren't any registered
+ * transition notifiers. See comment for
+ * cpufreq_enable_fast_switch().
+ */
+ if (!fast_switch)
+ cpufreq_freq_transition_begin(policy, &freqs);
+
+ amd_pstate_update(cpudata, perf.min_limit_perf, des_perf,
+ perf.max_limit_perf, fast_switch,
+ policy->governor->flags);
+
+ if (!fast_switch)
+ cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
}
+static int amd_pstate_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ return amd_pstate_update_freq(policy, target_freq, false);
+}
+
+static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ if (!amd_pstate_update_freq(policy, target_freq, true))
+ return target_freq;
+ return policy->cur;
+}
+
static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long _min_perf,
unsigned long target_perf,
unsigned long capacity)
{
- unsigned long max_perf, min_perf, des_perf,
- cap_perf, lowest_nonlinear_perf;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct amd_cpudata *cpudata = policy->driver_data;
+ u8 max_perf, min_perf, des_perf, cap_perf;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+
+ if (!policy)
+ return;
- cap_perf = READ_ONCE(cpudata->highest_perf);
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ cpudata = policy->driver_data;
+
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
+
+ perf = READ_ONCE(cpudata->perf);
+ cap_perf = perf.highest_perf;
des_perf = cap_perf;
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
- min_perf = READ_ONCE(cpudata->highest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+ else
+ min_perf = cap_perf;
- if (min_perf < lowest_nonlinear_perf)
- min_perf = lowest_nonlinear_perf;
+ if (min_perf < perf.min_limit_perf)
+ min_perf = perf.min_limit_perf;
- max_perf = cap_perf;
+ max_perf = perf.max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
- amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+ amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
+ policy->governor->flags);
}
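
amd_pstate_adjust_perf() rescales the scheduler's hints, which are expressed against `capacity`, onto the CPU's own perf range: des_perf = cap_perf * target_perf / capacity, rounded up, and likewise for min_perf, before both are bounded by the cached limit values. A worked sketch with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical inputs: scheduler hints are scaled against 'capacity' */
	uint64_t capacity = 1024, target_perf = 512, _min_perf = 128;
	uint64_t cap_perf = 228, min_limit_perf = 20, max_limit_perf = 228;

	uint64_t des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);	/* 114 */
	uint64_t min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);	/* 29 */

	/* bound the request by the cached limit values */
	if (min_perf < min_limit_perf)
		min_perf = min_limit_perf;

	uint64_t max_perf = max_limit_perf;
	if (max_perf < min_perf)
		max_perf = min_perf;

	printf("min=%llu des=%llu max=%llu\n",
	       (unsigned long long)min_perf,
	       (unsigned long long)des_perf,
	       (unsigned long long)max_perf);
	return 0;
}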
-static int amd_get_min_freq(struct amd_cpudata *cpudata)
+static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
- struct cppc_perf_caps cppc_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ u32 nominal_freq, max_freq;
+ int ret = 0;
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
+ max_freq = perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf);
+
+ if (on)
+ policy->cpuinfo.max_freq = max_freq;
+ else if (policy->cpuinfo.max_freq > nominal_freq)
+ policy->cpuinfo.max_freq = nominal_freq;
+
+ policy->max = policy->cpuinfo.max_freq;
- /* Switch to khz */
- return cppc_perf.lowest_freq * 1000;
+ if (cppc_state == AMD_PSTATE_PASSIVE) {
+ ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq);
+ if (ret < 0)
+ pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu);
+ }
+
+ return ret < 0 ? ret : 0;
}
-static int amd_get_max_freq(struct amd_cpudata *cpudata)
+static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
- struct cppc_perf_caps cppc_perf;
- u32 max_perf, max_freq, nominal_freq, nominal_perf;
- u64 boost_ratio;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
+ if (!cpudata->boost_supported) {
+ pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ return -EOPNOTSUPP;
+ }
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
- max_perf = READ_ONCE(cpudata->highest_perf);
+ ret = amd_pstate_cpu_boost_update(policy, state);
+ refresh_frequency_limits(policy);
- boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
+ return ret;
+}
- max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
+static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
+{
+ u64 boost_val;
+ int ret = -1;
- /* Switch to khz */
- return max_freq * 1000;
+ /*
+ * If the platform has no CPB support, or CPB is disabled, initialize the
+ * driver's boost_enabled state to false. This is not an error for the
+ * cpufreq core to handle.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_CPB)) {
+ pr_debug_once("Boost CPB capabilities not present in the processor\n");
+ ret = 0;
+ goto exit_err;
+ }
+
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+ if (ret) {
+ pr_err_once("failed to read initial CPU boost state!\n");
+ ret = -EIO;
+ goto exit_err;
+ }
+
+ if (!(boost_val & MSR_K7_HWCR_CPB_DIS))
+ cpudata->boost_supported = true;
+
+ return 0;
+
+exit_err:
+ cpudata->boost_supported = false;
+ return ret;
}
-static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
+static void amd_perf_ctl_reset(unsigned int cpu)
{
- struct cppc_perf_caps cppc_perf;
+ wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+}
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
+#define CPPC_MAX_PERF U8_MAX
- /* Switch to khz */
- return cppc_perf.nominal_freq * 1000;
+static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
+{
+ /* user disabled or not detected */
+ if (!amd_pstate_prefcore)
+ return;
+
+ /* should use amd-hfi instead */
+ if (cpu_feature_enabled(X86_FEATURE_AMD_WORKLOAD_CLASS) &&
+ IS_ENABLED(CONFIG_AMD_HFI)) {
+ amd_pstate_prefcore = false;
+ return;
+ }
+
+ cpudata->hw_prefcore = true;
+
+ /* Priorities must be initialized before ITMT support can be toggled on. */
+ sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
}
-static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
+static void amd_pstate_update_limits(struct cpufreq_policy *policy)
{
- struct cppc_perf_caps cppc_perf;
- u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
- nominal_freq, nominal_perf;
- u64 lowest_nonlinear_ratio;
+ struct amd_cpudata *cpudata;
+ u32 prev_high = 0, cur_high = 0;
+ bool highest_perf_changed = false;
+ unsigned int cpu = policy->cpu;
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
+ if (!amd_pstate_prefcore)
+ return;
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ if (amd_get_highest_perf(cpu, &cur_high))
+ return;
- lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ cpudata = policy->driver_data;
- lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
+ prev_high = READ_ONCE(cpudata->prefcore_ranking);
+ highest_perf_changed = (prev_high != cur_high);
+ if (highest_perf_changed) {
+ WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
- lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
+ if (cur_high < CPPC_MAX_PERF) {
+ sched_set_itmt_core_prio((int)cur_high, cpu);
+ sched_update_asym_prefer_cpu(cpu, prev_high, cur_high);
+ }
+ }
+}
+
+/*
+ * Get the pstate transition delay time from the ACPI tables set by
+ * firmware, instead of using a hardcoded value.
+ */
+static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
+{
+ int transition_delay_ns;
+
+ transition_delay_ns = cppc_get_transition_latency(cpu);
+ if (transition_delay_ns < 0) {
+ if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
+ return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
+ else
+ return AMD_PSTATE_TRANSITION_DELAY;
+ }
- /* Switch to khz */
- return lowest_nonlinear_freq * 1000;
+ return transition_delay_ns / NSEC_PER_USEC;
}
-static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+/*
+ * Get the pstate transition latency value from the ACPI tables set by
+ * firmware, instead of using a hardcoded value.
+ */
+static u32 amd_pstate_get_transition_latency(unsigned int cpu)
{
- struct amd_cpudata *cpudata = policy->driver_data;
+ int transition_latency;
+
+ transition_latency = cppc_get_transition_latency(cpu);
+ if (transition_latency < 0)
+ return AMD_PSTATE_TRANSITION_LATENCY;
+
+ return transition_latency;
+}
+
+/*
+ * amd_pstate_init_freq: Initialize the nominal_freq and lowest_nonlinear_freq
+ * for the @cpudata object.
+ *
+ * Requires: all perf members of @cpudata to be initialized.
+ *
+ * Returns 0 on success, non-zero value on failure.
+ */
+static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
+{
+ u32 min_freq, max_freq, nominal_freq, lowest_nonlinear_freq;
+ struct cppc_perf_caps cppc_perf;
+ union perf_cached perf;
int ret;
- if (!cpudata->boost_supported) {
- pr_err("Boost mode is not supported by this processor or SBIOS\n");
- return -EINVAL;
- }
+ ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+ perf = READ_ONCE(cpudata->perf);
- if (state)
- policy->cpuinfo.max_freq = cpudata->max_freq;
+ if (quirks && quirks->nominal_freq)
+ nominal_freq = quirks->nominal_freq;
else
- policy->cpuinfo.max_freq = cpudata->nominal_freq;
+ nominal_freq = cppc_perf.nominal_freq;
+ nominal_freq *= 1000;
- policy->max = policy->cpuinfo.max_freq;
+ if (quirks && quirks->lowest_freq) {
+ min_freq = quirks->lowest_freq;
+ perf.lowest_perf = freq_to_perf(perf, nominal_freq, min_freq);
+ WRITE_ONCE(cpudata->perf, perf);
+ } else
+ min_freq = cppc_perf.lowest_freq;
- ret = freq_qos_update_request(&cpudata->req[1],
- policy->cpuinfo.max_freq);
- if (ret < 0)
- return ret;
+ min_freq *= 1000;
- return 0;
-}
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
-static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
-{
- u32 highest_perf, nominal_perf;
+ max_freq = perf_to_freq(perf, nominal_freq, perf.highest_perf);
+ lowest_nonlinear_freq = perf_to_freq(perf, nominal_freq, perf.lowest_nonlinear_perf);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ /*
+ * The values below need to be initialized correctly, otherwise the driver will fail to load.
+ * max_freq is calculated as (nominal_freq * highest_perf)/nominal_perf.
+ * lowest_nonlinear_freq is a value in the range [min_freq, nominal_freq].
+ * Check the _CPC objects in the ACPI tables if any of these values are incorrect.
+ */
+ if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
+ pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
+ min_freq, max_freq, nominal_freq);
+ return -EINVAL;
+ }
- if (highest_perf <= nominal_perf)
- return;
+ if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq) {
+ pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
+ lowest_nonlinear_freq, min_freq, nominal_freq);
+ return -EINVAL;
+ }
- cpudata->boost_supported = true;
- amd_pstate_driver.boost_enabled = true;
+ return 0;
}
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
- struct device *dev;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ struct device *dev;
+ int ret;
+ /*
+ * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
+ * which is ideal for the initialization process.
+ */
+ amd_perf_ctl_reset(policy->cpu);
dev = get_cpu_device(policy->cpu);
if (!dev)
return -ENODEV;
@@ -444,35 +990,42 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+ amd_pstate_init_prefcore(cpudata);
+
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
+ goto free_cpudata1;
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
- ret = -EINVAL;
+ ret = amd_pstate_init_boost_support(cpudata);
+ if (ret)
goto free_cpudata1;
- }
- policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
- policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
+ policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
+ policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
+
+ perf = READ_ONCE(cpudata->perf);
- policy->min = min_freq;
- policy->max = max_freq;
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
+
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- if (boot_cpu_has(X86_FEATURE_CPPC))
+ if (cpu_feature_enabled(X86_FEATURE_CPPC))
policy->fast_switch_possible = true;
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
- FREQ_QOS_MIN, policy->cpuinfo.min_freq);
+ FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_cpudata1;
@@ -485,56 +1038,33 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
goto free_cpudata2;
}
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
-
policy->driver_data = cpudata;
- amd_pstate_boost_init(cpudata);
+ if (!current_pstate_driver->adjust_perf)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
return 0;
free_cpudata2:
freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
-static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]);
+ policy->fast_switch_possible = false;
kfree(cpudata);
-
- return 0;
-}
-
-static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_enable(true);
- if (ret)
- pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
-
- return ret;
-}
-
-static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_enable(false);
- if (ret)
- pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
-
- return ret;
}
/* Sysfs attributes */
@@ -547,27 +1077,27 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
char *buf)
{
- int max_freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- max_freq = amd_get_max_freq(cpudata);
- if (max_freq < 0)
- return max_freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sprintf(&buf[0], "%u\n", max_freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf));
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
char *buf)
{
- int freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- freq = amd_get_lowest_nonlinear_freq(cpudata);
- if (freq < 0)
- return freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sprintf(&buf[0], "%u\n", freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.lowest_nonlinear_perf));
}
/*
@@ -577,46 +1107,669 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
char *buf)
{
- u32 perf;
+ struct amd_cpudata *cpudata;
+
+ cpudata = policy->driver_data;
+
+ return sysfs_emit(buf, "%u\n", cpudata->perf.highest_perf);
+}
+
+static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
+ char *buf)
+{
+ u8 perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ perf = READ_ONCE(cpudata->prefcore_ranking);
+
+ return sysfs_emit(buf, "%u\n", perf);
+}
+
+static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
+ char *buf)
+{
+ bool hw_prefcore;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
+
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
+}
+
+static ssize_t show_energy_performance_available_preferences(
+ struct cpufreq_policy *policy, char *buf)
+{
+ int offset = 0, i;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return sysfs_emit_at(buf, offset, "%s\n",
+ energy_perf_strings[EPP_INDEX_PERFORMANCE]);
+
+ for (i = 0; i < ARRAY_SIZE(energy_perf_strings); i++)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i]);
+
+ offset += sysfs_emit_at(buf, offset, "\n");
+
+ return offset;
+}
+
+static ssize_t store_energy_performance_preference(
+ struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ ssize_t ret;
+ u8 epp;
+
+ ret = sysfs_match_string(energy_perf_strings, buf);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (!ret)
+ epp = cpudata->epp_default;
+ else
+ epp = epp_values[ret];
+
+ if (epp > 0 && policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("EPP cannot be set under performance policy\n");
+ return -EBUSY;
+ }
+
+ ret = amd_pstate_set_epp(policy, epp);
+
+ return ret ? ret : count;
+}
+
+static ssize_t show_energy_performance_preference(
+ struct cpufreq_policy *policy, char *buf)
+{
struct amd_cpudata *cpudata = policy->driver_data;
+ u8 preference, epp;
+
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
+
+ switch (epp) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ preference = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ preference = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ preference = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ preference = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ return -EINVAL;
+ }
- perf = READ_ONCE(cpudata->highest_perf);
+ return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
+}
- return sprintf(&buf[0], "%u\n", perf);
+static void amd_pstate_driver_cleanup(void)
+{
+ if (amd_pstate_prefcore)
+ sched_clear_itmt_support();
+
+ cppc_state = AMD_PSTATE_DISABLE;
+ current_pstate_driver = NULL;
+}
+
+static int amd_pstate_set_driver(int mode_idx)
+{
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int amd_pstate_register_driver(int mode)
+{
+ int ret;
+
+ ret = amd_pstate_set_driver(mode);
+ if (ret)
+ return ret;
+
+ cppc_state = mode;
+
+ /* at least one CPU supports CPB */
+ current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
+
+ ret = cpufreq_register_driver(current_pstate_driver);
+ if (ret) {
+ amd_pstate_driver_cleanup();
+ return ret;
+ }
+
+ /* Enable ITMT support once all CPUs have initialized their asym priorities. */
+ if (amd_pstate_prefcore)
+ sched_set_itmt_support();
+
+ return 0;
+}
+
+static int amd_pstate_unregister_driver(int dummy)
+{
+ cpufreq_unregister_driver(current_pstate_driver);
+ amd_pstate_driver_cleanup();
+ return 0;
+}
+
+static int amd_pstate_change_mode_without_dvr_change(int mode)
+{
+ int cpu = 0;
+
+ cppc_state = mode;
+
+ if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
+ return 0;
+
+ for_each_online_cpu(cpu) {
+ cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
+ }
+
+ return 0;
+}
+
+static int amd_pstate_change_driver_mode(int mode)
+{
+ int ret;
+
+ ret = amd_pstate_unregister_driver(0);
+ if (ret)
+ return ret;
+
+ ret = amd_pstate_register_driver(mode);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = {
+ [AMD_PSTATE_DISABLE] = {
+ [AMD_PSTATE_DISABLE] = NULL,
+ [AMD_PSTATE_PASSIVE] = amd_pstate_register_driver,
+ [AMD_PSTATE_ACTIVE] = amd_pstate_register_driver,
+ [AMD_PSTATE_GUIDED] = amd_pstate_register_driver,
+ },
+ [AMD_PSTATE_PASSIVE] = {
+ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
+ [AMD_PSTATE_PASSIVE] = NULL,
+ [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
+ [AMD_PSTATE_GUIDED] = amd_pstate_change_mode_without_dvr_change,
+ },
+ [AMD_PSTATE_ACTIVE] = {
+ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
+ [AMD_PSTATE_PASSIVE] = amd_pstate_change_driver_mode,
+ [AMD_PSTATE_ACTIVE] = NULL,
+ [AMD_PSTATE_GUIDED] = amd_pstate_change_driver_mode,
+ },
+ [AMD_PSTATE_GUIDED] = {
+ [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
+ [AMD_PSTATE_PASSIVE] = amd_pstate_change_mode_without_dvr_change,
+ [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
+ [AMD_PSTATE_GUIDED] = NULL,
+ },
+};
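
mode_state_machine[] is a plain transition table: the current mode picks the row, the requested mode the column, and the stored callback (NULL for a no-op) performs the switch; amd_pstate_update_status() below dispatches through it. A reduced sketch of the same pattern with hypothetical mode names and stub callbacks:

#include <stdio.h>

enum mode { M_DISABLE, M_PASSIVE, M_ACTIVE, M_MAX };

typedef int (*transition_fn)(int);

static int register_driver(int mode)   { printf("register %d\n", mode); return 0; }
static int unregister_driver(int mode) { printf("unregister %d\n", mode); return 0; }
static int switch_driver(int mode)     { printf("switch to %d\n", mode); return 0; }

/* row: current mode, column: requested mode; NULL means nothing to do */
static transition_fn state_machine[M_MAX][M_MAX] = {
	[M_DISABLE] = { NULL,              register_driver, register_driver },
	[M_PASSIVE] = { unregister_driver, NULL,            switch_driver   },
	[M_ACTIVE]  = { unregister_driver, switch_driver,   NULL            },
};

int main(void)
{
	int cur = M_PASSIVE, req = M_ACTIVE;

	if (state_machine[cur][req])
		return state_machine[cur][req](req);
	return 0;	/* already in the requested mode */
}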
+
+static ssize_t amd_pstate_show_status(char *buf)
+{
+ if (!current_pstate_driver)
+ return sysfs_emit(buf, "disable\n");
+
+ return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
+}
+
+int amd_pstate_get_status(void)
+{
+ return cppc_state;
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_status);
+
+int amd_pstate_update_status(const char *buf, size_t size)
+{
+ int mode_idx;
+
+ if (size > strlen("passive") || size < strlen("active"))
+ return -EINVAL;
+
+ mode_idx = get_mode_idx_from_str(buf, size);
+ if (mode_idx < 0)
+ return mode_idx;
+
+ if (mode_state_machine[cppc_state][mode_idx]) {
+ guard(mutex)(&amd_pstate_driver_lock);
+ return mode_state_machine[cppc_state][mode_idx](mode_idx);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amd_pstate_update_status);
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ guard(mutex)(&amd_pstate_driver_lock);
+
+ return amd_pstate_show_status(buf);
+}
+
+static ssize_t status_store(struct device *a, struct device_attribute *b,
+ const char *buf, size_t count)
+{
+ char *p = memchr(buf, '\n', count);
+ int ret;
+
+ ret = amd_pstate_update_status(buf, p ? p - buf : count);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t prefcore_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
}
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
+cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
+cpufreq_freq_attr_rw(energy_performance_preference);
+cpufreq_freq_attr_ro(energy_performance_available_preferences);
+static DEVICE_ATTR_RW(status);
+static DEVICE_ATTR_RO(prefcore);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
+ &amd_pstate_prefcore_ranking,
+ &amd_pstate_hw_prefcore,
NULL,
};
+static struct freq_attr *amd_pstate_epp_attr[] = {
+ &amd_pstate_max_freq,
+ &amd_pstate_lowest_nonlinear_freq,
+ &amd_pstate_highest_perf,
+ &amd_pstate_prefcore_ranking,
+ &amd_pstate_hw_prefcore,
+ &energy_performance_preference,
+ &energy_performance_available_preferences,
+ NULL,
+};
+
+static struct attribute *pstate_global_attributes[] = {
+ &dev_attr_status.attr,
+ &dev_attr_prefcore.attr,
+ NULL
+};
+
+static const struct attribute_group amd_pstate_global_attr_group = {
+ .name = "amd_pstate",
+ .attrs = pstate_global_attributes,
+};
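
Once amd_pstate_init() registers this group against the CPU subsystem root (see below), the two attributes should surface as /sys/devices/system/cpu/amd_pstate/status and /sys/devices/system/cpu/amd_pstate/prefcore; writing one of the amd_pstate_mode_string[] values to status drives the mode state machine above.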
+
+static bool amd_pstate_acpi_pm_profile_server(void)
+{
+ switch (acpi_gbl_FADT.preferred_profile) {
+ case PM_ENTERPRISE_SERVER:
+ case PM_SOHO_SERVER:
+ case PM_PERFORMANCE_SERVER:
+ return true;
+ }
+ return false;
+}
+
+static bool amd_pstate_acpi_pm_profile_undefined(void)
+{
+ if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED)
+ return true;
+ if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES)
+ return true;
+ return false;
+}
+
+static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ struct device *dev;
+ int ret;
+
+ /*
+ * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
+ * which is ideal for the initialization process.
+ */
+ amd_perf_ctl_reset(policy->cpu);
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
+ if (!cpudata)
+ return -ENOMEM;
+
+ cpudata->cpu = policy->cpu;
+
+ ret = amd_pstate_init_perf(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ amd_pstate_init_prefcore(cpudata);
+
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ ret = amd_pstate_init_boost_support(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ perf = READ_ONCE(cpudata->perf);
+
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
+ policy->driver_data = cpudata;
+
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
+
+ /* It will be updated by governor */
+ policy->cur = policy->cpuinfo.min_freq;
+
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
+
+ /*
+ * Set the policy to provide a valid fallback value in case
+ * the default cpufreq governor is neither powersave nor performance.
+ */
+ if (amd_pstate_acpi_pm_profile_server() ||
+ amd_pstate_acpi_pm_profile_undefined()) {
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ cpudata->epp_default = amd_pstate_get_epp(cpudata);
+ } else {
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
+ }
+
+ ret = amd_pstate_set_epp(policy, cpudata->epp_default);
+ if (ret)
+ return ret;
+
+ current_pstate_driver->adjust_perf = NULL;
+
+ return 0;
+
+free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
+ kfree(cpudata);
+ return ret;
+}
+
+static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+
+ kfree(cpudata);
+ policy->driver_data = NULL;
+ }
+
+ pr_debug("CPU %d exiting\n", policy->cpu);
+}
+
+static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy, bool policy_change)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf;
+ u8 epp;
+
+ if (policy_change ||
+ policy->min != cpudata->min_limit_freq ||
+ policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ epp = 0;
+ else
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
+
+ perf = READ_ONCE(cpudata->perf);
+
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
+ perf.max_limit_perf, epp, false);
+}
+
+static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
+
+ if (!policy->cpuinfo.max_freq)
+ return -ENODEV;
+
+ cpudata->policy = policy->policy;
+
+ ret = amd_pstate_epp_update_limit(policy, true);
+ if (ret)
+ return ret;
+
+ /*
+ * policy->cur is never updated with the amd_pstate_epp driver, but it
+ * is used as a stale frequency value. So, keep it within limits.
+ */
+ policy->cur = policy->min;
+
+ return 0;
+}
+
+static int amd_pstate_cpu_online(struct cpufreq_policy *policy)
+{
+ return amd_pstate_cppc_enable(policy);
+}
+
+static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /*
+ * Reset the CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+ * min_perf value across kexec reboots. If this CPU is simply onlined again afterwards, the
+ * limits, EPP and desired perf will be reset to the values cached in the cpudata struct.
+ */
+ return amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
+}
+
+static int amd_pstate_suspend(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int ret;
+
+ /*
+ * Reset the CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+ * min_perf value across kexec reboots. If this CPU is simply resumed without a kexec,
+ * the limits, EPP and desired perf will be reset to the values cached in the cpudata struct.
+ */
+ ret = amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
+ if (ret)
+ return ret;
+
+ /* set this flag to avoid setting the core offline */
+ cpudata->suspended = true;
+
+ return 0;
+}
+
+static int amd_pstate_resume(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur);
+
+ /* Set CPPC_REQ to last sane value until the governor updates it */
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, cur_perf, perf.max_limit_perf,
+ 0U, false);
+}
+
+static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata->suspended) {
+ int ret;
+
+ /* re-enable amd-pstate after coming out of suspend */
+ ret = amd_pstate_epp_update_limit(policy, false);
+ if (ret)
+ return ret;
+
+ cpudata->suspended = false;
+ }
+
+ return 0;
+}
+
static struct cpufreq_driver amd_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = amd_pstate_verify,
.target = amd_pstate_target,
+ .fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
- .suspend = amd_pstate_cpu_suspend,
- .resume = amd_pstate_cpu_resume,
+ .online = amd_pstate_cpu_online,
+ .offline = amd_pstate_cpu_offline,
+ .suspend = amd_pstate_suspend,
+ .resume = amd_pstate_resume,
.set_boost = amd_pstate_set_boost,
+ .update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
.attr = amd_pstate_attr,
};
+static struct cpufreq_driver amd_pstate_epp_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = amd_pstate_verify,
+ .setpolicy = amd_pstate_epp_set_policy,
+ .init = amd_pstate_epp_cpu_init,
+ .exit = amd_pstate_epp_cpu_exit,
+ .offline = amd_pstate_cpu_offline,
+ .online = amd_pstate_cpu_online,
+ .suspend = amd_pstate_suspend,
+ .resume = amd_pstate_epp_resume,
+ .update_limits = amd_pstate_update_limits,
+ .set_boost = amd_pstate_set_boost,
+ .name = "amd-pstate-epp",
+ .attr = amd_pstate_epp_attr,
+};
+
+/*
+ * CPPC is not supported on Family 17h CPUs with model IDs ranging from 0x10 to 0x2F.
+ * Show a debug message that helps to check whether the CPU has CPPC support, to aid
+ * in diagnosing driver loading issues.
+ */
+static bool amd_cppc_supported(void)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ bool warn = false;
+
+ if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) {
+ pr_debug_once("CPPC feature is not supported by the processor\n");
+ return false;
+ }
+
+ /*
+ * If the CPPC feature is disabled in the BIOS for processors
+ * that support MSR-based CPPC, the AMD Pstate driver may not
+ * function correctly.
+ *
+ * For such processors, check the CPPC flag and display a
+ * warning message if the platform supports CPPC.
+ *
+ * Note: The check below will not abort the driver
+ * registration process, because it is only added for
+ * debugging purposes. Besides, it may still be possible for
+ * the driver to work using the shared-memory mechanism.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+ switch (c->x86_model) {
+ case 0x60 ... 0x6F:
+ case 0x80 ... 0xAF:
+ warn = true;
+ break;
+ }
+ } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
+ cpu_feature_enabled(X86_FEATURE_ZEN4)) {
+ switch (c->x86_model) {
+ case 0x10 ... 0x1F:
+ case 0x40 ... 0xAF:
+ warn = true;
+ break;
+ }
+ } else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
+ warn = true;
+ }
+ }
+
+ if (warn)
+ pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n"
+ "Please enable it if your BIOS has the CPPC option.\n");
+ return true;
+}
+
static int __init amd_pstate_init(void)
{
+ struct device *dev_root;
int ret;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
+ /* show debug message only if CPPC is not supported */
+ if (!amd_cppc_supported())
+ return -EOPNOTSUPP;
+
+ /* show a warning message when the BIOS is broken or ACPI is disabled */
if (!acpi_cpc_valid()) {
pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
return -ENODEV;
@@ -626,44 +1779,102 @@ static int __init amd_pstate_init(void)
if (cpufreq_get_current_driver())
return -EEXIST;
+ quirks = NULL;
+
+ /* check if this machine needs CPPC quirks */
+ dmi_check_system(amd_pstate_quirks_table);
+
+ /*
+ * Determine the driver mode from the command line or kernel config.
+ * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED.
+ * Command line options override the kernel config settings.
+ */
+
+ if (cppc_state == AMD_PSTATE_UNDEFINED) {
+ /* Disable on the following configs by default:
+ * 1. Undefined platforms
+ * 2. Server platforms with CPUs older than Family 0x1A.
+ */
+ if (amd_pstate_acpi_pm_profile_undefined() ||
+ (amd_pstate_acpi_pm_profile_server() && boot_cpu_data.x86 < 0x1A)) {
+ pr_info("driver load is disabled, boot with specific mode to enable this\n");
+ return -ENODEV;
+ }
+ /* get driver mode from kernel config option [1:4] */
+ cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
+ }
+
+ if (cppc_state == AMD_PSTATE_DISABLE) {
+ pr_info("driver load is disabled, boot with specific mode to enable this\n");
+ return -ENODEV;
+ }
+
/* capability check */
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
- amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
- } else if (shared_mem) {
- static_call_update(amd_pstate_enable, cppc_enable);
- static_call_update(amd_pstate_init_perf, cppc_init_perf);
- static_call_update(amd_pstate_update_perf, cppc_update_perf);
} else {
- pr_info("This processor supports shared memory solution, you can enable it with amd_pstate.shared_mem=1\n");
- return -ENODEV;
+ pr_debug("AMD CPPC shared memory based functionality is supported\n");
+ static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
+ static_call_update(amd_pstate_init_perf, shmem_init_perf);
+ static_call_update(amd_pstate_update_perf, shmem_update_perf);
+ static_call_update(amd_pstate_get_epp, shmem_get_epp);
+ static_call_update(amd_pstate_set_epp, shmem_set_epp);
+ }
+
+ if (amd_pstate_prefcore) {
+ ret = amd_detect_prefcore(&amd_pstate_prefcore);
+ if (ret)
+ return ret;
}
- /* enable amd pstate feature */
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_register_driver(cppc_state);
if (ret) {
- pr_err("failed to enable amd-pstate with return %d\n", ret);
+ pr_err("failed to register with return %d\n", ret);
return ret;
}
- ret = cpufreq_register_driver(&amd_pstate_driver);
- if (ret)
- pr_err("failed to register amd_pstate_driver with return %d\n",
- ret);
+ dev_root = bus_get_dev_root(&cpu_subsys);
+ if (dev_root) {
+ ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
+ put_device(dev_root);
+ if (ret) {
+ pr_err("sysfs attribute export failed with error %d.\n", ret);
+ goto global_attr_free;
+ }
+ }
return ret;
+
+global_attr_free:
+ cpufreq_unregister_driver(current_pstate_driver);
+ return ret;
+}
+device_initcall(amd_pstate_init);
+
+static int __init amd_pstate_param(char *str)
+{
+ size_t size;
+ int mode_idx;
+
+ if (!str)
+ return -EINVAL;
+
+ size = strlen(str);
+ mode_idx = get_mode_idx_from_str(str, size);
+
+ return amd_pstate_set_driver(mode_idx);
}
-static void __exit amd_pstate_exit(void)
+static int __init amd_prefcore_param(char *str)
{
- cpufreq_unregister_driver(&amd_pstate_driver);
+ if (!strcmp(str, "disable"))
+ amd_pstate_prefcore = false;
- amd_pstate_enable(false);
+ return 0;
}
-module_init(amd_pstate_init);
-module_exit(amd_pstate_exit);
+early_param("amd_pstate", amd_pstate_param);
+early_param("amd_prefcore", amd_prefcore_param);
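
Together these allow the driver mode to be chosen on the kernel command line, e.g. amd_pstate=passive (or disable, active, guided, matching amd_pstate_mode_string[]), and amd_prefcore=disable to opt out of preferred-core ranking.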
MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
-MODULE_LICENSE("GPL");