Diffstat (limited to 'drivers/cpufreq/acpi-cpufreq.c')
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 100
1 file changed, 59 insertions(+), 41 deletions(-)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 29904395e95f..e73a66785d69 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -50,8 +50,6 @@ enum {
#define AMD_MSR_RANGE (0x7)
#define HYGON_MSR_RANGE (0x7)
-#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
-
struct acpi_cpufreq_data {
unsigned int resume;
unsigned int cpu_feature;
@@ -75,20 +73,17 @@ static unsigned int acpi_pstate_strict;
static bool boost_state(unsigned int cpu)
{
- u32 lo, hi;
u64 msr;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
- rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
- rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
@@ -115,14 +110,14 @@ static int boost_set_msr(bool enable)
return -EINVAL;
}
- rdmsrl(msr_addr, val);
+ rdmsrq(msr_addr, val);
if (enable)
val &= ~msr_mask;
else
val |= msr_mask;
- wrmsrl(msr_addr, val);
+ wrmsrq(msr_addr, val);
return 0;
}
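
The hunks above replace the split 32-bit accessors (rdmsr_on_cpu(), rdmsrl(), wrmsrl()) with their 64-bit rdmsrq()/wrmsrq() counterparts, so the lo/hi recombination goes away. A minimal sketch of the resulting read-modify-write pattern, not part of this patch; the MSR address and bit mask here are placeholders:

	#include <linux/types.h>
	#include <asm/msr.h>

	/* Illustrative only: set or clear one bit in a 64-bit MSR on the local CPU. */
	static void example_update_msr_bit(u32 msr_addr, u64 bit, bool set)
	{
		u64 val;

		rdmsrq(msr_addr, val);		/* single 64-bit read, no lo/hi split */
		if (set)
			val |= bit;
		else
			val &= ~bit;
		wrmsrq(msr_addr, val);
	}
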
@@ -323,7 +318,6 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
@@ -340,14 +334,8 @@ static void drv_write(struct acpi_cpufreq_data *data,
.val = val,
.func.write = data->cpu_freq_write,
};
- int this_cpu;
- this_cpu = get_cpu();
- if (cpumask_test_cpu(this_cpu, mask))
- do_drv_write(&cmd);
-
- smp_call_function_many(mask, do_drv_write, &cmd, 1);
- put_cpu();
+ on_each_cpu_mask(mask, do_drv_write, &cmd, true);
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
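
The drv_write() hunk above drops the manual get_cpu()/smp_call_function_many()/put_cpu() sequence: smp_call_function_many() skips the calling CPU, which is why the old code had to invoke do_drv_write() locally when the current CPU was in the mask. on_each_cpu_mask() handles that case itself. A minimal sketch of the helper's use (the function names are placeholders, not from this driver):

	#include <linux/smp.h>

	static void example_work(void *info)
	{
		/* Runs on every CPU in the mask; remote CPUs execute it from IPI context. */
	}

	static void example_run_on_mask(const struct cpumask *mask, void *info)
	{
		/* The final 'true' waits until the callback has completed on all CPUs. */
		on_each_cpu_mask(mask, example_work, info, true);
	}
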
@@ -407,7 +395,7 @@ static unsigned int check_freqs(struct cpufreq_policy *policy,
cur_freq = extract_freq(policy, get_cur_val(mask, data));
if (cur_freq == freq)
return 1;
- udelay(10);
+ usleep_range(10, 15);
}
return 0;
}
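
Replacing udelay(10) with usleep_range(10, 15) turns a busy-wait into a timer-backed sleep, which assumes this polling path may sleep; the min/max range also gives the timer subsystem room to coalesce wakeups. An illustrative polling loop using the same idiom (placeholder names, not from this driver):

	#include <linux/types.h>
	#include <linux/delay.h>

	/* Illustrative only: poll a condition, sleeping 10-15 us between attempts. */
	static bool example_poll(bool (*ready)(void), unsigned int attempts)
	{
		while (attempts--) {
			if (ready())
				return true;
			usleep_range(10, 15);
		}
		return false;
	}
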
@@ -628,7 +616,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
-static u64 get_max_boost_ratio(unsigned int cpu)
+/*
+ * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
+ * between the highest_perf and the nominal_perf.
+ *
+ * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal
+ * frequency via @nominal_freq if it is a non-NULL pointer.
+ */
+static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
{
struct cppc_perf_caps perf_caps;
u64 highest_perf, nominal_perf;
@@ -644,13 +639,22 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return 0;
}
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- highest_perf = amd_get_highest_perf();
- else
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ ret = amd_get_boost_ratio_numerator(cpu, &highest_perf);
+ if (ret) {
+ pr_debug("CPU%d: Unable to get boost ratio numerator (%d)\n",
+ cpu, ret);
+ return 0;
+ }
+ } else {
highest_perf = perf_caps.highest_perf;
+ }
nominal_perf = perf_caps.nominal_perf;
+ if (nominal_freq)
+ *nominal_freq = perf_caps.nominal_freq * 1000;
+
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
return 0;
@@ -663,8 +667,12 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
+
#else
-static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
+{
+ return 0;
+}
#endif
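
As a worked illustration of the arithmetic above (the perf values and frequency are hypothetical, not taken from any particular CPU): the ratio returned by get_max_boost_ratio() is a fixed-point factor with SCHED_CAPACITY_SHIFT (10) fractional bits, which acpi_cpufreq_cpu_init() later applies with a matching right shift:

	#include <linux/math64.h>
	#include <linux/sched.h>

	/*
	 * Illustrative only: derive the boost ratio and apply it to a nominal
	 * frequency, mirroring the two expressions used by the driver.
	 * Example: highest_perf = 228, nominal_perf = 180 gives
	 * ratio = (228 << 10) / 180 = 1297, and a 2,000,000 kHz nominal
	 * frequency scales to 2,000,000 * 1297 >> 10 ~= 2,533,203 kHz.
	 */
	static unsigned int example_boosted_max_freq(u64 highest_perf, u64 nominal_perf,
						     unsigned int nominal_freq_khz)
	{
		u64 ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);

		return nominal_freq_khz * ratio >> SCHED_CAPACITY_SHIFT;
	}
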
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -674,9 +682,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
struct acpi_cpufreq_data *data;
unsigned int cpu = policy->cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ u64 max_boost_ratio, nominal_freq = 0;
unsigned int valid_states = 0;
unsigned int result = 0;
- u64 max_boost_ratio;
unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
@@ -826,16 +834,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
- max_boost_ratio = get_max_boost_ratio(cpu);
+ max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
if (max_boost_ratio) {
- unsigned int freq = freq_table[0].frequency;
+ unsigned int freq = nominal_freq;
/*
- * Because the loop above sorts the freq_table entries in the
- * descending order, freq is the maximum frequency in the table.
- * Assume that it corresponds to the CPPC nominal frequency and
- * use it to set cpuinfo.max_freq.
+ * The loop above sorts the freq_table entries in the
+ * descending order. If ACPI CPPC has not advertised
+ * the nominal frequency (this is possible in CPPC
+ * revisions prior to 3), then use the first entry in
+ * the pstate table as a proxy for nominal frequency.
*/
+ if (!freq)
+ freq = freq_table[0].frequency;
+
policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
} else {
/*
@@ -890,8 +902,19 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
- if (acpi_cpufreq_driver.set_boost)
- set_boost(policy, acpi_cpufreq_driver.boost_enabled);
+ if (acpi_cpufreq_driver.set_boost) {
+ if (policy->boost_supported) {
+ /*
+ * The firmware may have altered boost state while the
+ * CPU was offline (for example during a suspend-resume
+ * cycle).
+ */
+ if (policy->boost_enabled != boost_state(cpu))
+ set_boost(policy, policy->boost_enabled);
+ } else {
+ policy->boost_supported = true;
+ }
+ }
return result;
@@ -906,7 +929,7 @@ err_free:
return result;
}
-static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
@@ -919,8 +942,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
free_cpumask_var(data->freqdomain_cpus);
kfree(policy->freq_table);
kfree(data);
-
- return 0;
}
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
@@ -935,7 +956,6 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
}
static struct freq_attr *acpi_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
&cpb,
@@ -975,7 +995,7 @@ static int __init acpi_cpufreq_probe(struct platform_device *pdev)
/* don't keep reloading if cpufreq_driver exists */
if (cpufreq_get_current_driver())
- return -EEXIST;
+ return -ENODEV;
pr_debug("%s\n", __func__);
@@ -1011,22 +1031,20 @@ static int __init acpi_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int acpi_cpufreq_remove(struct platform_device *pdev)
+static void acpi_cpufreq_remove(struct platform_device *pdev)
{
pr_debug("%s\n", __func__);
cpufreq_unregister_driver(&acpi_cpufreq_driver);
free_acpi_perf_data();
-
- return 0;
}
static struct platform_driver acpi_cpufreq_platdrv = {
.driver = {
.name = "acpi-cpufreq",
},
- .remove = acpi_cpufreq_remove,
+ .remove = acpi_cpufreq_remove,
};
static int __init acpi_cpufreq_init(void)