Diffstat (limited to 'drivers/cpufreq/cppc_cpufreq.c')
-rw-r--r--	drivers/cpufreq/cppc_cpufreq.c | 525
1 file changed, 237 insertions(+), 288 deletions(-)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 022e3555407c..9eac77c4f294 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -16,60 +16,25 @@
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
-#include <linux/dmi.h>
 #include <linux/irq_work.h>
 #include <linux/kthread.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
 #include <uapi/linux/sched/types.h>
 
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 
 #include <acpi/cppc_acpi.h>
 
-/* Minimum struct length needed for the DMI processor entry we want */
-#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48
-
-/* Offset in the DMI processor structure for the max frequency */
-#define DMI_PROCESSOR_MAX_SPEED		0x14
-
-/*
- * This list contains information parsed from per CPU ACPI _CPC and _PSD
- * structures: e.g. the highest and lowest supported performance, capabilities,
- * desired performance, level requested etc. Depending on the share_type, not
- * all CPUs will have an entry in the list.
- */
-static LIST_HEAD(cpu_data_list);
-
-static bool boost_supported;
-
-struct cppc_workaround_oem_info {
-	char oem_id[ACPI_OEM_ID_SIZE + 1];
-	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
-	u32 oem_revision;
-};
-
-static struct cppc_workaround_oem_info wa_info[] = {
-	{
-		.oem_id		= "HISI  ",
-		.oem_table_id	= "HIP07   ",
-		.oem_revision	= 0,
-	}, {
-		.oem_id		= "HISI  ",
-		.oem_table_id	= "HIP08   ",
-		.oem_revision	= 0,
-	}
-};
-
 static struct cpufreq_driver cppc_cpufreq_driver;
 
+#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
 static enum {
 	FIE_UNSET = -1,
 	FIE_ENABLED,
 	FIE_DISABLED
 } fie_disabled = FIE_UNSET;
 
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
 module_param(fie_disabled, int, 0444);
 MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)");
 
@@ -85,9 +50,7 @@ struct cppc_freq_invariance {
 static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
 static struct kthread_worker *kworker_fie;
 
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
-				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
 				 struct cppc_perf_fb_ctrs *fb_ctrs_t1);
 
 /**
@@ -123,8 +86,10 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
 		return;
 	}
 
-	perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
-				     &fb_ctrs);
+	perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
+	if (!perf)
+		return;
+
 	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
 
 	perf <<= SCHED_CAPACITY_SHIFT;
@@ -177,16 +142,15 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
 		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
 
 		ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
-		if (ret) {
-			pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
-				__func__, cpu, ret);
-			/*
-			 * Don't abort if the CPU was offline while the driver
-			 * was getting registered.
-			 */
-			if (cpu_online(cpu))
-				return;
+		/*
+		 * Don't abort as the CPU was offline while the driver was
+		 * getting registered.
+		 */
+		if (ret && cpu_online(cpu)) {
+			pr_debug("%s: failed to read perf counters for cpu:%d: %d\n",
+				 __func__, cpu, ret);
+			return;
 		}
 	}
 
@@ -231,9 +195,9 @@ static void __init cppc_freq_invariance_init(void)
 		 * Fake (unused) bandwidth; workaround to "fix"
 		 * priority inheritance.
 		 */
-		.sched_runtime	= 1000000,
-		.sched_deadline = 10000000,
-		.sched_period	= 10000000,
+		.sched_runtime	= NSEC_PER_MSEC,
+		.sched_deadline = 10 * NSEC_PER_MSEC,
+		.sched_period	= 10 * NSEC_PER_MSEC,
 	};
 	int ret;
 
@@ -248,16 +212,20 @@ static void __init cppc_freq_invariance_init(void)
 	if (fie_disabled)
 		return;
 
-	kworker_fie = kthread_create_worker(0, "cppc_fie");
-	if (IS_ERR(kworker_fie))
+	kworker_fie = kthread_run_worker(0, "cppc_fie");
+	if (IS_ERR(kworker_fie)) {
+		pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
+			PTR_ERR(kworker_fie));
+		fie_disabled = FIE_DISABLED;
 		return;
+	}
 
 	ret = sched_setattr_nocheck(kworker_fie->task, &attr);
 	if (ret) {
 		pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
 			ret);
 		kthread_destroy_worker(kworker_fie);
-		return;
+		fie_disabled = FIE_DISABLED;
 	}
 }
 
@@ -267,7 +235,6 @@ static void cppc_freq_invariance_exit(void)
 		return;
 
 	kthread_destroy_worker(kworker_fie);
-	kworker_fie = NULL;
 }
 
 #else
@@ -288,110 +255,17 @@ static inline void cppc_freq_invariance_exit(void)
 {
 }
 #endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
 
-/* Callback function used to retrieve the max frequency from DMI */
-static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
-{
-	const u8 *dmi_data = (const u8 *)dm;
-	u16 *mhz = (u16 *)private;
-
-	if (dm->type == DMI_ENTRY_PROCESSOR &&
-	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
-		u16 val = (u16)get_unaligned((const u16 *)
-				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
-		*mhz = val > *mhz ? val : *mhz;
-	}
-}
-
-/* Look up the max frequency in DMI */
-static u64 cppc_get_dmi_max_khz(void)
-{
-	u16 mhz = 0;
-
-	dmi_walk(cppc_find_dmi_mhz, &mhz);
-
-	/*
-	 * Real stupid fallback value, just in case there is no
-	 * actual value set.
-	 */
-	mhz = mhz ? mhz : 1;
-
-	return (1000 * mhz);
-}
-
-/*
- * If CPPC lowest_freq and nominal_freq registers are exposed then we can
- * use them to convert perf to freq and vice versa. The conversion is
- * extrapolated as an affine function passing by the 2 points:
- *  - (Low perf, Low freq)
- *  - (Nominal perf, Nominal perf)
- */
-static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
-					     unsigned int perf)
-{
-	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
-	s64 retval, offset = 0;
-	static u64 max_khz;
-	u64 mul, div;
-
-	if (caps->lowest_freq && caps->nominal_freq) {
-		mul = caps->nominal_freq - caps->lowest_freq;
-		div = caps->nominal_perf - caps->lowest_perf;
-		offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
-	} else {
-		if (!max_khz)
-			max_khz = cppc_get_dmi_max_khz();
-		mul = max_khz;
-		div = caps->highest_perf;
-	}
-
-	retval = offset + div64_u64(perf * mul, div);
-	if (retval >= 0)
-		return retval;
-	return 0;
-}
-
-static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
-					     unsigned int freq)
-{
-	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
-	s64 retval, offset = 0;
-	static u64 max_khz;
-	u64 mul, div;
-
-	if (caps->lowest_freq && caps->nominal_freq) {
-		mul = caps->nominal_perf - caps->lowest_perf;
-		div = caps->nominal_freq - caps->lowest_freq;
-		offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
-	} else {
-		if (!max_khz)
-			max_khz = cppc_get_dmi_max_khz();
-		mul = caps->highest_perf;
-		div = max_khz;
-	}
-
-	retval = offset + div64_u64(freq * mul, div);
-	if (retval >= 0)
-		return retval;
-	return 0;
-}
-
 static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 				   unsigned int target_freq,
 				   unsigned int relation)
-
 {
 	struct cppc_cpudata *cpu_data = policy->driver_data;
 	unsigned int cpu = policy->cpu;
 	struct cpufreq_freqs freqs;
-	u32 desired_perf;
 	int ret = 0;
 
-	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
-	/* Return if it is exactly the same perf */
-	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
-		return ret;
-
-	cpu_data->perf_ctrls.desired_perf = desired_perf;
+	cpu_data->perf_ctrls.desired_perf =
+		cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
 	freqs.old = policy->cur;
 	freqs.new = target_freq;
@@ -414,7 +288,7 @@ static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	u32 desired_perf;
 	int ret;
 
-	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+	desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
 	cpu_data->perf_ctrls.desired_perf = desired_perf;
 	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -433,6 +307,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
 	return 0;
 }
 
+static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+	int transition_latency_ns = cppc_get_transition_latency(cpu);
+
+	if (transition_latency_ns < 0)
+		return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
+
+	return transition_latency_ns / NSEC_PER_USEC;
+}
+
 /*
  * The PCC subspace describes the rate at which platform can accept commands
  * on the shared PCC channel (including READs which do not count towards freq
@@ -455,19 +339,18 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
 			return 10000;
 		}
 	}
-	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+	return __cppc_cpufreq_get_transition_delay_us(cpu);
 }
 
 #else
 
 static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
 {
-	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+	return __cppc_cpufreq_get_transition_delay_us(cpu);
 }
 #endif
 
 #if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
 
 static DEFINE_PER_CPU(unsigned int, efficiency_class);
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
 
 /* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
 #define CPPC_EM_CAP_STEP	(20)
@@ -517,6 +400,9 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
 	struct cppc_cpudata *cpu_data;
 
 	policy = cpufreq_cpu_get_raw(cpu_dev->id);
+	if (!policy)
+		return -EINVAL;
+
 	cpu_data = policy->driver_data;
 	perf_caps = &cpu_data->perf_caps;
 	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
@@ -527,7 +413,7 @@
 	min_step = min_cap / CPPC_EM_CAP_STEP;
 	max_step = max_cap / CPPC_EM_CAP_STEP;
 
-	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+	perf_prev = cppc_khz_to_perf(perf_caps, *KHz);
 	step = perf_prev / perf_step;
 
 	if (step > max_step)
@@ -547,8 +433,8 @@
 		perf = step * perf_step;
 	}
 
-	*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
-	perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+	*KHz = cppc_perf_to_khz(perf_caps, perf);
+	perf_check = cppc_khz_to_perf(perf_caps, *KHz);
 	step_check = perf_check / perf_step;
 
 	/*
@@ -558,8 +444,8 @@
 	 */
 	while ((*KHz == prev_freq) || (step_check != step)) {
 		perf++;
-		*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
-		perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+		*KHz = cppc_perf_to_khz(perf_caps, perf);
+		perf_check = cppc_khz_to_perf(perf_caps, *KHz);
 		step_check = perf_check / perf_step;
 	}
 
@@ -584,11 +470,14 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
 	int step;
 
 	policy = cpufreq_cpu_get_raw(cpu_dev->id);
+	if (!policy)
+		return -EINVAL;
+
 	cpu_data = policy->driver_data;
 	perf_caps = &cpu_data->perf_caps;
 	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
 
-	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
+	perf_prev = cppc_khz_to_perf(perf_caps, KHz);
 	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
 	step = perf_prev / perf_step;
@@ -597,7 +486,19 @@
 	return 0;
 }
 
-static int populate_efficiency_class(void)
+static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+	struct cppc_cpudata *cpu_data;
+	struct em_data_callback em_cb =
+		EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
+
+	cpu_data = policy->driver_data;
+	em_dev_register_perf_domain(get_cpu_device(policy->cpu),
+			get_perf_level_count(policy), &em_cb,
+			cpu_data->shared_cpu_map, 0);
+}
+
+static void populate_efficiency_class(void)
 {
 	struct acpi_madt_generic_interrupt *gicc;
 	DECLARE_BITMAP(used_classes, 256) = {};
@@ -612,7 +513,7 @@
 	if (bitmap_weight(used_classes, 256) <= 1) {
 		pr_debug("Efficiency classes are all equal (=%d). "
 			"No EM registered", class);
-		return -EINVAL;
+		return;
 	}
 
 	/*
@@ -629,26 +530,11 @@
 		index++;
 	}
 	cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;
-
-	return 0;
-}
-
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
-{
-	struct cppc_cpudata *cpu_data;
-	struct em_data_callback em_cb =
-		EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
-
-	cpu_data = policy->driver_data;
-	em_dev_register_perf_domain(get_cpu_device(policy->cpu),
-			get_perf_level_count(policy), &em_cb,
-			cpu_data->shared_cpu_map, 0);
 }
 
 #else
-static int populate_efficiency_class(void)
+static void populate_efficiency_class(void)
 {
-	return 0;
 }
 #endif
 
@@ -676,12 +562,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
 		goto free_mask;
 	}
 
-	/* Convert the lowest and nominal freq from MHz to KHz */
-	cpu_data->perf_caps.lowest_freq *= 1000;
-	cpu_data->perf_caps.nominal_freq *= 1000;
-
-	list_add(&cpu_data->node, &cpu_data_list);
-
 	return cpu_data;
 
 free_mask:
@@ -696,7 +576,6 @@ static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
 {
 	struct cppc_cpudata *cpu_data = policy->driver_data;
 
-	list_del(&cpu_data->node);
 	free_cpumask_var(cpu_data->shared_cpu_map);
 	kfree(cpu_data);
 	policy->driver_data = NULL;
@@ -721,20 +600,17 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
 	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
 	 */
-	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
-					       caps->lowest_nonlinear_perf);
-	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
-					       caps->nominal_perf);
+	policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
+	policy->max = cppc_perf_to_khz(caps, policy->boost_enabled ?
+				       caps->highest_perf : caps->nominal_perf);
 
 	/*
 	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
 	 * available if userspace wants to use any perf between lowest & lowest
 	 * nonlinear perf
 	 */
-	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
-							    caps->lowest_perf);
-	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
-							    caps->nominal_perf);
+	policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
+	policy->cpuinfo.max_freq = policy->max;
 
 	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
 	policy->shared_type = cpu_data->shared_type;
@@ -767,10 +643,10 @@
 	 * is supported.
 	 */
 	if (caps->highest_perf > caps->nominal_perf)
-		boost_supported = true;
+		policy->boost_supported = true;
 
 	/* Set policy->cur to max now. The governors will adjust later. */
-	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
+	policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
 	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
 
 	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -788,7 +664,7 @@ out:
 	return ret;
 }
 
-static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
 	struct cppc_cpudata *cpu_data = policy->driver_data;
 	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
@@ -805,7 +681,6 @@
 			 caps->lowest_perf, cpu, ret);
 
 	cppc_cpufreq_put_cpu_data(policy);
-	return 0;
 }
 
 static inline u64 get_delta(u64 t1, u64 t0)
@@ -816,8 +691,7 @@
 	return (u32)t1 - (u32)t0;
 }
 
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
-				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
 				 struct cppc_perf_fb_ctrs *fb_ctrs_t1)
 {
 	u64 delta_reference, delta_delivered;
@@ -830,37 +704,71 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
 	delta_delivered = get_delta(fb_ctrs_t1->delivered,
 				    fb_ctrs_t0->delivered);
 
-	/* Check to avoid divide-by zero and invalid delivered_perf */
+	/*
+	 * Avoid divide-by zero and unchanged feedback counters.
+	 * Leave it for callers to handle.
+	 */
 	if (!delta_reference || !delta_delivered)
-		return cpu_data->perf_ctrls.desired_perf;
+		return 0;
 
 	return (reference_perf * delta_delivered) / delta_reference;
 }
 
+static int cppc_get_perf_ctrs_sample(int cpu,
+				     struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+				     struct cppc_perf_fb_ctrs *fb_ctrs_t1)
+{
+	int ret;
+
+	ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0);
+	if (ret)
+		return ret;
+
+	udelay(2); /* 2usec delay between sampling */
+
+	return cppc_get_perf_ctrs(cpu, fb_ctrs_t1);
+}
+
 static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
 {
+	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
 	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	struct cppc_cpudata *cpu_data = policy->driver_data;
+	struct cppc_cpudata *cpu_data;
 	u64 delivered_perf;
 	int ret;
 
-	cpufreq_cpu_put(policy);
+	if (!policy)
+		return 0;
 
-	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
-	if (ret)
-		return ret;
+	cpu_data = policy->driver_data;
 
-	udelay(2); /* 2usec delay between sampling */
+	ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
+	if (ret) {
+		if (ret == -EFAULT)
+			/* Any of the associated CPPC regs is 0. */
+			goto out_invalid_counters;
+		else
+			return 0;
+	}
 
-	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
-	if (ret)
-		return ret;
+	delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1);
+	if (!delivered_perf)
+		goto out_invalid_counters;
+
+	return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
 
-	delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
-					       &fb_ctrs_t1);
+out_invalid_counters:
+	/*
+	 * Feedback counters could be unchanged or 0 when a cpu enters a
+	 * low-power idle state, e.g. clock-gated or power-gated.
+	 * Use desired perf for reflecting frequency. Get the latest register
+	 * value first as some platforms may update the actual delivered perf
+	 * there; if failed, resort to the cached desired perf.
+	 */
+	if (cppc_get_desired_perf(cpu, &delivered_perf))
+		delivered_perf = cpu_data->perf_ctrls.desired_perf;
 
-	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
+	return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
 }
 
@@ -869,17 +777,10 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
 	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
 	int ret;
 
-	if (!boost_supported) {
-		pr_err("BOOST not supported by CPU or firmware\n");
-		return -EINVAL;
-	}
-
 	if (state)
-		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
-						       caps->highest_perf);
+		policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
 	else
-		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
-						       caps->nominal_perf);
+		policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
 	policy->cpuinfo.max_freq = policy->max;
 
 	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
@@ -895,15 +796,124 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
 }
+
+static ssize_t show_auto_select(struct cpufreq_policy *policy, char *buf)
+{
+	bool val;
+	int ret;
+
+	ret = cppc_get_auto_sel(policy->cpu, &val);
+
+	/* show "<unsupported>" when this register is not supported by cpc */
+	if (ret == -EOPNOTSUPP)
+		return sysfs_emit(buf, "<unsupported>\n");
+
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t store_auto_select(struct cpufreq_policy *policy,
+				 const char *buf, size_t count)
+{
+	bool val;
+	int ret;
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		return ret;
+
+	ret = cppc_set_auto_sel(policy->cpu, val);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t show_auto_act_window(struct cpufreq_policy *policy, char *buf)
+{
+	u64 val;
+	int ret;
+
+	ret = cppc_get_auto_act_window(policy->cpu, &val);
+
+	/* show "<unsupported>" when this register is not supported by cpc */
+	if (ret == -EOPNOTSUPP)
+		return sysfs_emit(buf, "<unsupported>\n");
+
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_auto_act_window(struct cpufreq_policy *policy,
+				     const char *buf, size_t count)
+{
+	u64 usec;
+	int ret;
+
+	ret = kstrtou64(buf, 0, &usec);
+	if (ret)
+		return ret;
+
+	ret = cppc_set_auto_act_window(policy->cpu, usec);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t show_energy_performance_preference_val(struct cpufreq_policy *policy, char *buf)
+{
+	u64 val;
+	int ret;
+
+	ret = cppc_get_epp_perf(policy->cpu, &val);
+
+	/* show "<unsupported>" when this register is not supported by cpc */
+	if (ret == -EOPNOTSUPP)
+		return sysfs_emit(buf, "<unsupported>\n");
+
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_energy_performance_preference_val(struct cpufreq_policy *policy,
+						       const char *buf, size_t count)
+{
+	u64 val;
+	int ret;
+
+	ret = kstrtou64(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	ret = cppc_set_epp(policy->cpu, val);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
 cpufreq_freq_attr_ro(freqdomain_cpus);
+cpufreq_freq_attr_rw(auto_select);
+cpufreq_freq_attr_rw(auto_act_window);
+cpufreq_freq_attr_rw(energy_performance_preference_val);
 
 static struct freq_attr *cppc_cpufreq_attr[] = {
 	&freqdomain_cpus,
+	&auto_select,
+	&auto_act_window,
+	&energy_performance_preference_val,
 	NULL,
 };
 
 static struct cpufreq_driver cppc_cpufreq_driver = {
-	.flags = CPUFREQ_CONST_LOOPS,
+	.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
 	.verify = cppc_verify_policy,
 	.target = cppc_cpufreq_set_target,
 	.get = cppc_cpufreq_get_rate,
@@ -915,52 +925,6 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
 	.name = "cppc_cpufreq",
 };
 
-/*
- * HISI platform does not support delivered performance counter and
- * reference performance counter. It can calculate the performance using the
- * platform specific mechanism. We reuse the desired performance register to
- * store the real performance calculated by the platform.
- */
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
-{
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	struct cppc_cpudata *cpu_data = policy->driver_data;
-	u64 desired_perf;
-	int ret;
-
-	cpufreq_cpu_put(policy);
-
-	ret = cppc_get_desired_perf(cpu, &desired_perf);
-	if (ret < 0)
-		return -EIO;
-
-	return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
-}
-
-static void cppc_check_hisi_workaround(void)
-{
-	struct acpi_table_header *tbl;
-	acpi_status status = AE_OK;
-	int i;
-
-	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
-	if (ACPI_FAILURE(status) || !tbl)
-		return;
-
-	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
-		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
-		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
-		    wa_info[i].oem_revision == tbl->oem_revision) {
-			/* Overwrite the get() callback */
-			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
-			fie_disabled = FIE_DISABLED;
-			break;
-		}
-	}
-
-	acpi_put_table(tbl);
-}
-
 static int __init cppc_cpufreq_init(void)
 {
 	int ret;
@@ -968,7 +932,6 @@ static int __init cppc_cpufreq_init(void)
 	if (!acpi_cpc_valid())
 		return -ENODEV;
 
-	cppc_check_hisi_workaround();
 	cppc_freq_invariance_init();
 	populate_efficiency_class();
 
@@ -979,24 +942,10 @@ static int __init cppc_cpufreq_init(void)
 	return ret;
 }
 
-static inline void free_cpu_data(void)
-{
-	struct cppc_cpudata *iter, *tmp;
-
-	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
-		free_cpumask_var(iter->shared_cpu_map);
-		list_del(&iter->node);
-		kfree(iter);
-	}
-
-}
-
 static void __exit cppc_cpufreq_exit(void)
 {
 	cpufreq_unregister_driver(&cppc_cpufreq_driver);
 	cppc_freq_invariance_exit();
-
-	free_cpu_data();
 }
 
 module_exit(cppc_cpufreq_exit);
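
Editor's note on the readback math above: cppc_cpufreq_get_rate() now derives delivered performance from two feedback-counter samples taken 2us apart, as delivered_perf = reference_perf * delta_delivered / delta_reference, and the reworked cppc_perf_from_fbctrs() returns 0 for an unusable sample (either delta is zero, e.g. across an idle period) so the caller can fall back to the desired-perf register. The following is a minimal user-space C sketch of just that calculation; the struct name, field layout and sample values are illustrative stand-ins, not the kernel's struct cppc_perf_fb_ctrs, though the wraparound handling mirrors get_delta() above.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's struct cppc_perf_fb_ctrs. */
struct fb_ctrs {
	uint64_t reference;		/* reference counter reading */
	uint64_t delivered;		/* delivered counter reading */
	uint64_t reference_perf;	/* perf level of the reference counter */
};

/*
 * Mirrors get_delta() above: plain 64-bit difference, falling back to
 * 32-bit arithmetic (which wraps around naturally) when the platform's
 * counters are only 32 bits wide.
 */
static uint64_t get_delta(uint64_t t1, uint64_t t0)
{
	if (t1 > t0 || t0 > ~(uint32_t)0)
		return t1 - t0;

	return (uint32_t)t1 - (uint32_t)t0;
}

/*
 * Mirrors the reworked cppc_perf_from_fbctrs(): returns 0 for an
 * unusable sample so the caller can fall back to desired perf.
 */
static uint64_t perf_from_fbctrs(const struct fb_ctrs *t0,
				 const struct fb_ctrs *t1)
{
	uint64_t delta_reference = get_delta(t1->reference, t0->reference);
	uint64_t delta_delivered = get_delta(t1->delivered, t0->delivered);

	if (!delta_reference || !delta_delivered)
		return 0;

	return t0->reference_perf * delta_delivered / delta_reference;
}

int main(void)
{
	/* Made-up readings: delivered ticks at 80% of the reference rate. */
	struct fb_ctrs t0 = { 1000, 5000, 100 };
	struct fb_ctrs t1 = { 2000, 5800, 100 };

	/* Prints 80: the CPU ran at perf level 80 between the samples. */
	printf("delivered perf: %llu\n",
	       (unsigned long long)perf_from_fbctrs(&t0, &t1));
	return 0;
}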
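Likewise, the perf/frequency conversion that the removed driver-local cppc_cpufreq_perf_to_khz()/cppc_cpufreq_khz_to_perf() helpers (and their DMI fallback) implemented is now provided by the ACPI CPPC core as cppc_perf_to_khz()/cppc_khz_to_perf(), which also absorb the MHz-to-kHz scaling dropped from cppc_cpufreq_get_cpu_data(). The conversion is an affine interpolation through the points (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq). Below is a standalone sketch of that math, assuming both frequency registers are populated (the kernel helpers also cover platforms where they are not, which this sketch ignores); the capability numbers are invented for illustration.

#include <stdint.h>
#include <stdio.h>

struct perf_caps {
	uint64_t lowest_perf, nominal_perf;
	uint64_t lowest_freq, nominal_freq;	/* kHz */
};

/*
 * Affine map through (lowest_perf, lowest_freq) and
 * (nominal_perf, nominal_freq): freq = offset + perf * mul / div.
 */
static uint64_t perf_to_khz(const struct perf_caps *c, uint64_t perf)
{
	uint64_t mul = c->nominal_freq - c->lowest_freq;
	uint64_t div = c->nominal_perf - c->lowest_perf;
	int64_t offset = (int64_t)c->nominal_freq -
			 (int64_t)(c->nominal_perf * mul / div);
	int64_t khz = offset + (int64_t)(perf * mul / div);

	return khz > 0 ? (uint64_t)khz : 0;
}

/* Inverse map: perf = offset + freq * mul / div. */
static uint64_t khz_to_perf(const struct perf_caps *c, uint64_t khz)
{
	uint64_t mul = c->nominal_perf - c->lowest_perf;
	uint64_t div = c->nominal_freq - c->lowest_freq;
	int64_t offset = (int64_t)c->nominal_perf -
			 (int64_t)(c->nominal_freq * mul / div);
	int64_t perf = offset + (int64_t)(khz * mul / div);

	return perf > 0 ? (uint64_t)perf : 0;
}

int main(void)
{
	/* Made-up capabilities: perf 10..100 maps to 500 MHz..2.0 GHz. */
	struct perf_caps caps = {
		.lowest_perf = 10, .nominal_perf = 100,
		.lowest_freq = 500000, .nominal_freq = 2000000,
	};

	printf("perf 55 -> %llu kHz\n",
	       (unsigned long long)perf_to_khz(&caps, 55));
	printf("1250000 kHz -> perf %llu\n",
	       (unsigned long long)khz_to_perf(&caps, 1250000));
	return 0;
}

With these invented caps, perf 55 maps to exactly 1250000 kHz and back again, showing the two helpers are inverses on in-range values.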
