Diffstat (limited to 'drivers/cpufreq')
77 files changed, 4470 insertions, 2955 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 94e55c40970a..78702a08364f 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -217,8 +217,34 @@ config CPUFREQ_DT If in doubt, say N. +config CPUFREQ_DT_RUST + tristate "Rust based Generic DT based cpufreq driver" + depends on HAVE_CLK && OF && RUST + select CPUFREQ_DT_PLATDEV + select PM_OPP + help + This adds a Rust based generic DT based cpufreq driver for frequency + management. It supports both uniprocessor (UP) and symmetric + multiprocessor (SMP) systems. + + If in doubt, say N. + +config CPUFREQ_VIRT + tristate "Virtual cpufreq driver" + depends on GENERIC_ARCH_TOPOLOGY + help + This adds a virtualized cpufreq driver for guest kernels that + read/writes to a MMIO region for a virtualized cpufreq device to + communicate with the host. It sends performance requests to the host + which gets used as a hint to schedule vCPU threads and select CPU + frequency. If a VM does not support a virtualized FIE such as AMUs, + it updates the frequency scaling factor by polling host CPU frequency + to enable accurate Per-Entity Load Tracking for tasks running in the guest. + + If in doubt, say N. + config CPUFREQ_DT_PLATDEV - tristate "Generic DT based cpufreq platdev driver" + bool "Generic DT based cpufreq platdev driver" depends on OF help This adds a generic DT based cpufreq platdev driver for frequency @@ -231,9 +257,7 @@ if X86 source "drivers/cpufreq/Kconfig.x86" endif -if ARM || ARM64 source "drivers/cpufreq/Kconfig.arm" -endif if PPC32 || PPC64 source "drivers/cpufreq/Kconfig.powerpc" @@ -262,6 +286,18 @@ config LOONGSON2_CPUFREQ If in doubt, say N. endif +if LOONGARCH +config LOONGSON3_CPUFREQ + tristate "Loongson3 CPUFreq Driver" + help + This option adds a CPUFreq driver for Loongson processors which + support software configurable cpu frequency. + + Loongson-3 family processors support this feature. + + If in doubt, say N. +endif + if SPARC64 config SPARC_US3_CPUFREQ tristate "UltraSPARC-III CPU Frequency driver" @@ -301,8 +337,6 @@ config QORIQ_CPUFREQ This adds the CPUFreq driver support for Freescale QorIQ SoCs which are capable of changing the CPU's frequency dynamically. -endif - config ACPI_CPPC_CPUFREQ tristate "CPUFreq driver based on the ACPI CPPC spec" depends on ACPI_PROCESSOR @@ -331,4 +365,6 @@ config ACPI_CPPC_CPUFREQ_FIE If in doubt, say N. +endif + endmenu diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 96b404ce829f..0d46402e3094 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -5,7 +5,7 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM tristate "Allwinner nvmem based SUN50I CPUFreq driver" - depends on ARCH_SUNXI + depends on ARCH_SUNXI || COMPILE_TEST depends on NVMEM_SUNXI_SID select PM_OPP help @@ -15,6 +15,15 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM To compile this driver as a module, choose M here: the module will be called sun50i-cpufreq-nvmem. +config ARM_AIROHA_SOC_CPUFREQ + tristate "Airoha EN7581 SoC CPUFreq support" + depends on ARCH_AIROHA || COMPILE_TEST + depends on OF + select PM_OPP + default ARCH_AIROHA + help + This adds the CPUFreq driver for Airoha EN7581 SoCs. 
+ config ARM_APPLE_SOC_CPUFREQ tristate "Apple Silicon SoC CPUFreq support" depends on ARCH_APPLE || (COMPILE_TEST && 64BIT) @@ -26,15 +35,17 @@ config ARM_APPLE_SOC_CPUFREQ config ARM_ARMADA_37XX_CPUFREQ tristate "Armada 37xx CPUFreq support" - depends on ARCH_MVEBU && CPUFREQ_DT + depends on ARCH_MVEBU || COMPILE_TEST + depends on CPUFREQ_DT help This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. The Armada 37xx PMU supports 4 frequency and VDD levels. config ARM_ARMADA_8K_CPUFREQ tristate "Armada 8K CPUFreq driver" - depends on ARCH_MVEBU && CPUFREQ_DT - select ARMADA_AP_CPU_CLK + depends on ARCH_MVEBU || COMPILE_TEST + depends on CPUFREQ_DT + select ARMADA_AP_CPU_CLK if COMMON_CLK help This enables the CPUFreq driver support for Marvell Armada8k SOCs. @@ -56,7 +67,7 @@ config ARM_SCPI_CPUFREQ config ARM_VEXPRESS_SPC_CPUFREQ tristate "Versatile Express SPC based CPUfreq driver" depends on ARM_CPU_TOPOLOGY && HAVE_CLK - depends on ARCH_VEXPRESS_SPC + depends on ARCH_VEXPRESS_SPC || COMPILE_TEST select PM_OPP help This add the CPUfreq driver support for Versatile Express @@ -65,7 +76,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ config ARM_BRCMSTB_AVS_CPUFREQ tristate "Broadcom STB AVS CPUfreq driver" depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST - default y + default y if ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ help Some Broadcom STB SoCs use a co-processor running proprietary firmware ("AVS") to handle voltage and frequency scaling. This driver provides @@ -75,8 +86,9 @@ config ARM_BRCMSTB_AVS_CPUFREQ config ARM_HIGHBANK_CPUFREQ tristate "Calxeda Highbank-based" - depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR - default m + depends on ARCH_HIGHBANK || COMPILE_TEST + depends on CPUFREQ_DT && REGULATOR && PL320_MBOX + default m if ARCH_HIGHBANK help This adds the CPUFreq driver for Calxeda Highbank SoC based boards. @@ -96,7 +108,8 @@ config ARM_IMX6Q_CPUFREQ config ARM_IMX_CPUFREQ_DT tristate "Freescale i.MX8M cpufreq support" - depends on ARCH_MXC && CPUFREQ_DT + depends on CPUFREQ_DT + depends on ARCH_MXC || COMPILE_TEST help This adds cpufreq driver support for Freescale i.MX7/i.MX8M series SoCs, based on cpufreq-dt. @@ -111,7 +124,8 @@ config ARM_KIRKWOOD_CPUFREQ config ARM_MEDIATEK_CPUFREQ tristate "CPU Frequency scaling support for MediaTek SoCs" - depends on ARCH_MEDIATEK && REGULATOR + depends on ARCH_MEDIATEK || COMPILE_TEST + depends on REGULATOR select PM_OPP help This adds the CPUFreq driver support for MediaTek SoCs. @@ -119,7 +133,7 @@ config ARM_MEDIATEK_CPUFREQ config ARM_MEDIATEK_CPUFREQ_HW tristate "MediaTek CPUFreq HW driver" depends on ARCH_MEDIATEK || COMPILE_TEST - default m + default m if ARCH_MEDIATEK help Support for the CPUFreq HW driver. Some MediaTek chipsets have a HW engine to offload the steps @@ -130,12 +144,12 @@ config ARM_MEDIATEK_CPUFREQ_HW config ARM_OMAP2PLUS_CPUFREQ bool "TI OMAP2+" - depends on ARCH_OMAP2PLUS + depends on ARCH_OMAP2PLUS || COMPILE_TEST default ARCH_OMAP2PLUS config ARM_QCOM_CPUFREQ_NVMEM tristate "Qualcomm nvmem based CPUFreq" - depends on ARCH_QCOM + depends on ARCH_QCOM || COMPILE_TEST depends on NVMEM_QCOM_QFPROM depends on QCOM_SMEM select PM_OPP @@ -166,8 +180,8 @@ config ARM_RASPBERRYPI_CPUFREQ config ARM_S3C64XX_CPUFREQ bool "Samsung S3C64XX" - depends on CPU_S3C6410 - default y + depends on CPU_S3C6410 || COMPILE_TEST + default CPU_S3C6410 help This adds the CPUFreq driver for Samsung S3C6410 SoC. 
@@ -175,8 +189,8 @@ config ARM_S3C64XX_CPUFREQ config ARM_S5PV210_CPUFREQ bool "Samsung S5PV210 and S5PC110" - depends on CPU_S5PV210 - default y + depends on CPU_S5PV210 || COMPILE_TEST + default CPU_S5PV210 help This adds the CPUFreq driver for Samsung S5PV210 and S5PC110 SoCs. @@ -199,14 +213,15 @@ config ARM_SCMI_CPUFREQ config ARM_SPEAR_CPUFREQ bool "SPEAr CPUFreq support" - depends on PLAT_SPEAR - default y + depends on PLAT_SPEAR || COMPILE_TEST + default PLAT_SPEAR help This adds the CPUFreq driver support for SPEAr SOCs. config ARM_STI_CPUFREQ tristate "STi CPUFreq support" - depends on CPUFREQ_DT && SOC_STIH407 + depends on CPUFREQ_DT + depends on SOC_STIH407 || COMPILE_TEST help This driver uses the generic OPP framework to match the running platform with a predefined set of suitable values. If not provided @@ -216,35 +231,39 @@ config ARM_STI_CPUFREQ config ARM_TEGRA20_CPUFREQ tristate "Tegra20/30 CPUFreq support" - depends on ARCH_TEGRA && CPUFREQ_DT - default y + depends on ARCH_TEGRA || COMPILE_TEST + depends on CPUFREQ_DT + default ARCH_TEGRA help This adds the CPUFreq driver support for Tegra20/30 SOCs. config ARM_TEGRA124_CPUFREQ bool "Tegra124 CPUFreq support" - depends on ARCH_TEGRA && CPUFREQ_DT - default y + depends on ARCH_TEGRA || COMPILE_TEST + depends on CPUFREQ_DT + default ARCH_TEGRA help This adds the CPUFreq driver support for Tegra124 SOCs. config ARM_TEGRA186_CPUFREQ tristate "Tegra186 CPUFreq support" - depends on ARCH_TEGRA && TEGRA_BPMP + depends on ARCH_TEGRA || COMPILE_TEST + depends on TEGRA_BPMP help This adds the CPUFreq driver support for Tegra186 SOCs. config ARM_TEGRA194_CPUFREQ tristate "Tegra194 CPUFreq support" - depends on ARCH_TEGRA_194_SOC && TEGRA_BPMP - default y + depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST) + depends on TEGRA_BPMP + default ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC help This adds CPU frequency driver support for Tegra194 SOCs. config ARM_TI_CPUFREQ bool "Texas Instruments CPUFreq support" - depends on ARCH_OMAP2PLUS || ARCH_K3 - default y + depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST + default ARCH_OMAP2PLUS || ARCH_K3 help This driver enables valid OPPs on the running platform based on values contained within the SoC in use. Enable this in order to @@ -255,7 +274,7 @@ config ARM_TI_CPUFREQ config ARM_PXA2xx_CPUFREQ tristate "Intel PXA2xx CPUfreq driver" - depends on PXA27x || PXA25x + depends on PXA27x || PXA25x || COMPILE_TEST help This add the CPUFreq driver support for Intel PXA2xx SOCs. diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc index 58151ca56695..551e65d35a1d 100644 --- a/drivers/cpufreq/Kconfig.powerpc +++ b/drivers/cpufreq/Kconfig.powerpc @@ -1,29 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -config CPU_FREQ_CBE - tristate "CBE frequency scaling" - depends on CBE_RAS && PPC_CELL - default m - help - This adds the cpufreq driver for Cell BE processors. - For details, take a look at <file:Documentation/cpu-freq/>. - If you don't have such processor, say N - -config CPU_FREQ_CBE_PMI - bool "CBE frequency scaling using PMI interface" - depends on CPU_FREQ_CBE - default n - help - Select this, if you want to use the PMI interface to switch - frequencies. Using PMI, the processor will not only be able to run at - lower speed, but also at lower core voltage. 
- -config CPU_FREQ_MAPLE - bool "Support for Maple 970FX Evaluation Board" - depends on PPC_MAPLE - help - This adds support for frequency switching on Maple 970FX - Evaluation Board and compatible boards (IBM JS2x blades). - config CPU_FREQ_PMAC bool "Support for Apple PowerBooks" depends on ADB_PMU && PPC32 diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 438c9e75a04d..2c5c228408bf 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -71,6 +71,7 @@ config X86_AMD_PSTATE_DEFAULT_MODE config X86_AMD_PSTATE_UT tristate "selftest for AMD Processor P-State driver" depends on X86 && ACPI_PROCESSOR + depends on X86_AMD_PSTATE default n help This kernel module is used for testing. It's safe to say M here. @@ -339,3 +340,15 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK option lets the probing code bypass some of those checks if the parameter "relaxed_check=1" is passed to the module. +config CPUFREQ_ARCH_CUR_FREQ + default y + bool "Current frequency derived from HW provided feedback" + help + This determines whether the scaling_cur_freq sysfs attribute returns + the last requested frequency or a more precise value based on hardware + provided feedback (as architected counters). + Given that a more precise frequency can now be provided via the + cpuinfo_avg_freq attribute, by enabling this option, + scaling_cur_freq maintains the provision of a counter based frequency, + for compatibility reasons. + diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 8d141c71b016..d38526b8e063 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -15,7 +15,9 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o +obj-$(CONFIG_CPUFREQ_DT_RUST) += rcpufreq_dt.o obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o +obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o # Traces CFLAGS_amd-pstate-trace.o := -I$(src) @@ -52,6 +54,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o ################################################################################## # ARM SoC drivers +obj-$(CONFIG_ARM_AIROHA_SOC_CPUFREQ) += airoha-cpufreq.o obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o @@ -89,10 +92,6 @@ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o ################################################################################## # PowerPC platform drivers -obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o -ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o -obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o -obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o @@ -103,6 +102,7 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o # Other platform drivers obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o +obj-$(CONFIG_LOONGSON3_CPUFREQ) += loongson3_cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 37f1cdf46d29..4f7f9201598d 100644 --- 
a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -50,8 +50,6 @@ enum { #define AMD_MSR_RANGE (0x7) #define HYGON_MSR_RANGE (0x7) -#define MSR_K7_HWCR_CPB_DIS (1ULL << 25) - struct acpi_cpufreq_data { unsigned int resume; unsigned int cpu_feature; @@ -75,20 +73,17 @@ static unsigned int acpi_pstate_strict; static bool boost_state(unsigned int cpu) { - u32 lo, hi; u64 msr; switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: - rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); - msr = lo | ((u64)hi << 32); + rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr); return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); case X86_VENDOR_HYGON: case X86_VENDOR_AMD: - rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); - msr = lo | ((u64)hi << 32); + rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr); return !(msr & MSR_K7_HWCR_CPB_DIS); } return false; @@ -115,14 +110,14 @@ static int boost_set_msr(bool enable) return -EINVAL; } - rdmsrl(msr_addr, val); + rdmsrq(msr_addr, val); if (enable) val &= ~msr_mask; else val |= msr_mask; - wrmsrl(msr_addr, val); + wrmsrq(msr_addr, val); return 0; } @@ -628,7 +623,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) #endif #ifdef CONFIG_ACPI_CPPC_LIB -static u64 get_max_boost_ratio(unsigned int cpu) +/* + * get_max_boost_ratio: Computes the max_boost_ratio as the ratio + * between the highest_perf and the nominal_perf. + * + * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal + * frequency via @nominal_freq if it is non-NULL pointer. + */ +static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq) { struct cppc_perf_caps perf_caps; u64 highest_perf, nominal_perf; @@ -644,13 +646,22 @@ static u64 get_max_boost_ratio(unsigned int cpu) return 0; } - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) - highest_perf = amd_get_highest_perf(); - else + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { + ret = amd_get_boost_ratio_numerator(cpu, &highest_perf); + if (ret) { + pr_debug("CPU%d: Unable to get boost ratio numerator (%d)\n", + cpu, ret); + return 0; + } + } else { highest_perf = perf_caps.highest_perf; + } nominal_perf = perf_caps.nominal_perf; + if (nominal_freq) + *nominal_freq = perf_caps.nominal_freq * 1000; + if (!highest_perf || !nominal_perf) { pr_debug("CPU%d: highest or nominal performance missing\n", cpu); return 0; @@ -663,8 +674,12 @@ static u64 get_max_boost_ratio(unsigned int cpu) return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf); } + #else -static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; } +static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq) +{ + return 0; +} #endif static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) @@ -674,9 +689,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) struct acpi_cpufreq_data *data; unsigned int cpu = policy->cpu; struct cpuinfo_x86 *c = &cpu_data(cpu); + u64 max_boost_ratio, nominal_freq = 0; unsigned int valid_states = 0; unsigned int result = 0; - u64 max_boost_ratio; unsigned int i; #ifdef CONFIG_SMP static int blacklisted; @@ -826,16 +841,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) } freq_table[valid_states].frequency = CPUFREQ_TABLE_END; - max_boost_ratio = get_max_boost_ratio(cpu); + max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq); if (max_boost_ratio) { - unsigned int freq = freq_table[0].frequency; + unsigned int freq = nominal_freq; /* - * Because the loop above sorts the freq_table entries in the - * 
descending order, freq is the maximum frequency in the table. - * Assume that it corresponds to the CPPC nominal frequency and - * use it to set cpuinfo.max_freq. + * The loop above sorts the freq_table entries in the + * descending order. If ACPI CPPC has not advertised + * the nominal frequency (this is possible in CPPC + * revisions prior to 3), then use the first entry in + * the pstate table as a proxy for nominal frequency. */ + if (!freq) + freq = freq_table[0].frequency; + policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT; } else { /* @@ -890,8 +909,19 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency) pr_warn(FW_WARN "P-state 0 is not max freq\n"); - if (acpi_cpufreq_driver.set_boost) - set_boost(policy, acpi_cpufreq_driver.boost_enabled); + if (acpi_cpufreq_driver.set_boost) { + if (policy->boost_supported) { + /* + * The firmware may have altered boost state while the + * CPU was offline (for example during a suspend-resume + * cycle). + */ + if (policy->boost_enabled != boost_state(cpu)) + set_boost(policy, policy->boost_enabled); + } else { + policy->boost_supported = true; + } + } return result; @@ -906,7 +936,7 @@ err_free: return result; } -static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = policy->driver_data; @@ -919,8 +949,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) free_cpumask_var(data->freqdomain_cpus); kfree(policy->freq_table); kfree(data); - - return 0; } static int acpi_cpufreq_resume(struct cpufreq_policy *policy) @@ -935,7 +963,6 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy) } static struct freq_attr *acpi_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, &freqdomain_cpus, #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB &cpb, @@ -1024,7 +1051,7 @@ static struct platform_driver acpi_cpufreq_platdrv = { .driver = { .name = "acpi-cpufreq", }, - .remove_new = acpi_cpufreq_remove, + .remove = acpi_cpufreq_remove, }; static int __init acpi_cpufreq_init(void) diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c new file mode 100644 index 000000000000..4fe39eadd163 --- /dev/null +++ b/drivers/cpufreq/airoha-cpufreq.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bitfield.h> +#include <linux/cpufreq.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> + +#include "cpufreq-dt.h" + +struct airoha_cpufreq_priv { + int opp_token; + struct dev_pm_domain_list *pd_list; + struct platform_device *cpufreq_dt; +}; + +static struct platform_device *cpufreq_pdev; + +/* NOP function to disable OPP from setting clock */ +static int airoha_cpufreq_config_clks_nop(struct device *dev, + struct opp_table *opp_table, + struct dev_pm_opp *opp, + void *data, bool scaling_down) +{ + return 0; +} + +static const char * const airoha_cpufreq_clk_names[] = { "cpu", NULL }; +static const char * const airoha_cpufreq_pd_names[] = { "perf" }; + +static int airoha_cpufreq_probe(struct platform_device *pdev) +{ + const struct dev_pm_domain_attach_data attach_data = { + .pd_names = airoha_cpufreq_pd_names, + .num_pd_names = ARRAY_SIZE(airoha_cpufreq_pd_names), + .pd_flags = PD_FLAG_DEV_LINK_ON | PD_FLAG_REQUIRED_OPP, + }; + struct dev_pm_opp_config config = { + .clk_names = 
airoha_cpufreq_clk_names, + .config_clks = airoha_cpufreq_config_clks_nop, + }; + struct platform_device *cpufreq_dt; + struct airoha_cpufreq_priv *priv; + struct device *dev = &pdev->dev; + struct device *cpu_dev; + int ret; + + /* CPUs refer to the same OPP table */ + cpu_dev = get_cpu_device(0); + if (!cpu_dev) + return -ENODEV; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + /* Set OPP table conf with NOP config_clks */ + priv->opp_token = dev_pm_opp_set_config(cpu_dev, &config); + if (priv->opp_token < 0) + return dev_err_probe(dev, priv->opp_token, "Failed to set OPP config\n"); + + /* Attach PM for OPP */ + ret = dev_pm_domain_attach_list(cpu_dev, &attach_data, + &priv->pd_list); + if (ret) + goto clear_opp_config; + + cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0); + ret = PTR_ERR_OR_ZERO(cpufreq_dt); + if (ret) { + dev_err(dev, "failed to create cpufreq-dt device: %d\n", ret); + goto detach_pm; + } + + priv->cpufreq_dt = cpufreq_dt; + platform_set_drvdata(pdev, priv); + + return 0; + +detach_pm: + dev_pm_domain_detach_list(priv->pd_list); +clear_opp_config: + dev_pm_opp_clear_config(priv->opp_token); + + return ret; +} + +static void airoha_cpufreq_remove(struct platform_device *pdev) +{ + struct airoha_cpufreq_priv *priv = platform_get_drvdata(pdev); + + platform_device_unregister(priv->cpufreq_dt); + + dev_pm_domain_detach_list(priv->pd_list); + + dev_pm_opp_clear_config(priv->opp_token); +} + +static struct platform_driver airoha_cpufreq_driver = { + .probe = airoha_cpufreq_probe, + .remove = airoha_cpufreq_remove, + .driver = { + .name = "airoha-cpufreq", + }, +}; + +static const struct of_device_id airoha_cpufreq_match_list[] __initconst = { + { .compatible = "airoha,en7581" }, + {}, +}; +MODULE_DEVICE_TABLE(of, airoha_cpufreq_match_list); + +static int __init airoha_cpufreq_init(void) +{ + struct device_node *np = of_find_node_by_path("/"); + const struct of_device_id *match; + int ret; + + if (!np) + return -ENODEV; + + match = of_match_node(airoha_cpufreq_match_list, np); + of_node_put(np); + if (!match) + return -ENODEV; + + ret = platform_driver_register(&airoha_cpufreq_driver); + if (unlikely(ret < 0)) + return ret; + + cpufreq_pdev = platform_device_register_data(NULL, "airoha-cpufreq", + -1, match, sizeof(*match)); + ret = PTR_ERR_OR_ZERO(cpufreq_pdev); + if (ret) + platform_driver_unregister(&airoha_cpufreq_driver); + + return ret; +} +module_init(airoha_cpufreq_init); + +static void __exit airoha_cpufreq_exit(void) +{ + platform_device_unregister(cpufreq_pdev); + platform_driver_unregister(&airoha_cpufreq_driver); +} +module_exit(airoha_cpufreq_exit); + +MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>"); +MODULE_DESCRIPTION("CPUfreq driver for Airoha SoCs"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h index 35f38ae67fb1..32e1bdc588c5 100644 --- a/drivers/cpufreq/amd-pstate-trace.h +++ b/drivers/cpufreq/amd-pstate-trace.h @@ -24,15 +24,14 @@ TRACE_EVENT(amd_pstate_perf, - TP_PROTO(unsigned long min_perf, - unsigned long target_perf, - unsigned long capacity, + TP_PROTO(u8 min_perf, + u8 target_perf, + u8 capacity, u64 freq, u64 mperf, u64 aperf, u64 tsc, unsigned int cpu_id, - bool changed, bool fast_switch ), @@ -44,20 +43,18 @@ TRACE_EVENT(amd_pstate_perf, aperf, tsc, cpu_id, - changed, fast_switch ), TP_STRUCT__entry( - __field(unsigned long, min_perf) - __field(unsigned long, target_perf) - __field(unsigned long, capacity) 
+ __field(u8, min_perf) + __field(u8, target_perf) + __field(u8, capacity) __field(unsigned long long, freq) __field(unsigned long long, mperf) __field(unsigned long long, aperf) __field(unsigned long long, tsc) __field(unsigned int, cpu_id) - __field(bool, changed) __field(bool, fast_switch) ), @@ -70,24 +67,72 @@ TRACE_EVENT(amd_pstate_perf, __entry->aperf = aperf; __entry->tsc = tsc; __entry->cpu_id = cpu_id; - __entry->changed = changed; __entry->fast_switch = fast_switch; ), - TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s", - (unsigned long)__entry->min_perf, - (unsigned long)__entry->target_perf, - (unsigned long)__entry->capacity, + TP_printk("amd_min_perf=%hhu amd_des_perf=%hhu amd_max_perf=%hhu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s", + (u8)__entry->min_perf, + (u8)__entry->target_perf, + (u8)__entry->capacity, (unsigned long long)__entry->freq, (unsigned long long)__entry->mperf, (unsigned long long)__entry->aperf, (unsigned long long)__entry->tsc, (unsigned int)__entry->cpu_id, - (__entry->changed) ? "true" : "false", (__entry->fast_switch) ? "true" : "false" ) ); +TRACE_EVENT(amd_pstate_epp_perf, + + TP_PROTO(unsigned int cpu_id, + u8 highest_perf, + u8 epp, + u8 min_perf, + u8 max_perf, + bool boost, + bool changed + ), + + TP_ARGS(cpu_id, + highest_perf, + epp, + min_perf, + max_perf, + boost, + changed), + + TP_STRUCT__entry( + __field(unsigned int, cpu_id) + __field(u8, highest_perf) + __field(u8, epp) + __field(u8, min_perf) + __field(u8, max_perf) + __field(bool, boost) + __field(bool, changed) + ), + + TP_fast_assign( + __entry->cpu_id = cpu_id; + __entry->highest_perf = highest_perf; + __entry->epp = epp; + __entry->min_perf = min_perf; + __entry->max_perf = max_perf; + __entry->boost = boost; + __entry->changed = changed; + ), + + TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u", + (unsigned int)__entry->cpu_id, + (u8)__entry->min_perf, + (u8)__entry->max_perf, + (u8)__entry->highest_perf, + (u8)__entry->epp, + (bool)__entry->boost, + (bool)__entry->changed + ) +); + #endif /* _AMD_PSTATE_TRACE_H */ /* This part must be outside protection */ diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c index f04ae67dda37..447b9aa5ce40 100644 --- a/drivers/cpufreq/amd-pstate-ut.c +++ b/drivers/cpufreq/amd-pstate-ut.c @@ -22,43 +22,40 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/bitfield.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/fs.h> -#include <linux/amd-pstate.h> +#include <linux/cleanup.h> #include <acpi/cppc_acpi.h> -/* - * Abbreviations: - * amd_pstate_ut: used as a shortform for AMD P-State unit test. 
- * It helps to keep variable names smaller, simpler - */ -enum amd_pstate_ut_result { - AMD_PSTATE_UT_RESULT_PASS, - AMD_PSTATE_UT_RESULT_FAIL, -}; +#include <asm/msr.h> + +#include "amd-pstate.h" + struct amd_pstate_ut_struct { const char *name; - void (*func)(u32 index); - enum amd_pstate_ut_result result; + int (*func)(u32 index); }; /* * Kernel module for testing the AMD P-State unit test */ -static void amd_pstate_ut_acpi_cpc_valid(u32 index); -static void amd_pstate_ut_check_enabled(u32 index); -static void amd_pstate_ut_check_perf(u32 index); -static void amd_pstate_ut_check_freq(u32 index); +static int amd_pstate_ut_acpi_cpc_valid(u32 index); +static int amd_pstate_ut_check_enabled(u32 index); +static int amd_pstate_ut_check_perf(u32 index); +static int amd_pstate_ut_check_freq(u32 index); +static int amd_pstate_ut_check_driver(u32 index); static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = { {"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid }, {"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled }, {"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf }, - {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq } + {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq }, + {"amd_pstate_ut_check_driver", amd_pstate_ut_check_driver } }; static bool get_shared_mem(void) @@ -74,71 +71,67 @@ static bool get_shared_mem(void) /* * check the _CPC object is present in SBIOS. */ -static void amd_pstate_ut_acpi_cpc_valid(u32 index) +static int amd_pstate_ut_acpi_cpc_valid(u32 index) { - if (acpi_cpc_valid()) - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS; - else { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; + if (!acpi_cpc_valid()) { pr_err("%s the _CPC object is not present in SBIOS!\n", __func__); + return -EINVAL; } + + return 0; } -static void amd_pstate_ut_pstate_enable(u32 index) +/* + * check if amd pstate is enabled + */ +static int amd_pstate_ut_check_enabled(u32 index) { - int ret = 0; u64 cppc_enable = 0; + int ret; - ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable); + if (get_shared_mem()) + return 0; + + ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable); if (ret) { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; - pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret); - return; + pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret); + return ret; } - if (cppc_enable) - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS; - else { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; + + if (!cppc_enable) { pr_err("%s amd pstate must be enabled!\n", __func__); + return -EINVAL; } -} -/* - * check if amd pstate is enabled - */ -static void amd_pstate_ut_check_enabled(u32 index) -{ - if (get_shared_mem()) - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS; - else - amd_pstate_ut_pstate_enable(index); + return 0; } /* * check if performance values are reasonable. 
* highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0 */ -static void amd_pstate_ut_check_perf(u32 index) +static int amd_pstate_ut_check_perf(u32 index) { int cpu = 0, ret = 0; u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0; u64 cap1 = 0; struct cppc_perf_caps cppc_perf; - struct cpufreq_policy *policy = NULL; - struct amd_cpudata *cpudata = NULL; + union perf_cached cur_perf; + + for_each_online_cpu(cpu) { + struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL; + struct amd_cpudata *cpudata; - for_each_possible_cpu(cpu) { policy = cpufreq_cpu_get(cpu); if (!policy) - break; + continue; cpudata = policy->driver_data; if (get_shared_mem()) { ret = cppc_get_perf_caps(cpu, &cppc_perf); if (ret) { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret); - goto skip_test; + return ret; } highest_perf = cppc_perf.highest_perf; @@ -146,49 +139,46 @@ static void amd_pstate_ut_check_perf(u32 index) lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf; lowest_perf = cppc_perf.lowest_perf; } else { - ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1); + ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1); if (ret) { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret); - goto skip_test; + return ret; } - highest_perf = AMD_CPPC_HIGHEST_PERF(cap1); - nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1); - lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1); - lowest_perf = AMD_CPPC_LOWEST_PERF(cap1); + highest_perf = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1); + nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1); + lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1); + lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1); } - if ((highest_perf != READ_ONCE(cpudata->highest_perf)) || - (nominal_perf != READ_ONCE(cpudata->nominal_perf)) || - (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) || - (lowest_perf != READ_ONCE(cpudata->lowest_perf))) { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; - pr_err("%s cpu%d highest=%d %d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n", - __func__, cpu, highest_perf, cpudata->highest_perf, - nominal_perf, cpudata->nominal_perf, - lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf, - lowest_perf, cpudata->lowest_perf); - goto skip_test; + cur_perf = READ_ONCE(cpudata->perf); + if (highest_perf != cur_perf.highest_perf && !cpudata->hw_prefcore) { + pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n", + __func__, cpu, highest_perf, cur_perf.highest_perf); + return -EINVAL; + } + if (nominal_perf != cur_perf.nominal_perf || + (lowest_nonlinear_perf != cur_perf.lowest_nonlinear_perf) || + (lowest_perf != cur_perf.lowest_perf)) { + pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n", + __func__, cpu, nominal_perf, cur_perf.nominal_perf, + lowest_nonlinear_perf, cur_perf.lowest_nonlinear_perf, + lowest_perf, cur_perf.lowest_perf); + return -EINVAL; } if (!((highest_perf >= nominal_perf) && (nominal_perf > lowest_nonlinear_perf) && - (lowest_nonlinear_perf > lowest_perf) && + (lowest_nonlinear_perf >= lowest_perf) && (lowest_perf > 0))) { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n", 
__func__, cpu, highest_perf, nominal_perf, lowest_nonlinear_perf, lowest_perf); - goto skip_test; + return -EINVAL; } - cpufreq_cpu_put(policy); } - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS; - return; -skip_test: - cpufreq_cpu_put(policy); + return 0; } /* @@ -196,59 +186,88 @@ skip_test: * max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0 * check max freq when set support boost mode. */ -static void amd_pstate_ut_check_freq(u32 index) +static int amd_pstate_ut_check_freq(u32 index) { int cpu = 0; - struct cpufreq_policy *policy = NULL; - struct amd_cpudata *cpudata = NULL; - for_each_possible_cpu(cpu) { + for_each_online_cpu(cpu) { + struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL; + struct amd_cpudata *cpudata; + policy = cpufreq_cpu_get(cpu); if (!policy) - break; + continue; cpudata = policy->driver_data; - if (!((cpudata->max_freq >= cpudata->nominal_freq) && + if (!((policy->cpuinfo.max_freq >= cpudata->nominal_freq) && (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) && - (cpudata->lowest_nonlinear_freq > cpudata->min_freq) && - (cpudata->min_freq > 0))) { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; + (cpudata->lowest_nonlinear_freq >= policy->cpuinfo.min_freq) && + (policy->cpuinfo.min_freq > 0))) { pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n", - __func__, cpu, cpudata->max_freq, cpudata->nominal_freq, - cpudata->lowest_nonlinear_freq, cpudata->min_freq); - goto skip_test; + __func__, cpu, policy->cpuinfo.max_freq, cpudata->nominal_freq, + cpudata->lowest_nonlinear_freq, policy->cpuinfo.min_freq); + return -EINVAL; } - if (cpudata->min_freq != policy->min) { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; - pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n", - __func__, cpu, cpudata->min_freq, policy->min); - goto skip_test; + if (cpudata->lowest_nonlinear_freq != policy->min) { + pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n", + __func__, cpu, cpudata->lowest_nonlinear_freq, policy->min); + return -EINVAL; } if (cpudata->boost_supported) { - if ((policy->max == cpudata->max_freq) || - (policy->max == cpudata->nominal_freq)) - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS; - else { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; + if ((policy->max != policy->cpuinfo.max_freq) && + (policy->max != cpudata->nominal_freq)) { pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n", - __func__, cpu, policy->max, cpudata->max_freq, + __func__, cpu, policy->max, policy->cpuinfo.max_freq, cpudata->nominal_freq); - goto skip_test; + return -EINVAL; } } else { - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; pr_err("%s cpu%d must support boost!\n", __func__, cpu); - goto skip_test; + return -EINVAL; } - cpufreq_cpu_put(policy); } - amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS; - return; -skip_test: - cpufreq_cpu_put(policy); + return 0; +} + +static int amd_pstate_set_mode(enum amd_pstate_mode mode) +{ + const char *mode_str = amd_pstate_get_mode_string(mode); + + pr_debug("->setting mode to %s\n", mode_str); + + return amd_pstate_update_status(mode_str, strlen(mode_str)); +} + +static int amd_pstate_ut_check_driver(u32 index) +{ + enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE; + enum amd_pstate_mode orig_mode = amd_pstate_get_status(); + int ret; + + for (mode1 = 
AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) { + ret = amd_pstate_set_mode(mode1); + if (ret) + return ret; + for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) { + if (mode1 == mode2) + continue; + ret = amd_pstate_set_mode(mode2); + if (ret) + goto out; + } + } + +out: + if (ret) + pr_warn("%s: failed to update status for %s->%s: %d\n", __func__, + amd_pstate_get_mode_string(mode1), + amd_pstate_get_mode_string(mode2), ret); + + amd_pstate_set_mode(orig_mode); + return ret; } static int __init amd_pstate_ut_init(void) @@ -256,16 +275,12 @@ static int __init amd_pstate_ut_init(void) u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases); for (i = 0; i < arr_size; i++) { - amd_pstate_ut_cases[i].func(i); - switch (amd_pstate_ut_cases[i].result) { - case AMD_PSTATE_UT_RESULT_PASS: + int ret = amd_pstate_ut_cases[i].func(i); + + if (ret) + pr_err("%-4d %-20s\t fail: %d!\n", i+1, amd_pstate_ut_cases[i].name, ret); + else pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name); - break; - case AMD_PSTATE_UT_RESULT_FAIL: - default: - pr_info("%-4d %-20s\t fail!\n", i+1, amd_pstate_ut_cases[i].name); - break; - } } return 0; diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 2015c9fcc3c9..f3477ab37742 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -22,6 +22,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/bitfield.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -36,7 +37,6 @@ #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/static_call.h> -#include <linux/amd-pstate.h> #include <linux/topology.h> #include <acpi/processor.h> @@ -46,27 +46,47 @@ #include <asm/processor.h> #include <asm/cpufeature.h> #include <asm/cpu_device_id.h> + +#include "amd-pstate.h" #include "amd-pstate-trace.h" #define AMD_PSTATE_TRANSITION_LATENCY 20000 #define AMD_PSTATE_TRANSITION_DELAY 1000 -#define AMD_PSTATE_PREFCORE_THRESHOLD 166 +#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600 + +#define AMD_CPPC_EPP_PERFORMANCE 0x00 +#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80 +#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF +#define AMD_CPPC_EPP_POWERSAVE 0xFF + +static const char * const amd_pstate_mode_string[] = { + [AMD_PSTATE_UNDEFINED] = "undefined", + [AMD_PSTATE_DISABLE] = "disable", + [AMD_PSTATE_PASSIVE] = "passive", + [AMD_PSTATE_ACTIVE] = "active", + [AMD_PSTATE_GUIDED] = "guided", + NULL, +}; + +const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode) +{ + if (mode < 0 || mode >= AMD_PSTATE_MAX) + return NULL; + return amd_pstate_mode_string[mode]; +} +EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string); + +struct quirk_entry { + u32 nominal_freq; + u32 lowest_freq; +}; -/* - * TODO: We need more time to fine tune processors with shared memory solution - * with community together. - * - * There are some performance drops on the CPU benchmarks which reports from - * Suse. We are co-working with them to fine tune the shared memory solution. So - * we disable it by default to go acpi-cpufreq on these processors and add a - * module parameter to be able to enable it manually for debugging. 
- */ static struct cpufreq_driver *current_pstate_driver; static struct cpufreq_driver amd_pstate_driver; static struct cpufreq_driver amd_pstate_epp_driver; static int cppc_state = AMD_PSTATE_UNDEFINED; -static bool cppc_enabled; static bool amd_pstate_prefcore = true; +static struct quirk_entry *quirks; /* * AMD Energy Preference Performance (EPP) @@ -111,6 +131,54 @@ static unsigned int epp_values[] = { typedef int (*cppc_mode_transition_fn)(int); +static struct quirk_entry quirk_amd_7k62 = { + .nominal_freq = 2600, + .lowest_freq = 550, +}; + +static inline u8 freq_to_perf(union perf_cached perf, u32 nominal_freq, unsigned int freq_val) +{ + u32 perf_val = DIV_ROUND_UP_ULL((u64)freq_val * perf.nominal_perf, nominal_freq); + + return (u8)clamp(perf_val, perf.lowest_perf, perf.highest_perf); +} + +static inline u32 perf_to_freq(union perf_cached perf, u32 nominal_freq, u8 perf_val) +{ + return DIV_ROUND_UP_ULL((u64)nominal_freq * perf_val, + perf.nominal_perf); +} + +static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi) +{ + /** + * match the broken bios for family 17h processor support CPPC V2 + * broken BIOS lack of nominal_freq and lowest_freq capabilities + * definition in ACPI tables + */ + if (cpu_feature_enabled(X86_FEATURE_ZEN2)) { + quirks = dmi->driver_data; + pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident); + return 1; + } + + return 0; +} + +static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = { + { + .callback = dmi_matched_7k62_bios_bug, + .ident = "AMD EPYC 7K62", + .matches = { + DMI_MATCH(DMI_BIOS_VERSION, "5.14"), + DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"), + }, + .driver_data = &quirk_amd_7k62, + }, + {} +}; +MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table); + static inline int get_mode_idx_from_str(const char *str, size_t size) { int i; @@ -122,231 +190,273 @@ static inline int get_mode_idx_from_str(const char *str, size_t size) return -EINVAL; } -static DEFINE_MUTEX(amd_pstate_limits_lock); static DEFINE_MUTEX(amd_pstate_driver_lock); -static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached) +static u8 msr_get_epp(struct amd_cpudata *cpudata) { - u64 epp; + u64 value; int ret; - if (boot_cpu_has(X86_FEATURE_CPPC)) { - if (!cppc_req_cached) { - epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, - &cppc_req_cached); - if (epp) - return epp; - } - epp = (cppc_req_cached >> 24) & 0xFF; - } else { - ret = cppc_get_epp_perf(cpudata->cpu, &epp); - if (ret < 0) { - pr_debug("Could not retrieve energy perf value (%d)\n", ret); - return -EIO; - } + ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); + if (ret < 0) { + pr_debug("Could not retrieve energy perf value (%d)\n", ret); + return ret; } - return (s16)(epp & 0xff); + return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value); } -static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata) +DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp); + +static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata) { - s16 epp; - int index = -EINVAL; + return static_call(amd_pstate_get_epp)(cpudata); +} - epp = amd_pstate_get_epp(cpudata, 0); - if (epp < 0) - return epp; +static u8 shmem_get_epp(struct amd_cpudata *cpudata) +{ + u64 epp; + int ret; - switch (epp) { - case AMD_CPPC_EPP_PERFORMANCE: - index = EPP_INDEX_PERFORMANCE; - break; - case AMD_CPPC_EPP_BALANCE_PERFORMANCE: - index = EPP_INDEX_BALANCE_PERFORMANCE; - break; - case AMD_CPPC_EPP_BALANCE_POWERSAVE: - index = EPP_INDEX_BALANCE_POWERSAVE; - break; - case 
AMD_CPPC_EPP_POWERSAVE: - index = EPP_INDEX_POWERSAVE; - break; - default: - break; + ret = cppc_get_epp_perf(cpudata->cpu, &epp); + if (ret < 0) { + pr_debug("Could not retrieve energy perf value (%d)\n", ret); + return ret; } - return index; + return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp); } -static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) +static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf, + u8 des_perf, u8 max_perf, u8 epp, bool fast_switch) { - int ret; - struct cppc_perf_ctrls perf_ctrls; - - if (boot_cpu_has(X86_FEATURE_CPPC)) { - u64 value = READ_ONCE(cpudata->cppc_req_cached); + struct amd_cpudata *cpudata = policy->driver_data; + u64 value, prev; + + value = prev = READ_ONCE(cpudata->cppc_req_cached); + + value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK | + AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK); + value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf); + value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf); + value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf); + value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp); + + if (trace_amd_pstate_epp_perf_enabled()) { + union perf_cached perf = READ_ONCE(cpudata->perf); + + trace_amd_pstate_epp_perf(cpudata->cpu, + perf.highest_perf, + epp, + min_perf, + max_perf, + policy->boost_enabled, + value != prev); + } - value &= ~GENMASK_ULL(31, 24); - value |= (u64)epp << 24; - WRITE_ONCE(cpudata->cppc_req_cached, value); + if (value == prev) + return 0; - ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); - if (!ret) - cpudata->epp_cached = epp; + if (fast_switch) { + wrmsrq(MSR_AMD_CPPC_REQ, value); + return 0; } else { - perf_ctrls.energy_perf = epp; - ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1); - if (ret) { - pr_debug("failed to set energy perf value (%d)\n", ret); + int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); + + if (ret) return ret; - } - cpudata->epp_cached = epp; } - return ret; + WRITE_ONCE(cpudata->cppc_req_cached, value); + + return 0; } -static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata, - int pref_index) +DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf); + +static inline int amd_pstate_update_perf(struct cpufreq_policy *policy, + u8 min_perf, u8 des_perf, + u8 max_perf, u8 epp, + bool fast_switch) { - int epp = -EINVAL; + return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf, + max_perf, epp, fast_switch); +} + +static int msr_set_epp(struct cpufreq_policy *policy, u8 epp) +{ + struct amd_cpudata *cpudata = policy->driver_data; + u64 value, prev; int ret; - if (!pref_index) { - pr_debug("EPP pref_index is invalid\n"); - return -EINVAL; + value = prev = READ_ONCE(cpudata->cppc_req_cached); + value &= ~AMD_CPPC_EPP_PERF_MASK; + value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp); + + if (trace_amd_pstate_epp_perf_enabled()) { + union perf_cached perf = cpudata->perf; + + trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf, + epp, + FIELD_GET(AMD_CPPC_MIN_PERF_MASK, + cpudata->cppc_req_cached), + FIELD_GET(AMD_CPPC_MAX_PERF_MASK, + cpudata->cppc_req_cached), + policy->boost_enabled, + value != prev); } - if (epp == -EINVAL) - epp = epp_values[pref_index]; + if (value == prev) + return 0; - if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) { - pr_debug("EPP cannot be set under performance policy\n"); - return -EBUSY; + ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); + if (ret) { + pr_err("failed to set energy perf value (%d)\n", ret); + return ret; } - ret = 
amd_pstate_set_epp(cpudata, epp); + /* update both so that msr_update_perf() can effectively check */ + WRITE_ONCE(cpudata->cppc_req_cached, value); return ret; } -static inline int pstate_enable(bool enable) -{ - int ret, cpu; - unsigned long logical_proc_id_mask = 0; +DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp); - if (enable == cppc_enabled) - return 0; +static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp) +{ + return static_call(amd_pstate_set_epp)(policy, epp); +} - for_each_present_cpu(cpu) { - unsigned long logical_id = topology_logical_die_id(cpu); +static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp) +{ + struct amd_cpudata *cpudata = policy->driver_data; + struct cppc_perf_ctrls perf_ctrls; + u8 epp_cached; + u64 value; + int ret; - if (test_bit(logical_id, &logical_proc_id_mask)) - continue; - set_bit(logical_id, &logical_proc_id_mask); + epp_cached = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached); + if (trace_amd_pstate_epp_perf_enabled()) { + union perf_cached perf = cpudata->perf; - ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE, - enable); - if (ret) - return ret; + trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf, + epp, + FIELD_GET(AMD_CPPC_MIN_PERF_MASK, + cpudata->cppc_req_cached), + FIELD_GET(AMD_CPPC_MAX_PERF_MASK, + cpudata->cppc_req_cached), + policy->boost_enabled, + epp != epp_cached); } - cppc_enabled = enable; - return 0; -} - -static int cppc_enable(bool enable) -{ - int cpu, ret = 0; - struct cppc_perf_ctrls perf_ctrls; - - if (enable == cppc_enabled) + if (epp == epp_cached) return 0; - for_each_present_cpu(cpu) { - ret = cppc_set_enable(cpu, enable); - if (ret) - return ret; - - /* Enable autonomous mode for EPP */ - if (cppc_state == AMD_PSTATE_ACTIVE) { - /* Set desired perf as zero to allow EPP firmware control */ - perf_ctrls.desired_perf = 0; - ret = cppc_set_perf(cpu, &perf_ctrls); - if (ret) - return ret; - } + perf_ctrls.energy_perf = epp; + ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1); + if (ret) { + pr_debug("failed to set energy perf value (%d)\n", ret); + return ret; } - cppc_enabled = enable; + value = READ_ONCE(cpudata->cppc_req_cached); + value &= ~AMD_CPPC_EPP_PERF_MASK; + value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp); + WRITE_ONCE(cpudata->cppc_req_cached, value); + return ret; } -DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable); +static inline int msr_cppc_enable(struct cpufreq_policy *policy) +{ + return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1); +} + +static int shmem_cppc_enable(struct cpufreq_policy *policy) +{ + return cppc_set_enable(policy->cpu, 1); +} + +DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable); -static inline int amd_pstate_enable(bool enable) +static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy) { - return static_call(amd_pstate_enable)(enable); + return static_call(amd_pstate_cppc_enable)(policy); } -static int pstate_init_perf(struct amd_cpudata *cpudata) +static int msr_init_perf(struct amd_cpudata *cpudata) { - u64 cap1; - u32 highest_perf; + union perf_cached perf = READ_ONCE(cpudata->perf); + u64 cap1, numerator, cppc_req; + u8 min_perf; - int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, + int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &cap1); if (ret) return ret; - /* For platforms that do not support the preferred core feature, the - * highest_pef may be configured with 166 or 255, to avoid max frequency - * calculated wrongly. 
we take the AMD_CPPC_HIGHEST_PERF(cap1) value as - * the default max perf. + ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator); + if (ret) + return ret; + + ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req); + if (ret) + return ret; + + WRITE_ONCE(cpudata->cppc_req_cached, cppc_req); + min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req); + + /* + * Clear out the min_perf part to check if the rest of the MSR is 0, if yes, this is an + * indication that the min_perf value is the one specified through the BIOS option */ - if (cpudata->hw_prefcore) - highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD; - else - highest_perf = AMD_CPPC_HIGHEST_PERF(cap1); - - WRITE_ONCE(cpudata->highest_perf, highest_perf); - WRITE_ONCE(cpudata->max_limit_perf, highest_perf); - WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1)); - WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1)); - WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1)); - WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1)); - WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1)); + cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK); + + if (!cppc_req) + perf.bios_min_perf = min_perf; + + perf.highest_perf = numerator; + perf.max_limit_perf = numerator; + perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1); + perf.nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1); + perf.lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1); + perf.lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1); + WRITE_ONCE(cpudata->perf, perf); + WRITE_ONCE(cpudata->prefcore_ranking, FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1)); + return 0; } -static int cppc_init_perf(struct amd_cpudata *cpudata) +static int shmem_init_perf(struct amd_cpudata *cpudata) { struct cppc_perf_caps cppc_perf; - u32 highest_perf; + union perf_cached perf = READ_ONCE(cpudata->perf); + u64 numerator; + bool auto_sel; int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); if (ret) return ret; - if (cpudata->hw_prefcore) - highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD; - else - highest_perf = cppc_perf.highest_perf; - - WRITE_ONCE(cpudata->highest_perf, highest_perf); - WRITE_ONCE(cpudata->max_limit_perf, highest_perf); - WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); - WRITE_ONCE(cpudata->lowest_nonlinear_perf, - cppc_perf.lowest_nonlinear_perf); - WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); + ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator); + if (ret) + return ret; + + perf.highest_perf = numerator; + perf.max_limit_perf = numerator; + perf.min_limit_perf = cppc_perf.lowest_perf; + perf.nominal_perf = cppc_perf.nominal_perf; + perf.lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf; + perf.lowest_perf = cppc_perf.lowest_perf; + WRITE_ONCE(cpudata->perf, perf); WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf); - WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf); if (cppc_state == AMD_PSTATE_ACTIVE) return 0; - ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf); + ret = cppc_get_auto_sel(cpudata->cpu, &auto_sel); if (ret) { pr_warn("failed to get auto_sel, ret: %d\n", ret); return 0; @@ -361,44 +471,63 @@ static int cppc_init_perf(struct amd_cpudata *cpudata) return ret; } -DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); +DEFINE_STATIC_CALL(amd_pstate_init_perf, msr_init_perf); static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) { return static_call(amd_pstate_init_perf)(cpudata); } 
-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, - u32 des_perf, u32 max_perf, bool fast_switch) -{ - if (fast_switch) - wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); - else - wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, - READ_ONCE(cpudata->cppc_req_cached)); -} - -static void cppc_update_perf(struct amd_cpudata *cpudata, - u32 min_perf, u32 des_perf, - u32 max_perf, bool fast_switch) +static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf, + u8 des_perf, u8 max_perf, u8 epp, bool fast_switch) { + struct amd_cpudata *cpudata = policy->driver_data; struct cppc_perf_ctrls perf_ctrls; + u64 value, prev; + int ret; + + if (cppc_state == AMD_PSTATE_ACTIVE) { + int ret = shmem_set_epp(policy, epp); + + if (ret) + return ret; + } + + value = prev = READ_ONCE(cpudata->cppc_req_cached); + + value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK | + AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK); + value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf); + value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf); + value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf); + value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp); + + if (trace_amd_pstate_epp_perf_enabled()) { + union perf_cached perf = READ_ONCE(cpudata->perf); + + trace_amd_pstate_epp_perf(cpudata->cpu, + perf.highest_perf, + epp, + min_perf, + max_perf, + policy->boost_enabled, + value != prev); + } + + if (value == prev) + return 0; perf_ctrls.max_perf = max_perf; perf_ctrls.min_perf = min_perf; perf_ctrls.desired_perf = des_perf; - cppc_set_perf(cpudata->cpu, &perf_ctrls); -} + ret = cppc_set_perf(cpudata->cpu, &perf_ctrls); + if (ret) + return ret; -DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); + WRITE_ONCE(cpudata->cppc_req_cached, value); -static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, - u32 min_perf, u32 des_perf, - u32 max_perf, bool fast_switch) -{ - static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, - max_perf, fast_switch); + return 0; } static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) @@ -407,8 +536,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) unsigned long flags; local_irq_save(flags); - rdmsrl(MSR_IA32_APERF, aperf); - rdmsrl(MSR_IA32_MPERF, mperf); + rdmsrq(MSR_IA32_APERF, aperf); + rdmsrq(MSR_IA32_MPERF, mperf); tsc = rdtsc(); if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) { @@ -434,99 +563,106 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) return true; } -static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, - u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags) +static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf, + u8 des_perf, u8 max_perf, bool fast_switch, int gov_flags) { - u64 prev = READ_ONCE(cpudata->cppc_req_cached); - u64 value = prev; + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu); + union perf_cached perf = READ_ONCE(cpudata->perf); + + if (!policy) + return; + + /* limit the max perf when core performance boost feature is disabled */ + if (!cpudata->boost_supported) + max_perf = min_t(u8, perf.nominal_perf, max_perf); - min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, - cpudata->max_limit_perf); - max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, - cpudata->max_limit_perf); - des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); + des_perf = clamp_t(u8, des_perf, min_perf, 
max_perf); + + policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf); if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) { min_perf = des_perf; des_perf = 0; } - value &= ~AMD_CPPC_MIN_PERF(~0L); - value |= AMD_CPPC_MIN_PERF(min_perf); - - value &= ~AMD_CPPC_DES_PERF(~0L); - value |= AMD_CPPC_DES_PERF(des_perf); - - value &= ~AMD_CPPC_MAX_PERF(~0L); - value |= AMD_CPPC_MAX_PERF(max_perf); - if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) { trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq, cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc, - cpudata->cpu, (value != prev), fast_switch); + cpudata->cpu, fast_switch); } - if (value == prev) - return; - - WRITE_ONCE(cpudata->cppc_req_cached, value); - - amd_pstate_update_perf(cpudata, min_perf, des_perf, - max_perf, fast_switch); + amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch); } -static int amd_pstate_verify(struct cpufreq_policy_data *policy) +static int amd_pstate_verify(struct cpufreq_policy_data *policy_data) { - cpufreq_verify_within_cpu_limits(policy); + /* + * Initialize lower frequency limit (i.e.policy->min) with + * lowest_nonlinear_frequency or the min frequency (if) specified in BIOS, + * Override the initial value set by cpufreq core and amd-pstate qos_requests. + */ + if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) { + struct cpufreq_policy *policy __free(put_cpufreq_policy) = + cpufreq_cpu_get(policy_data->cpu); + struct amd_cpudata *cpudata; + union perf_cached perf; + + if (!policy) + return -EINVAL; + + cpudata = policy->driver_data; + perf = READ_ONCE(cpudata->perf); + + if (perf.bios_min_perf) + policy_data->min = perf_to_freq(perf, cpudata->nominal_freq, + perf.bios_min_perf); + else + policy_data->min = cpudata->lowest_nonlinear_freq; + } + + cpufreq_verify_within_cpu_limits(policy_data); return 0; } -static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy) +static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy) { - u32 max_limit_perf, min_limit_perf, lowest_perf; struct amd_cpudata *cpudata = policy->driver_data; + union perf_cached perf = READ_ONCE(cpudata->perf); - max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq); - min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq); - - lowest_perf = READ_ONCE(cpudata->lowest_perf); - if (min_limit_perf < lowest_perf) - min_limit_perf = lowest_perf; - - if (max_limit_perf < min_limit_perf) - max_limit_perf = min_limit_perf; - - WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); - WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); + perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max); WRITE_ONCE(cpudata->max_limit_freq, policy->max); - WRITE_ONCE(cpudata->min_limit_freq, policy->min); - return 0; + if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) { + perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf); + WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq)); + } else { + perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min); + WRITE_ONCE(cpudata->min_limit_freq, policy->min); + } + + WRITE_ONCE(cpudata->perf, perf); } static int amd_pstate_update_freq(struct cpufreq_policy *policy, unsigned int target_freq, bool fast_switch) { struct cpufreq_freqs freqs; - struct amd_cpudata *cpudata = policy->driver_data; - unsigned long max_perf, min_perf, des_perf, cap_perf; + 
struct amd_cpudata *cpudata; + union perf_cached perf; + u8 des_perf; - if (!cpudata->max_freq) - return -ENODEV; + cpudata = policy->driver_data; if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) amd_pstate_update_min_max_limit(policy); - cap_perf = READ_ONCE(cpudata->highest_perf); - min_perf = READ_ONCE(cpudata->lowest_perf); - max_perf = cap_perf; + perf = READ_ONCE(cpudata->perf); freqs.old = policy->cur; freqs.new = target_freq; - des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf, - cpudata->max_freq); + des_perf = freq_to_perf(perf, cpudata->nominal_freq, target_freq); WARN_ON(fast_switch && !policy->fast_switch_enabled); /* @@ -537,8 +673,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy, if (!fast_switch) cpufreq_freq_transition_begin(policy, &freqs); - amd_pstate_update(cpudata, min_perf, des_perf, - max_perf, fast_switch, policy->governor->flags); + amd_pstate_update(cpudata, perf.min_limit_perf, des_perf, + perf.max_limit_perf, fast_switch, + policy->governor->flags); if (!fast_switch) cpufreq_freq_transition_end(policy, &freqs, false); @@ -566,273 +703,264 @@ static void amd_pstate_adjust_perf(unsigned int cpu, unsigned long target_perf, unsigned long capacity) { - unsigned long max_perf, min_perf, des_perf, - cap_perf, lowest_nonlinear_perf, max_freq; - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - struct amd_cpudata *cpudata = policy->driver_data; - unsigned int target_freq; + u8 max_perf, min_perf, des_perf, cap_perf; + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); + struct amd_cpudata *cpudata; + union perf_cached perf; + + if (!policy) + return; + + cpudata = policy->driver_data; if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) amd_pstate_update_min_max_limit(policy); - - cap_perf = READ_ONCE(cpudata->highest_perf); - lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); - max_freq = READ_ONCE(cpudata->max_freq); + perf = READ_ONCE(cpudata->perf); + cap_perf = perf.highest_perf; des_perf = cap_perf; if (target_perf < capacity) des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity); - min_perf = READ_ONCE(cpudata->lowest_perf); if (_min_perf < capacity) min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity); + else + min_perf = cap_perf; - if (min_perf < lowest_nonlinear_perf) - min_perf = lowest_nonlinear_perf; + if (min_perf < perf.min_limit_perf) + min_perf = perf.min_limit_perf; - max_perf = cap_perf; + max_perf = perf.max_limit_perf; if (max_perf < min_perf) max_perf = min_perf; - des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); - target_freq = div_u64(des_perf * max_freq, max_perf); - policy->cur = target_freq; - amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true, policy->governor->flags); - cpufreq_cpu_put(policy); } -static int amd_get_min_freq(struct amd_cpudata *cpudata) +static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on) { - struct cppc_perf_caps cppc_perf; - - int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); - if (ret) - return ret; - - /* Switch to khz */ - return cppc_perf.lowest_freq * 1000; -} - -static int amd_get_max_freq(struct amd_cpudata *cpudata) -{ - struct cppc_perf_caps cppc_perf; - u32 max_perf, max_freq, nominal_freq, nominal_perf; - u64 boost_ratio; + struct amd_cpudata *cpudata = policy->driver_data; + union perf_cached perf = READ_ONCE(cpudata->perf); + u32 nominal_freq, max_freq; + int ret = 0; - int ret = 
cppc_get_perf_caps(cpudata->cpu, &cppc_perf); - if (ret) - return ret; + nominal_freq = READ_ONCE(cpudata->nominal_freq); + max_freq = perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf); - nominal_freq = cppc_perf.nominal_freq; - nominal_perf = READ_ONCE(cpudata->nominal_perf); - max_perf = READ_ONCE(cpudata->highest_perf); + if (on) + policy->cpuinfo.max_freq = max_freq; + else if (policy->cpuinfo.max_freq > nominal_freq) + policy->cpuinfo.max_freq = nominal_freq; - boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT, - nominal_perf); + policy->max = policy->cpuinfo.max_freq; - max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT; + if (cppc_state == AMD_PSTATE_PASSIVE) { + ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq); + if (ret < 0) + pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu); + } - /* Switch to khz */ - return max_freq * 1000; + return ret < 0 ? ret : 0; } -static int amd_get_nominal_freq(struct amd_cpudata *cpudata) +static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) { - struct cppc_perf_caps cppc_perf; + struct amd_cpudata *cpudata = policy->driver_data; + int ret; - int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); - if (ret) - return ret; + if (!cpudata->boost_supported) { + pr_err("Boost mode is not supported by this processor or SBIOS\n"); + return -EOPNOTSUPP; + } + + ret = amd_pstate_cpu_boost_update(policy, state); + refresh_frequency_limits(policy); - /* Switch to khz */ - return cppc_perf.nominal_freq * 1000; + return ret; } -static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata) +static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata) { - struct cppc_perf_caps cppc_perf; - u32 lowest_nonlinear_freq, lowest_nonlinear_perf, - nominal_freq, nominal_perf; - u64 lowest_nonlinear_ratio; - - int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); - if (ret) - return ret; + u64 boost_val; + int ret = -1; - nominal_freq = cppc_perf.nominal_freq; - nominal_perf = READ_ONCE(cpudata->nominal_perf); + /* + * If platform has no CPB support or disable it, initialize current driver + * boost_enabled state to be false, it is not an error for cpufreq core to handle. 
+ */ + if (!cpu_feature_enabled(X86_FEATURE_CPB)) { + pr_debug_once("Boost CPB capabilities not present in the processor\n"); + ret = 0; + goto exit_err; + } - lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf; + ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val); + if (ret) { + pr_err_once("failed to read initial CPU boost state!\n"); + ret = -EIO; + goto exit_err; + } - lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT, - nominal_perf); + if (!(boost_val & MSR_K7_HWCR_CPB_DIS)) + cpudata->boost_supported = true; - lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT; + return 0; - /* Switch to khz */ - return lowest_nonlinear_freq * 1000; +exit_err: + cpudata->boost_supported = false; + return ret; } -static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) +static void amd_perf_ctl_reset(unsigned int cpu) { - struct amd_cpudata *cpudata = policy->driver_data; - int ret; - - if (!cpudata->boost_supported) { - pr_err("Boost mode is not supported by this processor or SBIOS\n"); - return -EINVAL; - } + wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0); +} - if (state) - policy->cpuinfo.max_freq = cpudata->max_freq; - else - policy->cpuinfo.max_freq = cpudata->nominal_freq; +#define CPPC_MAX_PERF U8_MAX - policy->max = policy->cpuinfo.max_freq; +static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata) +{ + /* user disabled or not detected */ + if (!amd_pstate_prefcore) + return; - ret = freq_qos_update_request(&cpudata->req[1], - policy->cpuinfo.max_freq); - if (ret < 0) - return ret; + cpudata->hw_prefcore = true; - return 0; + /* Priorities must be initialized before ITMT support can be toggled on. */ + sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu); } -static void amd_pstate_boost_init(struct amd_cpudata *cpudata) +static void amd_pstate_update_limits(struct cpufreq_policy *policy) { - u32 highest_perf, nominal_perf; + struct amd_cpudata *cpudata; + u32 prev_high = 0, cur_high = 0; + bool highest_perf_changed = false; + unsigned int cpu = policy->cpu; - highest_perf = READ_ONCE(cpudata->highest_perf); - nominal_perf = READ_ONCE(cpudata->nominal_perf); + if (!amd_pstate_prefcore) + return; - if (highest_perf <= nominal_perf) + if (amd_get_highest_perf(cpu, &cur_high)) return; - cpudata->boost_supported = true; - current_pstate_driver->boost_enabled = true; -} + cpudata = policy->driver_data; -static void amd_perf_ctl_reset(unsigned int cpu) -{ - wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0); + prev_high = READ_ONCE(cpudata->prefcore_ranking); + highest_perf_changed = (prev_high != cur_high); + if (highest_perf_changed) { + WRITE_ONCE(cpudata->prefcore_ranking, cur_high); + + if (cur_high < CPPC_MAX_PERF) { + sched_set_itmt_core_prio((int)cur_high, cpu); + sched_update_asym_prefer_cpu(cpu, prev_high, cur_high); + } + } } /* - * Set amd-pstate preferred core enable can't be done directly from cpufreq callbacks - * due to locking, so queue the work for later. + * Get pstate transition delay time from ACPI tables that firmware set + * instead of using hardcode value directly. 
*/ -static void amd_pstste_sched_prefcore_workfn(struct work_struct *work) +static u32 amd_pstate_get_transition_delay_us(unsigned int cpu) { - sched_set_itmt_support(); + u32 transition_delay_ns; + + transition_delay_ns = cppc_get_transition_latency(cpu); + if (transition_delay_ns == CPUFREQ_ETERNAL) { + if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC)) + return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY; + else + return AMD_PSTATE_TRANSITION_DELAY; + } + + return transition_delay_ns / NSEC_PER_USEC; } -static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn); /* - * Get the highest performance register value. - * @cpu: CPU from which to get highest performance. - * @highest_perf: Return address. - * - * Return: 0 for success, -EIO otherwise. + * Get pstate transition latency value from ACPI tables that firmware + * set instead of using hardcode value directly. */ -static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf) +static u32 amd_pstate_get_transition_latency(unsigned int cpu) { - int ret; + u32 transition_latency; - if (boot_cpu_has(X86_FEATURE_CPPC)) { - u64 cap1; + transition_latency = cppc_get_transition_latency(cpu); + if (transition_latency == CPUFREQ_ETERNAL) + return AMD_PSTATE_TRANSITION_LATENCY; - ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1); - if (ret) - return ret; - WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1)); - } else { - u64 cppc_highest_perf; - - ret = cppc_get_highest_perf(cpu, &cppc_highest_perf); - if (ret) - return ret; - WRITE_ONCE(*highest_perf, cppc_highest_perf); - } - - return (ret); + return transition_latency; } -#define CPPC_MAX_PERF U8_MAX - -static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata) +/* + * amd_pstate_init_freq: Initialize the nominal_freq and lowest_nonlinear_freq + * for the @cpudata object. + * + * Requires: all perf members of @cpudata to be initialized. + * + * Returns 0 on success, non-zero value on failure. + */ +static int amd_pstate_init_freq(struct amd_cpudata *cpudata) { - int ret, prio; - u32 highest_perf; + u32 min_freq, max_freq, nominal_freq, lowest_nonlinear_freq; + struct cppc_perf_caps cppc_perf; + union perf_cached perf; + int ret; - ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf); + ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); if (ret) - return; - - cpudata->hw_prefcore = true; - /* check if CPPC preferred core feature is enabled*/ - if (highest_perf < CPPC_MAX_PERF) - prio = (int)highest_perf; - else { - pr_debug("AMD CPPC preferred core is unsupported!\n"); - cpudata->hw_prefcore = false; - return; - } - - if (!amd_pstate_prefcore) - return; - - /* - * The priorities can be set regardless of whether or not - * sched_set_itmt_support(true) has been called and it is valid to - * update them at any time after it has been called. 
- */ - sched_set_itmt_core_prio(prio, cpudata->cpu); + return ret; + perf = READ_ONCE(cpudata->perf); - schedule_work(&sched_prefcore_work); -} + if (quirks && quirks->nominal_freq) + nominal_freq = quirks->nominal_freq; + else + nominal_freq = cppc_perf.nominal_freq; + nominal_freq *= 1000; -static void amd_pstate_update_limits(unsigned int cpu) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - struct amd_cpudata *cpudata = policy->driver_data; - u32 prev_high = 0, cur_high = 0; - int ret; - bool highest_perf_changed = false; + if (quirks && quirks->lowest_freq) { + min_freq = quirks->lowest_freq; + perf.lowest_perf = freq_to_perf(perf, nominal_freq, min_freq); + WRITE_ONCE(cpudata->perf, perf); + } else + min_freq = cppc_perf.lowest_freq; - mutex_lock(&amd_pstate_driver_lock); - if ((!amd_pstate_prefcore) || (!cpudata->hw_prefcore)) - goto free_cpufreq_put; + min_freq *= 1000; - ret = amd_pstate_get_highest_perf(cpu, &cur_high); - if (ret) - goto free_cpufreq_put; + WRITE_ONCE(cpudata->nominal_freq, nominal_freq); - prev_high = READ_ONCE(cpudata->prefcore_ranking); - if (prev_high != cur_high) { - highest_perf_changed = true; - WRITE_ONCE(cpudata->prefcore_ranking, cur_high); + max_freq = perf_to_freq(perf, nominal_freq, perf.highest_perf); + lowest_nonlinear_freq = perf_to_freq(perf, nominal_freq, perf.lowest_nonlinear_perf); + WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq); - if (cur_high < CPPC_MAX_PERF) - sched_set_itmt_core_prio((int)cur_high, cpu); + /** + * Below values need to be initialized correctly, otherwise driver will fail to load + * max_freq is calculated according to (nominal_freq * highest_perf)/nominal_perf + * lowest_nonlinear_freq is a value between [min_freq, nominal_freq] + * Check _CPC in ACPI table objects if any values are incorrect + */ + if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) { + pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n", + min_freq, max_freq, nominal_freq); + return -EINVAL; } -free_cpufreq_put: - cpufreq_cpu_put(policy); - - if (!highest_perf_changed) - cpufreq_update_policy(cpu); + if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq) { + pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n", + lowest_nonlinear_freq, min_freq, nominal_freq); + return -EINVAL; + } - mutex_unlock(&amd_pstate_driver_lock); + return 0; } static int amd_pstate_cpu_init(struct cpufreq_policy *policy) { - int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret; - struct device *dev; struct amd_cpudata *cpudata; + union perf_cached perf; + struct device *dev; + int ret; /* * Resetting PERF_CTL_MSR will put the CPU in P0 frequency, @@ -849,41 +977,46 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) cpudata->cpu = policy->cpu; - amd_pstate_init_prefcore(cpudata); - ret = amd_pstate_init_perf(cpudata); if (ret) goto free_cpudata1; - min_freq = amd_get_min_freq(cpudata); - max_freq = amd_get_max_freq(cpudata); - nominal_freq = amd_get_nominal_freq(cpudata); - lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata); + amd_pstate_init_prefcore(cpudata); - if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) { - dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n", - min_freq, max_freq); - ret = -EINVAL; + ret = amd_pstate_init_freq(cpudata); + if (ret) goto free_cpudata1; - } - policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY; - 
policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY; + ret = amd_pstate_init_boost_support(cpudata); + if (ret) + goto free_cpudata1; + + policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu); + policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu); + + perf = READ_ONCE(cpudata->perf); - policy->min = min_freq; - policy->max = max_freq; + policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf, + cpudata->nominal_freq, + perf.lowest_perf); + policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf, + cpudata->nominal_freq, + perf.highest_perf); - policy->cpuinfo.min_freq = min_freq; - policy->cpuinfo.max_freq = max_freq; + ret = amd_pstate_cppc_enable(policy); + if (ret) + goto free_cpudata1; + + policy->boost_supported = READ_ONCE(cpudata->boost_supported); /* It will be updated by governor */ policy->cur = policy->cpuinfo.min_freq; - if (boot_cpu_has(X86_FEATURE_CPPC)) + if (cpu_feature_enabled(X86_FEATURE_CPPC)) policy->fast_switch_possible = true; ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], - FREQ_QOS_MIN, policy->cpuinfo.min_freq); + FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE); if (ret < 0) { dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); goto free_cpudata1; @@ -896,17 +1029,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) goto free_cpudata2; } - /* Initial processor data capability frequencies */ - cpudata->max_freq = max_freq; - cpudata->min_freq = min_freq; - cpudata->max_limit_freq = max_freq; - cpudata->min_limit_freq = min_freq; - cpudata->nominal_freq = nominal_freq; - cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq; - policy->driver_data = cpudata; - amd_pstate_boost_init(cpudata); if (!current_pstate_driver->adjust_perf) current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; @@ -915,42 +1039,23 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) free_cpudata2: freq_qos_remove_request(&cpudata->req[0]); free_cpudata1: + pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret); kfree(cpudata); return ret; } -static int amd_pstate_cpu_exit(struct cpufreq_policy *policy) +static void amd_pstate_cpu_exit(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; + union perf_cached perf = READ_ONCE(cpudata->perf); + + /* Reset CPPC_REQ MSR to the BIOS value */ + amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false); freq_qos_remove_request(&cpudata->req[1]); freq_qos_remove_request(&cpudata->req[0]); policy->fast_switch_possible = false; kfree(cpudata); - - return 0; -} - -static int amd_pstate_cpu_resume(struct cpufreq_policy *policy) -{ - int ret; - - ret = amd_pstate_enable(true); - if (ret) - pr_err("failed to enable amd-pstate during resume, return %d\n", ret); - - return ret; -} - -static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy) -{ - int ret; - - ret = amd_pstate_enable(false); - if (ret) - pr_err("failed to disable amd-pstate during suspend, return %d\n", ret); - - return ret; } /* Sysfs attributes */ @@ -963,27 +1068,27 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy) static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy, char *buf) { - int max_freq; - struct amd_cpudata *cpudata = policy->driver_data; + struct amd_cpudata *cpudata; + union perf_cached perf; - max_freq = amd_get_max_freq(cpudata); - if (max_freq < 0) - return max_freq; + cpudata = policy->driver_data; + perf = READ_ONCE(cpudata->perf); - return sysfs_emit(buf, 
"%u\n", max_freq); + return sysfs_emit(buf, "%u\n", + perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf)); } static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy, char *buf) { - int freq; - struct amd_cpudata *cpudata = policy->driver_data; + struct amd_cpudata *cpudata; + union perf_cached perf; - freq = amd_get_lowest_nonlinear_freq(cpudata); - if (freq < 0) - return freq; + cpudata = policy->driver_data; + perf = READ_ONCE(cpudata->perf); - return sysfs_emit(buf, "%u\n", freq); + return sysfs_emit(buf, "%u\n", + perf_to_freq(perf, cpudata->nominal_freq, perf.lowest_nonlinear_perf)); } /* @@ -993,18 +1098,17 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy, char *buf) { - u32 perf; - struct amd_cpudata *cpudata = policy->driver_data; + struct amd_cpudata *cpudata; - perf = READ_ONCE(cpudata->highest_perf); + cpudata = policy->driver_data; - return sysfs_emit(buf, "%u\n", perf); + return sysfs_emit(buf, "%u\n", cpudata->perf.highest_perf); } static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy, char *buf) { - u32 perf; + u8 perf; struct amd_cpudata *cpudata = policy->driver_data; perf = READ_ONCE(cpudata->prefcore_ranking); @@ -1048,6 +1152,7 @@ static ssize_t store_energy_performance_preference( struct amd_cpudata *cpudata = policy->driver_data; char str_preference[21]; ssize_t ret; + u8 epp; ret = sscanf(buf, "%20s", str_preference); if (ret != 1) @@ -1057,50 +1162,100 @@ static ssize_t store_energy_performance_preference( if (ret < 0) return -EINVAL; - mutex_lock(&amd_pstate_limits_lock); - ret = amd_pstate_set_energy_pref_index(cpudata, ret); - mutex_unlock(&amd_pstate_limits_lock); + if (!ret) + epp = cpudata->epp_default; + else + epp = epp_values[ret]; + + if (epp > 0 && policy->policy == CPUFREQ_POLICY_PERFORMANCE) { + pr_debug("EPP cannot be set under performance policy\n"); + return -EBUSY; + } + + ret = amd_pstate_set_epp(policy, epp); - return ret ?: count; + return ret ? 
ret : count; } static ssize_t show_energy_performance_preference( struct cpufreq_policy *policy, char *buf) { struct amd_cpudata *cpudata = policy->driver_data; - int preference; + u8 preference, epp; + + epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached); - preference = amd_pstate_get_energy_pref_index(cpudata); - if (preference < 0) - return preference; + switch (epp) { + case AMD_CPPC_EPP_PERFORMANCE: + preference = EPP_INDEX_PERFORMANCE; + break; + case AMD_CPPC_EPP_BALANCE_PERFORMANCE: + preference = EPP_INDEX_BALANCE_PERFORMANCE; + break; + case AMD_CPPC_EPP_BALANCE_POWERSAVE: + preference = EPP_INDEX_BALANCE_POWERSAVE; + break; + case AMD_CPPC_EPP_POWERSAVE: + preference = EPP_INDEX_POWERSAVE; + break; + default: + return -EINVAL; + } return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]); } static void amd_pstate_driver_cleanup(void) { - amd_pstate_enable(false); + if (amd_pstate_prefcore) + sched_clear_itmt_support(); + cppc_state = AMD_PSTATE_DISABLE; current_pstate_driver = NULL; } +static int amd_pstate_set_driver(int mode_idx) +{ + if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) { + cppc_state = mode_idx; + if (cppc_state == AMD_PSTATE_DISABLE) + pr_info("driver is explicitly disabled\n"); + + if (cppc_state == AMD_PSTATE_ACTIVE) + current_pstate_driver = &amd_pstate_epp_driver; + + if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED) + current_pstate_driver = &amd_pstate_driver; + + return 0; + } + + return -EINVAL; +} + static int amd_pstate_register_driver(int mode) { int ret; - if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED) - current_pstate_driver = &amd_pstate_driver; - else if (mode == AMD_PSTATE_ACTIVE) - current_pstate_driver = &amd_pstate_epp_driver; - else - return -EINVAL; + ret = amd_pstate_set_driver(mode); + if (ret) + return ret; cppc_state = mode; + + /* at least one CPU supports CPB */ + current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB); + ret = cpufreq_register_driver(current_pstate_driver); if (ret) { amd_pstate_driver_cleanup(); return ret; } + + /* Enable ITMT support once all CPUs have initialized their asym priorities. 
*/ + if (amd_pstate_prefcore) + sched_set_itmt_support(); + return 0; } @@ -1117,7 +1272,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode) cppc_state = mode; - if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE) + if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE) return 0; for_each_present_cpu(cpu) { @@ -1177,7 +1332,13 @@ static ssize_t amd_pstate_show_status(char *buf) return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]); } -static int amd_pstate_update_status(const char *buf, size_t size) +int amd_pstate_get_status(void) +{ + return cppc_state; +} +EXPORT_SYMBOL_GPL(amd_pstate_get_status); + +int amd_pstate_update_status(const char *buf, size_t size) { int mode_idx; @@ -1189,22 +1350,22 @@ static int amd_pstate_update_status(const char *buf, size_t size) if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX) return -EINVAL; - if (mode_state_machine[cppc_state][mode_idx]) + if (mode_state_machine[cppc_state][mode_idx]) { + guard(mutex)(&amd_pstate_driver_lock); return mode_state_machine[cppc_state][mode_idx](mode_idx); + } return 0; } +EXPORT_SYMBOL_GPL(amd_pstate_update_status); static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *buf) { - ssize_t ret; - mutex_lock(&amd_pstate_driver_lock); - ret = amd_pstate_show_status(buf); - mutex_unlock(&amd_pstate_driver_lock); + guard(mutex)(&amd_pstate_driver_lock); - return ret; + return amd_pstate_show_status(buf); } static ssize_t status_store(struct device *a, struct device_attribute *b, @@ -1213,9 +1374,7 @@ static ssize_t status_store(struct device *a, struct device_attribute *b, char *p = memchr(buf, '\n', count); int ret; - mutex_lock(&amd_pstate_driver_lock); ret = amd_pstate_update_status(buf, p ? p - buf : count); - mutex_unlock(&amd_pstate_driver_lock); return ret < 0 ? 
ret : count; } @@ -1290,10 +1449,10 @@ static bool amd_pstate_acpi_pm_profile_undefined(void) static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) { - int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret; struct amd_cpudata *cpudata; + union perf_cached perf; struct device *dev; - u64 value; + int ret; /* * Resetting PERF_CTL_MSR will put the CPU in P0 frequency, @@ -1309,267 +1468,179 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) return -ENOMEM; cpudata->cpu = policy->cpu; - cpudata->epp_policy = 0; - - amd_pstate_init_prefcore(cpudata); ret = amd_pstate_init_perf(cpudata); if (ret) goto free_cpudata1; - min_freq = amd_get_min_freq(cpudata); - max_freq = amd_get_max_freq(cpudata); - nominal_freq = amd_get_nominal_freq(cpudata); - lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata); - if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) { - dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n", - min_freq, max_freq); - ret = -EINVAL; + amd_pstate_init_prefcore(cpudata); + + ret = amd_pstate_init_freq(cpudata); + if (ret) goto free_cpudata1; - } - policy->cpuinfo.min_freq = min_freq; - policy->cpuinfo.max_freq = max_freq; - /* It will be updated by governor */ - policy->cur = policy->cpuinfo.min_freq; + ret = amd_pstate_init_boost_support(cpudata); + if (ret) + goto free_cpudata1; - /* Initial processor data capability frequencies */ - cpudata->max_freq = max_freq; - cpudata->min_freq = min_freq; - cpudata->nominal_freq = nominal_freq; - cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq; + perf = READ_ONCE(cpudata->perf); + policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf, + cpudata->nominal_freq, + perf.lowest_perf); + policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf, + cpudata->nominal_freq, + perf.highest_perf); policy->driver_data = cpudata; - cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0); + ret = amd_pstate_cppc_enable(policy); + if (ret) + goto free_cpudata1; + + /* It will be updated by governor */ + policy->cur = policy->cpuinfo.min_freq; - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; + + policy->boost_supported = READ_ONCE(cpudata->boost_supported); /* * Set the policy to provide a valid fallback value in case * the default cpufreq governor is neither powersave nor performance. 
*/ if (amd_pstate_acpi_pm_profile_server() || - amd_pstate_acpi_pm_profile_undefined()) + amd_pstate_acpi_pm_profile_undefined()) { policy->policy = CPUFREQ_POLICY_PERFORMANCE; - else + cpudata->epp_default = amd_pstate_get_epp(cpudata); + } else { policy->policy = CPUFREQ_POLICY_POWERSAVE; + cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE; + } - if (boot_cpu_has(X86_FEATURE_CPPC)) { - ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); - if (ret) - return ret; - WRITE_ONCE(cpudata->cppc_req_cached, value); + ret = amd_pstate_set_epp(policy, cpudata->epp_default); + if (ret) + return ret; - ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value); - if (ret) - return ret; - WRITE_ONCE(cpudata->cppc_cap1_cached, value); - } - amd_pstate_boost_init(cpudata); + current_pstate_driver->adjust_perf = NULL; return 0; free_cpudata1: + pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret); kfree(cpudata); return ret; } -static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) -{ - pr_debug("CPU %d exiting\n", policy->cpu); - return 0; -} - -static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy) +static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; - u32 max_perf, min_perf, min_limit_perf, max_limit_perf; - u64 value; - s16 epp; - - max_perf = READ_ONCE(cpudata->highest_perf); - min_perf = READ_ONCE(cpudata->lowest_perf); - max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq); - min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq); - - if (min_limit_perf < min_perf) - min_limit_perf = min_perf; - - if (max_limit_perf < min_limit_perf) - max_limit_perf = min_limit_perf; - WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); - WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); - - max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, - cpudata->max_limit_perf); - min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, - cpudata->max_limit_perf); - value = READ_ONCE(cpudata->cppc_req_cached); - - if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) - min_perf = max_perf; + if (cpudata) { + union perf_cached perf = READ_ONCE(cpudata->perf); - /* Initial min/max values for CPPC Performance Controls Register */ - value &= ~AMD_CPPC_MIN_PERF(~0L); - value |= AMD_CPPC_MIN_PERF(min_perf); + /* Reset CPPC_REQ MSR to the BIOS value */ + amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false); - value &= ~AMD_CPPC_MAX_PERF(~0L); - value |= AMD_CPPC_MAX_PERF(max_perf); + kfree(cpudata); + policy->driver_data = NULL; + } - /* CPPC EPP feature require to set zero to the desire perf bit */ - value &= ~AMD_CPPC_DES_PERF(~0L); - value |= AMD_CPPC_DES_PERF(0); + pr_debug("CPU %d exiting\n", policy->cpu); +} - cpudata->epp_policy = cpudata->policy; +static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy) +{ + struct amd_cpudata *cpudata = policy->driver_data; + union perf_cached perf; + u8 epp; - /* Get BIOS pre-defined epp value */ - epp = amd_pstate_get_epp(cpudata, value); - if (epp < 0) { - /** - * This return value can only be negative for shared_memory - * systems where EPP register read/write not supported. 
- */ - return; - } + if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) + amd_pstate_update_min_max_limit(policy); if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) epp = 0; + else + epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached); - /* Set initial EPP value */ - if (boot_cpu_has(X86_FEATURE_CPPC)) { - value &= ~GENMASK_ULL(31, 24); - value |= (u64)epp << 24; - } + perf = READ_ONCE(cpudata->perf); - WRITE_ONCE(cpudata->cppc_req_cached, value); - amd_pstate_set_epp(cpudata, epp); + return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U, + perf.max_limit_perf, epp, false); } static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; + int ret; if (!policy->cpuinfo.max_freq) return -ENODEV; - pr_debug("set_policy: cpuinfo.max %u policy->max %u\n", - policy->cpuinfo.max_freq, policy->max); - cpudata->policy = policy->policy; - amd_pstate_epp_update_limit(policy); - - return 0; -} - -static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata) -{ - struct cppc_perf_ctrls perf_ctrls; - u64 value, max_perf; - int ret; - - ret = amd_pstate_enable(true); + ret = amd_pstate_epp_update_limit(policy); if (ret) - pr_err("failed to enable amd pstate during resume, return %d\n", ret); + return ret; - value = READ_ONCE(cpudata->cppc_req_cached); - max_perf = READ_ONCE(cpudata->highest_perf); + /* + * policy->cur is never updated with the amd_pstate_epp driver, but it + * is used as a stale frequency value. So, keep it within limits. + */ + policy->cur = policy->min; - if (boot_cpu_has(X86_FEATURE_CPPC)) { - wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); - } else { - perf_ctrls.max_perf = max_perf; - perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached); - cppc_set_perf(cpudata->cpu, &perf_ctrls); - } + return 0; } -static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy) +static int amd_pstate_cpu_online(struct cpufreq_policy *policy) { - struct amd_cpudata *cpudata = policy->driver_data; - - pr_debug("AMD CPU Core %d going online\n", cpudata->cpu); - - if (cppc_state == AMD_PSTATE_ACTIVE) { - amd_pstate_epp_reenable(cpudata); - cpudata->suspended = false; - } - - return 0; + return amd_pstate_cppc_enable(policy); } -static void amd_pstate_epp_offline(struct cpufreq_policy *policy) +static int amd_pstate_cpu_offline(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; - struct cppc_perf_ctrls perf_ctrls; - int min_perf; - u64 value; + union perf_cached perf = READ_ONCE(cpudata->perf); - min_perf = READ_ONCE(cpudata->lowest_perf); - value = READ_ONCE(cpudata->cppc_req_cached); - - mutex_lock(&amd_pstate_limits_lock); - if (boot_cpu_has(X86_FEATURE_CPPC)) { - cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN; - - /* Set max perf same as min perf */ - value &= ~AMD_CPPC_MAX_PERF(~0L); - value |= AMD_CPPC_MAX_PERF(min_perf); - value &= ~AMD_CPPC_MIN_PERF(~0L); - value |= AMD_CPPC_MIN_PERF(min_perf); - wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); - } else { - perf_ctrls.desired_perf = 0; - perf_ctrls.max_perf = min_perf; - perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE); - cppc_set_perf(cpudata->cpu, &perf_ctrls); - } - mutex_unlock(&amd_pstate_limits_lock); + /* + * Reset CPPC_REQ MSR to the BIOS value, this will allow us to retain the BIOS specified + * min_perf value across kexec reboots. 
If this CPU is just onlined normally after this, the + * limits, epp and desired perf will get reset to the cached values in cpudata struct + */ + return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false); } -static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy) +static int amd_pstate_suspend(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; + union perf_cached perf = READ_ONCE(cpudata->perf); + int ret; - pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu); - - if (cpudata->suspended) - return 0; + /* + * Reset CPPC_REQ MSR to the BIOS value, this will allow us to retain the BIOS specified + * min_perf value across kexec reboots. If this CPU is just resumed back without kexec, + * the limits, epp and desired perf will get reset to the cached values in cpudata struct + */ + ret = amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false); + if (ret) + return ret; - if (cppc_state == AMD_PSTATE_ACTIVE) - amd_pstate_epp_offline(policy); + /* invalidate to ensure it's rewritten during resume */ + cpudata->cppc_req_cached = 0; - return 0; -} + /* set this flag to avoid setting core offline*/ + cpudata->suspended = true; -static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy) -{ - cpufreq_verify_within_cpu_limits(policy); - pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min); return 0; } -static int amd_pstate_epp_suspend(struct cpufreq_policy *policy) +static int amd_pstate_resume(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; - int ret; - - /* avoid suspending when EPP is not enabled */ - if (cppc_state != AMD_PSTATE_ACTIVE) - return 0; + union perf_cached perf = READ_ONCE(cpudata->perf); + int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur); - /* set this flag to avoid setting core offline*/ - cpudata->suspended = true; - - /* disable CPPC in lowlevel firmware */ - ret = amd_pstate_enable(false); - if (ret) - pr_err("failed to suspend, return %d\n", ret); - - return 0; + /* Set CPPC_REQ to last sane value until the governor updates it */ + return amd_pstate_update_perf(policy, perf.min_limit_perf, cur_perf, perf.max_limit_perf, + 0U, false); } static int amd_pstate_epp_resume(struct cpufreq_policy *policy) @@ -1577,12 +1648,12 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy) struct amd_cpudata *cpudata = policy->driver_data; if (cpudata->suspended) { - mutex_lock(&amd_pstate_limits_lock); + int ret; /* enable amd pstate from suspend state*/ - amd_pstate_epp_reenable(cpudata); - - mutex_unlock(&amd_pstate_limits_lock); + ret = amd_pstate_epp_update_limit(policy); + if (ret) + return ret; cpudata->suspended = false; } @@ -1597,8 +1668,10 @@ static struct cpufreq_driver amd_pstate_driver = { .fast_switch = amd_pstate_fast_switch, .init = amd_pstate_cpu_init, .exit = amd_pstate_cpu_exit, - .suspend = amd_pstate_cpu_suspend, - .resume = amd_pstate_cpu_resume, + .online = amd_pstate_cpu_online, + .offline = amd_pstate_cpu_offline, + .suspend = amd_pstate_suspend, + .resume = amd_pstate_resume, .set_boost = amd_pstate_set_boost, .update_limits = amd_pstate_update_limits, .name = "amd-pstate", @@ -1607,36 +1680,72 @@ static struct cpufreq_driver amd_pstate_driver = { static struct cpufreq_driver amd_pstate_epp_driver = { .flags = CPUFREQ_CONST_LOOPS, - .verify = amd_pstate_epp_verify_policy, + .verify = amd_pstate_verify, .setpolicy = amd_pstate_epp_set_policy, .init = amd_pstate_epp_cpu_init, .exit = 
amd_pstate_epp_cpu_exit, - .offline = amd_pstate_epp_cpu_offline, - .online = amd_pstate_epp_cpu_online, - .suspend = amd_pstate_epp_suspend, + .offline = amd_pstate_cpu_offline, + .online = amd_pstate_cpu_online, + .suspend = amd_pstate_suspend, .resume = amd_pstate_epp_resume, .update_limits = amd_pstate_update_limits, + .set_boost = amd_pstate_set_boost, .name = "amd-pstate-epp", .attr = amd_pstate_epp_attr, }; -static int __init amd_pstate_set_driver(int mode_idx) +/* + * CPPC function is not supported for family ID 17H with model_ID ranging from 0x10 to 0x2F. + * show the debug message that helps to check if the CPU has CPPC support for loading issue. + */ +static bool amd_cppc_supported(void) { - if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) { - cppc_state = mode_idx; - if (cppc_state == AMD_PSTATE_DISABLE) - pr_info("driver is explicitly disabled\n"); + struct cpuinfo_x86 *c = &cpu_data(0); + bool warn = false; - if (cppc_state == AMD_PSTATE_ACTIVE) - current_pstate_driver = &amd_pstate_epp_driver; - - if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED) - current_pstate_driver = &amd_pstate_driver; + if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) { + pr_debug_once("CPPC feature is not supported by the processor\n"); + return false; + } - return 0; + /* + * If the CPPC feature is disabled in the BIOS for processors + * that support MSR-based CPPC, the AMD Pstate driver may not + * function correctly. + * + * For such processors, check the CPPC flag and display a + * warning message if the platform supports CPPC. + * + * Note: The code check below will not abort the driver + * registration process because of the code is added for + * debugging purposes. Besides, it may still be possible for + * the driver to work using the shared-memory mechanism. + */ + if (!cpu_feature_enabled(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_ZEN2)) { + switch (c->x86_model) { + case 0x60 ... 0x6F: + case 0x80 ... 0xAF: + warn = true; + break; + } + } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) || + cpu_feature_enabled(X86_FEATURE_ZEN4)) { + switch (c->x86_model) { + case 0x10 ... 0x1F: + case 0x40 ... 0xAF: + warn = true; + break; + } + } else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) { + warn = true; + } } - return -EINVAL; + if (warn) + pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n" + "Please enable it if your BIOS has the CPPC option.\n"); + return true; } static int __init amd_pstate_init(void) @@ -1647,6 +1756,11 @@ static int __init amd_pstate_init(void) if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return -ENODEV; + /* show debug message only if CPPC is not supported */ + if (!amd_cppc_supported()) + return -EOPNOTSUPP; + + /* show warning message when BIOS broken or ACPI disabled */ if (!acpi_cpc_valid()) { pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n"); return -ENODEV; @@ -1656,55 +1770,59 @@ static int __init amd_pstate_init(void) if (cpufreq_get_current_driver()) return -EEXIST; - switch (cppc_state) { - case AMD_PSTATE_UNDEFINED: + quirks = NULL; + + /* check if this machine need CPPC quirks */ + dmi_check_system(amd_pstate_quirks_table); + + /* + * determine the driver mode from the command line or kernel config. + * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED. + * command line options will override the kernel config settings. 
+ */ + + if (cppc_state == AMD_PSTATE_UNDEFINED) { /* Disable on the following configs by default: * 1. Undefined platforms - * 2. Server platforms - * 3. Shared memory designs + * 2. Server platforms with CPUs older than Family 0x1A. */ if (amd_pstate_acpi_pm_profile_undefined() || - amd_pstate_acpi_pm_profile_server() || - !boot_cpu_has(X86_FEATURE_CPPC)) { + (amd_pstate_acpi_pm_profile_server() && boot_cpu_data.x86 < 0x1A)) { pr_info("driver load is disabled, boot with specific mode to enable this\n"); return -ENODEV; } - ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE); - if (ret) - return ret; - break; - case AMD_PSTATE_DISABLE: + /* get driver mode from kernel config option [1:4] */ + cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE; + } + + if (cppc_state == AMD_PSTATE_DISABLE) { + pr_info("driver load is disabled, boot with specific mode to enable this\n"); return -ENODEV; - case AMD_PSTATE_PASSIVE: - case AMD_PSTATE_ACTIVE: - case AMD_PSTATE_GUIDED: - break; - default: - return -EINVAL; } /* capability check */ - if (boot_cpu_has(X86_FEATURE_CPPC)) { + if (cpu_feature_enabled(X86_FEATURE_CPPC)) { pr_debug("AMD CPPC MSR based functionality is supported\n"); - if (cppc_state != AMD_PSTATE_ACTIVE) - current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; } else { pr_debug("AMD CPPC shared memory based functionality is supported\n"); - static_call_update(amd_pstate_enable, cppc_enable); - static_call_update(amd_pstate_init_perf, cppc_init_perf); - static_call_update(amd_pstate_update_perf, cppc_update_perf); + static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable); + static_call_update(amd_pstate_init_perf, shmem_init_perf); + static_call_update(amd_pstate_update_perf, shmem_update_perf); + static_call_update(amd_pstate_get_epp, shmem_get_epp); + static_call_update(amd_pstate_set_epp, shmem_set_epp); } - /* enable amd pstate feature */ - ret = amd_pstate_enable(true); - if (ret) { - pr_err("failed to enable with return %d\n", ret); - return ret; + if (amd_pstate_prefcore) { + ret = amd_detect_prefcore(&amd_pstate_prefcore); + if (ret) + return ret; } - ret = cpufreq_register_driver(current_pstate_driver); - if (ret) + ret = amd_pstate_register_driver(cppc_state); + if (ret) { pr_err("failed to register with return %d\n", ret); + return ret; + } dev_root = bus_get_dev_root(&cpu_subsys); if (dev_root) { diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h new file mode 100644 index 000000000000..cb45fdca27a6 --- /dev/null +++ b/drivers/cpufreq/amd-pstate.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 Advanced Micro Devices, Inc. + * + * Author: Meng Li <li.meng@amd.com> + */ + +#ifndef _LINUX_AMD_PSTATE_H +#define _LINUX_AMD_PSTATE_H + +#include <linux/pm_qos.h> + +/********************************************************************* + * AMD P-state INTERFACE * + *********************************************************************/ + +/** + * union perf_cached - A union to cache performance-related data. + * @highest_perf: the maximum performance an individual processor may reach, + * assuming ideal conditions + * For platforms that support the preferred core feature, the highest_perf value maybe + * configured to any value in the range 166-255 by the firmware (because the preferred + * core ranking is encoded in the highest_perf value). 
To maintain consistency across + * all platforms, we split the highest_perf and preferred core ranking values into + * cpudata->perf.highest_perf and cpudata->prefcore_ranking. + * @nominal_perf: the maximum sustained performance level of the processor, + * assuming ideal operating conditions + * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power + * savings are achieved + * @lowest_perf: the absolute lowest performance level of the processor + * @min_limit_perf: Cached value of the performance corresponding to policy->min + * @max_limit_perf: Cached value of the performance corresponding to policy->max + * @bios_min_perf: Cached perf value corresponding to the "Requested CPU Min Frequency" BIOS option + */ +union perf_cached { + struct { + u8 highest_perf; + u8 nominal_perf; + u8 lowest_nonlinear_perf; + u8 lowest_perf; + u8 min_limit_perf; + u8 max_limit_perf; + u8 bios_min_perf; + }; + u64 val; +}; + +/** + * struct amd_aperf_mperf + * @aperf: actual performance frequency clock count + * @mperf: maximum performance frequency clock count + * @tsc: time stamp counter + */ +struct amd_aperf_mperf { + u64 aperf; + u64 mperf; + u64 tsc; +}; + +/** + * struct amd_cpudata - private CPU data for AMD P-State + * @cpu: CPU number + * @req: constraint request to apply + * @cppc_req_cached: cached performance request hints + * @perf: cached performance-related data + * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher + * priority. + * @min_limit_freq: Cached value of policy->min (in khz) + * @max_limit_freq: Cached value of policy->max (in khz) + * @nominal_freq: the frequency (in khz) that maps to nominal_perf + * @lowest_nonlinear_freq: the frequency (in khz) that maps to lowest_nonlinear_perf + * @cur: Difference of Aperf/Mperf/tsc count between last and current sample + * @prev: Last Aperf/Mperf/tsc count value read from register + * @freq: current cpu frequency value (in khz) + * @boost_supported: check whether the Processor or SBIOS supports boost mode + * @hw_prefcore: check whether HW supports preferred core feature. + * Only when hw_prefcore and early prefcore param are true, + * AMD P-State driver supports preferred core feature. + * @epp_default: Cached CPPC energy-performance preference value + * @policy: Cpufreq policy value + * + * The amd_cpudata is the key private data for each CPU thread in AMD P-State, and + * represents all the attributes and goals that AMD P-State requests at runtime. 
+ */ +struct amd_cpudata { + int cpu; + + struct freq_qos_request req[2]; + u64 cppc_req_cached; + + union perf_cached perf; + + u8 prefcore_ranking; + u32 min_limit_freq; + u32 max_limit_freq; + u32 nominal_freq; + u32 lowest_nonlinear_freq; + + struct amd_aperf_mperf cur; + struct amd_aperf_mperf prev; + + u64 freq; + bool boost_supported; + bool hw_prefcore; + + /* EPP feature related attributes*/ + u32 policy; + bool suspended; + u8 epp_default; +}; + +/* + * enum amd_pstate_mode - driver working mode of amd pstate + */ +enum amd_pstate_mode { + AMD_PSTATE_UNDEFINED = 0, + AMD_PSTATE_DISABLE, + AMD_PSTATE_PASSIVE, + AMD_PSTATE_ACTIVE, + AMD_PSTATE_GUIDED, + AMD_PSTATE_MAX, +}; +const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode); +int amd_pstate_get_status(void); +int amd_pstate_update_status(const char *buf, size_t size); + +#endif /* _LINUX_AMD_PSTATE_H */ diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c index 59b19b9975e8..13fed4b9e02b 100644 --- a/drivers/cpufreq/amd_freq_sensitivity.c +++ b/drivers/cpufreq/amd_freq_sensitivity.c @@ -129,7 +129,7 @@ static int __init amd_freq_sensitivity_init(void) pci_dev_put(pcidev); } - if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) + if (rdmsrq_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) return -ENODEV; if (!(val >> CLASS_CODE_SHIFT)) diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c index 021f423705e1..b1d29b7af232 100644 --- a/drivers/cpufreq/apple-soc-cpufreq.c +++ b/drivers/cpufreq/apple-soc-cpufreq.c @@ -22,11 +22,14 @@ #include <linux/pm_opp.h> #include <linux/slab.h> -#define APPLE_DVFS_CMD 0x20 -#define APPLE_DVFS_CMD_BUSY BIT(31) -#define APPLE_DVFS_CMD_SET BIT(25) -#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12) -#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0) +#define APPLE_DVFS_CMD 0x20 +#define APPLE_DVFS_CMD_BUSY BIT(31) +#define APPLE_DVFS_CMD_SET BIT(25) +#define APPLE_DVFS_CMD_PS1_S5L8960X GENMASK(24, 22) +#define APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT 22 +#define APPLE_DVFS_CMD_PS2 GENMASK(15, 12) +#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0) +#define APPLE_DVFS_CMD_PS1_SHIFT 0 /* Same timebase as CPU counter (24MHz) */ #define APPLE_DVFS_LAST_CHG_TIME 0x38 @@ -35,6 +38,9 @@ * Apple ran out of bits and had to shift this in T8112... 
*/ #define APPLE_DVFS_STATUS 0x50 +#define APPLE_DVFS_STATUS_CUR_PS_S5L8960X GENMASK(5, 3) +#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X 3 +#define APPLE_DVFS_STATUS_TGT_PS_S5L8960X GENMASK(2, 0) #define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4) #define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4 #define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0) @@ -52,12 +58,15 @@ #define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16) #define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0) -#define APPLE_DVFS_TRANSITION_TIMEOUT 100 +#define APPLE_DVFS_TRANSITION_TIMEOUT 400 struct apple_soc_cpufreq_info { + bool has_ps2; u64 max_pstate; u64 cur_pstate_mask; u64 cur_pstate_shift; + u64 ps1_mask; + u64 ps1_shift; }; struct apple_cpu_priv { @@ -68,24 +77,46 @@ struct apple_cpu_priv { static struct cpufreq_driver apple_soc_cpufreq_driver; +static const struct apple_soc_cpufreq_info soc_s5l8960x_info = { + .has_ps2 = false, + .max_pstate = 7, + .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_S5L8960X, + .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X, + .ps1_mask = APPLE_DVFS_CMD_PS1_S5L8960X, + .ps1_shift = APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT, +}; + static const struct apple_soc_cpufreq_info soc_t8103_info = { + .has_ps2 = true, .max_pstate = 15, .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103, .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103, + .ps1_mask = APPLE_DVFS_CMD_PS1, + .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT, }; static const struct apple_soc_cpufreq_info soc_t8112_info = { + .has_ps2 = false, .max_pstate = 31, .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112, .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112, + .ps1_mask = APPLE_DVFS_CMD_PS1, + .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT, }; static const struct apple_soc_cpufreq_info soc_default_info = { + .has_ps2 = false, .max_pstate = 15, .cur_pstate_mask = 0, /* fallback */ + .ps1_mask = APPLE_DVFS_CMD_PS1, + .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT, }; -static const struct of_device_id apple_soc_cpufreq_of_match[] = { +static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = { + { + .compatible = "apple,s5l8960x-cluster-cpufreq", + .data = &soc_s5l8960x_info, + }, { .compatible = "apple,t8103-cluster-cpufreq", .data = &soc_t8103_info, @@ -103,13 +134,19 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] = { static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - struct apple_cpu_priv *priv = policy->driver_data; + struct cpufreq_policy *policy; + struct apple_cpu_priv *priv; struct cpufreq_frequency_table *p; unsigned int pstate; + policy = cpufreq_cpu_get_raw(cpu); + if (unlikely(!policy)) + return 0; + + priv = policy->driver_data; + if (priv->info->cur_pstate_mask) { - u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS); + u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS); pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift; } else { @@ -148,9 +185,12 @@ static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy, return -EIO; } - reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2); - reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate); - reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate); + reg &= ~priv->info->ps1_mask; + reg |= pstate << priv->info->ps1_shift; + if (priv->info->has_ps2) { + reg &= ~APPLE_DVFS_CMD_PS2; + reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate); + } reg |= APPLE_DVFS_CMD_SET; writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD); @@ -195,12 +235,6 @@ static int 
apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy, return 0; } -static struct freq_attr *apple_soc_cpufreq_hw_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, /* Filled in below if boost is enabled */ - NULL, -}; - static int apple_soc_cpufreq_init(struct cpufreq_policy *policy) { int ret, i; @@ -275,23 +309,13 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy) transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); if (!transition_latency) - transition_latency = CPUFREQ_ETERNAL; + transition_latency = APPLE_DVFS_TRANSITION_TIMEOUT * NSEC_PER_USEC; policy->cpuinfo.transition_latency = transition_latency; policy->dvfs_possible_from_any_cpu = true; policy->fast_switch_possible = true; policy->suspend_freq = freq_table[0].frequency; - if (policy_has_boost_freq(policy)) { - ret = cpufreq_enable_boost_support(); - if (ret) { - dev_warn(cpu_dev, "failed to enable boost: %d\n", ret); - } else { - apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; - apple_soc_cpufreq_driver.boost_enabled = true; - } - } - return 0; out_free_cpufreq_table: @@ -305,7 +329,7 @@ out_iounmap: return ret; } -static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy) +static void apple_soc_cpufreq_exit(struct cpufreq_policy *policy) { struct apple_cpu_priv *priv = policy->driver_data; @@ -313,8 +337,6 @@ static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy) dev_pm_opp_remove_all_dynamic(priv->cpu_dev); iounmap(priv->reg_base); kfree(priv); - - return 0; } static struct cpufreq_driver apple_soc_cpufreq_driver = { @@ -328,7 +350,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = { .target_index = apple_soc_cpufreq_set_target, .fast_switch = apple_soc_cpufreq_fast_switch, .register_em = cpufreq_register_em_with_opp, - .attr = apple_soc_cpufreq_hw_attr, + .set_boost = cpufreq_boost_set_sw, .suspend = cpufreq_generic_suspend, }; diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index bea41ccabf1f..f28a4435fba7 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -102,11 +102,7 @@ struct armada_37xx_dvfs { }; static struct armada_37xx_dvfs armada_37xx_dvfs[] = { - /* - * The cpufreq scaling for 1.2 GHz variant of the SOC is currently - * unstable because we do not know how to configure it properly. 
- */ - /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */ + {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} }, {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} }, {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} }, diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c index ce5a5641b6dd..5a3545bd0d8d 100644 --- a/drivers/cpufreq/armada-8k-cpufreq.c +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -47,7 +47,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk, { int cpu; - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { struct device *cpu_dev; struct clk *clk; @@ -132,7 +132,7 @@ static int __init armada_8k_cpufreq_init(void) int ret = 0, opps_index = 0, cpu, nb_cpus; struct freq_table *freq_tables; struct device_node *node; - struct cpumask cpus; + static struct cpumask cpus; node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match, NULL); diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c index 39221a9a187a..36051880640b 100644 --- a/drivers/cpufreq/bmips-cpufreq.c +++ b/drivers/cpufreq/bmips-cpufreq.c @@ -121,11 +121,9 @@ static int bmips_cpufreq_target_index(struct cpufreq_policy *policy, return 0; } -static int bmips_cpufreq_exit(struct cpufreq_policy *policy) +static void bmips_cpufreq_exit(struct cpufreq_policy *policy) { kfree(policy->freq_table); - - return 0; } static int bmips_cpufreq_init(struct cpufreq_policy *policy) @@ -152,7 +150,6 @@ static struct cpufreq_driver bmips_cpufreq_driver = { .get = bmips_cpufreq_get, .init = bmips_cpufreq_init, .exit = bmips_cpufreq_exit, - .attr = cpufreq_generic_attr, .name = BMIPS_CPUFREQ_PREFIX, }; diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 1a1857b0a6f4..7b841a086acc 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -474,16 +474,19 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv) rc = brcm_avs_get_pmap(priv, NULL); magic = readl(priv->base + AVS_MBOX_MAGIC); - return (magic == AVS_FIRMWARE_MAGIC) && ((rc != -ENOTSUPP) || - (rc != -EINVAL)); + return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) && + (rc != -EINVAL); } static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + struct private_data *priv; + if (!policy) return 0; - struct private_data *priv = policy->driver_data; + + priv = policy->driver_data; cpufreq_cpu_put(policy); @@ -717,7 +720,6 @@ cpufreq_freq_attr_ro(brcm_avs_voltage); cpufreq_freq_attr_ro(brcm_avs_frequency); static struct freq_attr *brcm_avs_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, &brcm_avs_pstate, &brcm_avs_mode, &brcm_avs_pmap, @@ -774,7 +776,7 @@ static struct platform_driver brcm_avs_cpufreq_platdrv = { .of_match_table = brcm_avs_cpufreq_match, }, .probe = brcm_avs_cpufreq_probe, - .remove_new = brcm_avs_cpufreq_remove, + .remove = brcm_avs_cpufreq_remove, }; module_platform_driver(brcm_avs_cpufreq_platdrv); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 64420d9cfd1e..b7c688a5659c 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -22,7 +22,7 @@ #include <linux/vmalloc.h> #include <uapi/linux/sched/types.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <acpi/cppc_acpi.h> @@ -34,35 +34,15 @@ */ static 
LIST_HEAD(cpu_data_list); -static bool boost_supported; - -struct cppc_workaround_oem_info { - char oem_id[ACPI_OEM_ID_SIZE + 1]; - char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; - u32 oem_revision; -}; - -static struct cppc_workaround_oem_info wa_info[] = { - { - .oem_id = "HISI ", - .oem_table_id = "HIP07 ", - .oem_revision = 0, - }, { - .oem_id = "HISI ", - .oem_table_id = "HIP08 ", - .oem_revision = 0, - } -}; - static struct cpufreq_driver cppc_cpufreq_driver; +#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE static enum { FIE_UNSET = -1, FIE_ENABLED, FIE_DISABLED } fie_disabled = FIE_UNSET; -#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE module_param(fie_disabled, int, 0444); MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)"); @@ -78,7 +58,6 @@ struct cppc_freq_invariance { static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv); static struct kthread_worker *kworker_fie; -static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu); static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data, struct cppc_perf_fb_ctrs *fb_ctrs_t0, struct cppc_perf_fb_ctrs *fb_ctrs_t1); @@ -118,6 +97,9 @@ static void cppc_scale_freq_workfn(struct kthread_work *work) perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs, &fb_ctrs); + if (!perf) + return; + cppc_fi->prev_perf_fb_ctrs = fb_ctrs; perf <<= SCHED_CAPACITY_SHIFT; @@ -224,9 +206,9 @@ static void __init cppc_freq_invariance_init(void) * Fake (unused) bandwidth; workaround to "fix" * priority inheritance. */ - .sched_runtime = 1000000, - .sched_deadline = 10000000, - .sched_period = 10000000, + .sched_runtime = NSEC_PER_MSEC, + .sched_deadline = 10 * NSEC_PER_MSEC, + .sched_period = 10 * NSEC_PER_MSEC, }; int ret; @@ -241,7 +223,7 @@ static void __init cppc_freq_invariance_init(void) if (fie_disabled) return; - kworker_fie = kthread_create_worker(0, "cppc_fie"); + kworker_fie = kthread_run_worker(0, "cppc_fie"); if (IS_ERR(kworker_fie)) { pr_warn("%s: failed to create kworker_fie: %ld\n", __func__, PTR_ERR(kworker_fie)); @@ -291,15 +273,10 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, struct cppc_cpudata *cpu_data = policy->driver_data; unsigned int cpu = policy->cpu; struct cpufreq_freqs freqs; - u32 desired_perf; int ret = 0; - desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq); - /* Return if it is exactly the same perf */ - if (desired_perf == cpu_data->perf_ctrls.desired_perf) - return ret; - - cpu_data->perf_ctrls.desired_perf = desired_perf; + cpu_data->perf_ctrls.desired_perf = + cppc_khz_to_perf(&cpu_data->perf_caps, target_freq); freqs.old = policy->cur; freqs.new = target_freq; @@ -425,6 +402,9 @@ static int cppc_get_cpu_power(struct device *cpu_dev, struct cppc_cpudata *cpu_data; policy = cpufreq_cpu_get_raw(cpu_dev->id); + if (!policy) + return -EINVAL; + cpu_data = policy->driver_data; perf_caps = &cpu_data->perf_caps; max_cap = arch_scale_cpu_capacity(cpu_dev->id); @@ -492,6 +472,9 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz, int step; policy = cpufreq_cpu_get_raw(cpu_dev->id); + if (!policy) + return -EINVAL; + cpu_data = policy->driver_data; perf_caps = &cpu_data->perf_caps; max_cap = arch_scale_cpu_capacity(cpu_dev->id); @@ -626,7 +609,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) * Section 8.4.7.1.1.5 of ACPI 6.1 spec) */ policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf); - policy->max = cppc_perf_to_khz(caps, caps->nominal_perf); + policy->max = cppc_perf_to_khz(caps, 
policy->boost_enabled ? + caps->highest_perf : caps->nominal_perf); /* * Set cpuinfo.min_freq to Lowest to make the full range of performance @@ -634,7 +618,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) * nonlinear perf */ policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf); - policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf); + policy->cpuinfo.max_freq = policy->max; policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu); policy->shared_type = cpu_data->shared_type; @@ -667,7 +651,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) * is supported. */ if (caps->highest_perf > caps->nominal_perf) - boost_supported = true; + policy->boost_supported = true; /* Set policy->cur to max now. The governors will adjust later. */ policy->cur = cppc_perf_to_khz(caps, caps->highest_perf); @@ -688,7 +672,7 @@ out: return ret; } -static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct cppc_cpudata *cpu_data = policy->driver_data; struct cppc_perf_caps *caps = &cpu_data->perf_caps; @@ -705,7 +689,6 @@ static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy) caps->lowest_perf, cpu, ret); cppc_cpufreq_put_cpu_data(policy); - return 0; } static inline u64 get_delta(u64 t1, u64 t0) @@ -730,35 +713,72 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data, delta_delivered = get_delta(fb_ctrs_t1->delivered, fb_ctrs_t0->delivered); - /* Check to avoid divide-by zero and invalid delivered_perf */ + /* + * Avoid divide-by zero and unchanged feedback counters. + * Leave it for callers to handle. + */ if (!delta_reference || !delta_delivered) - return cpu_data->perf_ctrls.desired_perf; + return 0; return (reference_perf * delta_delivered) / delta_reference; } +static int cppc_get_perf_ctrs_sample(int cpu, + struct cppc_perf_fb_ctrs *fb_ctrs_t0, + struct cppc_perf_fb_ctrs *fb_ctrs_t1) +{ + int ret; + + ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0); + if (ret) + return ret; + + udelay(2); /* 2usec delay between sampling */ + + return cppc_get_perf_ctrs(cpu, fb_ctrs_t1); +} + static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) { struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0}; struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - struct cppc_cpudata *cpu_data = policy->driver_data; + struct cppc_cpudata *cpu_data; u64 delivered_perf; int ret; - cpufreq_cpu_put(policy); - - ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0); - if (ret) + if (!policy) return 0; - udelay(2); /* 2usec delay between sampling */ + cpu_data = policy->driver_data; - ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1); - if (ret) - return 0; + cpufreq_cpu_put(policy); + + ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1); + if (ret) { + if (ret == -EFAULT) + /* Any of the associated CPPC regs is 0. */ + goto out_invalid_counters; + else + return 0; + } delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0, &fb_ctrs_t1); + if (!delivered_perf) + goto out_invalid_counters; + + return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf); + +out_invalid_counters: + /* + * Feedback counters could be unchanged or 0 when a cpu enters a + * low-power idle state, e.g. clock-gated or power-gated. + * Use desired perf for reflecting frequency. Get the latest register + * value first as some platforms may update the actual delivered perf + * there; if failed, resort to the cached desired perf. 
+ */ + if (cppc_get_desired_perf(cpu, &delivered_perf)) + delivered_perf = cpu_data->perf_ctrls.desired_perf; return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf); } @@ -769,11 +789,6 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state) struct cppc_perf_caps *caps = &cpu_data->perf_caps; int ret; - if (!boost_supported) { - pr_err("BOOST not supported by CPU or firmware\n"); - return -EINVAL; - } - if (state) policy->max = cppc_perf_to_khz(caps, caps->highest_perf); else @@ -793,10 +808,119 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf); } + +static ssize_t show_auto_select(struct cpufreq_policy *policy, char *buf) +{ + bool val; + int ret; + + ret = cppc_get_auto_sel(policy->cpu, &val); + + /* show "<unsupported>" when this register is not supported by cpc */ + if (ret == -EOPNOTSUPP) + return sysfs_emit(buf, "<unsupported>\n"); + + if (ret) + return ret; + + return sysfs_emit(buf, "%d\n", val); +} + +static ssize_t store_auto_select(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + bool val; + int ret; + + ret = kstrtobool(buf, &val); + if (ret) + return ret; + + ret = cppc_set_auto_sel(policy->cpu, val); + if (ret) + return ret; + + return count; +} + +static ssize_t show_auto_act_window(struct cpufreq_policy *policy, char *buf) +{ + u64 val; + int ret; + + ret = cppc_get_auto_act_window(policy->cpu, &val); + + /* show "<unsupported>" when this register is not supported by cpc */ + if (ret == -EOPNOTSUPP) + return sysfs_emit(buf, "<unsupported>\n"); + + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", val); +} + +static ssize_t store_auto_act_window(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + u64 usec; + int ret; + + ret = kstrtou64(buf, 0, &usec); + if (ret) + return ret; + + ret = cppc_set_auto_act_window(policy->cpu, usec); + if (ret) + return ret; + + return count; +} + +static ssize_t show_energy_performance_preference_val(struct cpufreq_policy *policy, char *buf) +{ + u64 val; + int ret; + + ret = cppc_get_epp_perf(policy->cpu, &val); + + /* show "<unsupported>" when this register is not supported by cpc */ + if (ret == -EOPNOTSUPP) + return sysfs_emit(buf, "<unsupported>\n"); + + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", val); +} + +static ssize_t store_energy_performance_preference_val(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + u64 val; + int ret; + + ret = kstrtou64(buf, 0, &val); + if (ret) + return ret; + + ret = cppc_set_epp(policy->cpu, val); + if (ret) + return ret; + + return count; +} + cpufreq_freq_attr_ro(freqdomain_cpus); +cpufreq_freq_attr_rw(auto_select); +cpufreq_freq_attr_rw(auto_act_window); +cpufreq_freq_attr_rw(energy_performance_preference_val); static struct freq_attr *cppc_cpufreq_attr[] = { &freqdomain_cpus, + &auto_select, + &auto_act_window, + &energy_performance_preference_val, NULL, }; @@ -813,52 +937,6 @@ static struct cpufreq_driver cppc_cpufreq_driver = { .name = "cppc_cpufreq", }; -/* - * HISI platform does not support delivered performance counter and - * reference performance counter. It can calculate the performance using the - * platform specific mechanism. We reuse the desired performance register to - * store the real performance calculated by the platform. 
- */ -static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - struct cppc_cpudata *cpu_data = policy->driver_data; - u64 desired_perf; - int ret; - - cpufreq_cpu_put(policy); - - ret = cppc_get_desired_perf(cpu, &desired_perf); - if (ret < 0) - return -EIO; - - return cppc_perf_to_khz(&cpu_data->perf_caps, desired_perf); -} - -static void cppc_check_hisi_workaround(void) -{ - struct acpi_table_header *tbl; - acpi_status status = AE_OK; - int i; - - status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl); - if (ACPI_FAILURE(status) || !tbl) - return; - - for (i = 0; i < ARRAY_SIZE(wa_info); i++) { - if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && - !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && - wa_info[i].oem_revision == tbl->oem_revision) { - /* Overwrite the get() callback */ - cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate; - fie_disabled = FIE_DISABLED; - break; - } - } - - acpi_put_table(tbl); -} - static int __init cppc_cpufreq_init(void) { int ret; @@ -866,7 +944,6 @@ static int __init cppc_cpufreq_init(void) if (!acpi_cpc_valid()) return -ENODEV; - cppc_check_hisi_workaround(); cppc_freq_invariance_init(); populate_efficiency_class(); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index b993a498084b..a010da0f6337 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -103,7 +103,13 @@ static const struct of_device_id allowlist[] __initconst = { * platforms using "operating-points-v2" property. */ static const struct of_device_id blocklist[] __initconst = { + { .compatible = "airoha,en7581", }, + + { .compatible = "allwinner,sun50i-a100" }, { .compatible = "allwinner,sun50i-h6", }, + { .compatible = "allwinner,sun50i-h616", }, + { .compatible = "allwinner,sun50i-h618", }, + { .compatible = "allwinner,sun50i-h700", }, { .compatible = "apple,arm-platform", }, @@ -163,11 +169,13 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "qcom,sm6350", }, { .compatible = "qcom,sm6375", }, { .compatible = "qcom,sm7225", }, + { .compatible = "qcom,sm7325", }, { .compatible = "qcom,sm8150", }, { .compatible = "qcom,sm8250", }, { .compatible = "qcom,sm8350", }, { .compatible = "qcom,sm8450", }, { .compatible = "qcom,sm8550", }, + { .compatible = "qcom,sm8650", }, { .compatible = "st,stih407", }, { .compatible = "st,stih410", }, @@ -195,19 +203,18 @@ static const struct of_device_id blocklist[] __initconst = { static bool __init cpu0_node_has_opp_v2_prop(void) { - struct device_node *np = of_cpu_device_node_get(0); + struct device_node *np __free(device_node) = of_cpu_device_node_get(0); bool ret = false; if (of_property_present(np, "operating-points-v2")) ret = true; - of_node_put(np); return ret; } static int __init cpufreq_dt_platdev_init(void) { - struct device_node *np = of_find_node_by_path("/"); + struct device_node *np __free(device_node) = of_find_node_by_path("/"); const struct of_device_id *match; const void *data = NULL; @@ -223,14 +230,11 @@ static int __init cpufreq_dt_platdev_init(void) if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np)) goto create_pdev; - of_node_put(np); return -ENODEV; create_pdev: - of_node_put(np); return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt", -1, data, sizeof(struct cpufreq_dt_platform_data))); } core_initcall(cpufreq_dt_platdev_init); -MODULE_LICENSE("GPL"); diff --git 
a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 2d83bbc65dd0..e80dd982a3e2 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -36,12 +36,6 @@ struct private_data { static LIST_HEAD(priv_list); -static struct freq_attr *cpufreq_dt_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, /* Extra space for boost-attr if required */ - NULL, -}; - static struct private_data *cpufreq_dt_find_data(int cpu) { struct private_data *priv; @@ -68,36 +62,22 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index) */ static const char *find_supply_name(struct device *dev) { - struct device_node *np; - struct property *pp; + struct device_node *np __free(device_node) = of_node_get(dev->of_node); int cpu = dev->id; - const char *name = NULL; - - np = of_node_get(dev->of_node); /* This must be valid for sure */ if (WARN_ON(!np)) return NULL; /* Try "cpu0" for older DTs */ - if (!cpu) { - pp = of_find_property(np, "cpu0-supply", NULL); - if (pp) { - name = "cpu0"; - goto node_put; - } - } + if (!cpu && of_property_present(np, "cpu0-supply")) + return "cpu0"; - pp = of_find_property(np, "cpu-supply", NULL); - if (pp) { - name = "cpu"; - goto node_put; - } + if (of_property_present(np, "cpu-supply")) + return "cpu"; dev_dbg(dev, "no regulator for cpu%d\n", cpu); -node_put: - of_node_put(np); - return name; + return NULL; } static int cpufreq_init(struct cpufreq_policy *policy) @@ -134,21 +114,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = transition_latency; policy->dvfs_possible_from_any_cpu = true; - /* Support turbo/boost mode */ - if (policy_has_boost_freq(policy)) { - /* This gets disabled by core on driver unregister */ - ret = cpufreq_enable_boost_support(); - if (ret) - goto out_clk_put; - cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; - } - return 0; - -out_clk_put: - clk_put(cpu_clk); - - return ret; } static int cpufreq_online(struct cpufreq_policy *policy) @@ -166,10 +132,9 @@ static int cpufreq_offline(struct cpufreq_policy *policy) return 0; } -static int cpufreq_exit(struct cpufreq_policy *policy) +static void cpufreq_exit(struct cpufreq_policy *policy) { clk_put(policy->clk); - return 0; } static struct cpufreq_driver dt_cpufreq_driver = { @@ -184,7 +149,7 @@ static struct cpufreq_driver dt_cpufreq_driver = { .offline = cpufreq_offline, .register_em = cpufreq_register_em_with_opp, .name = "cpufreq-dt", - .attr = cpufreq_dt_attr, + .set_boost = cpufreq_boost_set_sw, .suspend = cpufreq_generic_suspend, }; @@ -318,7 +283,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev) int ret, cpu; /* Request resources early so we can return in case of -EPROBE_DEFER */ - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { ret = dt_cpufreq_early_init(&pdev->dev, cpu); if (ret) goto err; @@ -360,7 +325,7 @@ static struct platform_driver dt_cpufreq_platdrv = { .name = "cpufreq-dt", }, .probe = dt_cpufreq_probe, - .remove_new = dt_cpufreq_remove, + .remove = dt_cpufreq_remove, }; module_platform_driver(dt_cpufreq_platdrv); diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index f7a7bcf6f52e..fedad1081973 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -359,11 +359,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) return 0; } -static int nforce2_cpu_exit(struct cpufreq_policy *policy) -{ - return 0; -} - static struct cpufreq_driver nforce2_driver = { .name = "nforce2", 
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, @@ -371,7 +366,6 @@ static struct cpufreq_driver nforce2_driver = { .target = nforce2_target, .get = nforce2_get, .init = nforce2_cpu_init, - .exit = nforce2_cpu_exit, }; #ifdef MODULE diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 66e10a19d76a..d7426e1d8bdd 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -25,6 +25,7 @@ #include <linux/mutex.h> #include <linux/pm_qos.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> #include <linux/tick.h> @@ -87,6 +88,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, struct cpufreq_governor *new_gov, unsigned int new_pol); static bool cpufreq_boost_supported(void); +static int cpufreq_boost_trigger_state(int state); /* * Two notifier lists: the "policy" list is involved in the @@ -253,51 +255,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(cpufreq_cpu_put); -/** - * cpufreq_cpu_release - Unlock a policy and decrement its usage counter. - * @policy: cpufreq policy returned by cpufreq_cpu_acquire(). - */ -void cpufreq_cpu_release(struct cpufreq_policy *policy) -{ - if (WARN_ON(!policy)) - return; - - lockdep_assert_held(&policy->rwsem); - - up_write(&policy->rwsem); - - cpufreq_cpu_put(policy); -} - -/** - * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it. - * @cpu: CPU to find the policy for. - * - * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and - * if the policy returned by it is not NULL, acquire its rwsem for writing. - * Return the policy if it is active or release it and return NULL otherwise. - * - * The policy returned by this function has to be released with the help of - * cpufreq_cpu_release() in order to release its rwsem and balance its usage - * counter properly. 
- */ -struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - - if (!policy) - return NULL; - - down_write(&policy->rwsem); - - if (policy_is_inactive(policy)) { - cpufreq_cpu_release(policy); - return NULL; - } - - return policy; -} - /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * *********************************************************************/ @@ -534,16 +491,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); static unsigned int __resolve_freq(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int target_freq, + unsigned int min, unsigned int max, + unsigned int relation) { unsigned int idx; - target_freq = clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, min, max); if (!policy->freq_table) return target_freq; - idx = cpufreq_frequency_table_target(policy, target_freq, relation); + idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation); policy->cached_resolved_idx = idx; policy->cached_target_freq = target_freq; return policy->freq_table[idx].frequency; @@ -563,7 +522,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy, unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, unsigned int target_freq) { - return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE); + unsigned int min = READ_ONCE(policy->min); + unsigned int max = READ_ONCE(policy->max); + + /* + * If this function runs in parallel with cpufreq_set_policy(), it may + * read policy->min before the update and policy->max after the update + * or the other way around, so there is no ordering guarantee. + * + * Resolve this by always honoring the max (in case it comes from + * thermal throttling or similar). + */ + if (unlikely(min > max)) + min = max; + + return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE); } EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); @@ -575,30 +548,11 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy) return policy->transition_delay_us; latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; - if (latency) { - unsigned int max_delay_us = 2 * MSEC_PER_SEC; - - /* - * If the platform already has high transition_latency, use it - * as-is. - */ - if (latency > max_delay_us) - return latency; + if (latency) + /* Give a 50% breathing room between updates */ + return latency + (latency >> 1); - /* - * For platforms that can change the frequency very fast (< 2 - * us), the above formula gives a decent transition delay. But - * for platforms where transition_latency is in milliseconds, it - * ends up giving unrealistic values. - * - * Cap the default transition delay to 2 ms, which seems to be - * a reasonable amount of time after which we should reevaluate - * the frequency. 
- */ - return min(latency * LATENCY_MULTIPLIER, max_delay_us); - } - - return LATENCY_MULTIPLIER; + return USEC_PER_MSEC; } EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); @@ -608,26 +562,25 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); static ssize_t show_boost(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); + return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled); } static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - int ret, enable; + bool enable; - ret = sscanf(buf, "%d", &enable); - if (ret != 1 || enable < 0 || enable > 1) + if (kstrtobool(buf, &enable)) return -EINVAL; if (cpufreq_boost_trigger_state(enable)) { pr_err("%s: Cannot %s BOOST!\n", - __func__, enable ? "enable" : "disable"); + __func__, str_enable_disable(enable)); return -EINVAL; } pr_debug("%s: cpufreq BOOST %s\n", - __func__, enable ? "enabled" : "disabled"); + __func__, str_enabled_disabled(enable)); return count; } @@ -638,33 +591,42 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf) return sysfs_emit(buf, "%d\n", policy->boost_enabled); } +static int policy_set_boost(struct cpufreq_policy *policy, bool enable) +{ + int ret; + + if (policy->boost_enabled == enable) + return 0; + + policy->boost_enabled = enable; + + ret = cpufreq_driver->set_boost(policy, enable); + if (ret) + policy->boost_enabled = !policy->boost_enabled; + + return ret; +} + static ssize_t store_local_boost(struct cpufreq_policy *policy, const char *buf, size_t count) { - int ret, enable; + int ret; + bool enable; - ret = kstrtoint(buf, 10, &enable); - if (ret || enable < 0 || enable > 1) + if (kstrtobool(buf, &enable)) return -EINVAL; if (!cpufreq_driver->boost_enabled) return -EINVAL; - if (policy->boost_enabled == enable) - return count; - - policy->boost_enabled = enable; - - cpus_read_lock(); - ret = cpufreq_driver->set_boost(policy, enable); - cpus_read_unlock(); + if (!policy->boost_supported) + return -EINVAL; - if (ret) { - policy->boost_enabled = !policy->boost_enabled; - return ret; - } + ret = policy_set_boost(policy, enable); + if (!ret) + return count; - return count; + return ret; } static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost); @@ -739,7 +701,7 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor) static ssize_t show_##file_name \ (struct cpufreq_policy *policy, char *buf) \ { \ - return sprintf(buf, "%u\n", policy->object); \ + return sysfs_emit(buf, "%u\n", policy->object); \ } show_one(cpuinfo_min_freq, cpuinfo.min_freq); @@ -748,23 +710,31 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); show_one(scaling_min_freq, min); show_one(scaling_max_freq, max); -__weak unsigned int arch_freq_get_on_cpu(int cpu) +__weak int arch_freq_get_on_cpu(int cpu) { - return 0; + return -EOPNOTSUPP; +} + +static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy) +{ + return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP; } static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) { ssize_t ret; - unsigned int freq; + int freq; + + freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ) + ? 
arch_freq_get_on_cpu(policy->cpu) + : 0; - freq = arch_freq_get_on_cpu(policy->cpu); - if (freq) - ret = sprintf(buf, "%u\n", freq); + if (freq > 0) + ret = sysfs_emit(buf, "%u\n", freq); else if (cpufreq_driver->setpolicy && cpufreq_driver->get) - ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); + ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu)); else - ret = sprintf(buf, "%u\n", policy->cur); + ret = sysfs_emit(buf, "%u\n", policy->cur); return ret; } @@ -798,9 +768,22 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, unsigned int cur_freq = __cpufreq_get(policy); if (cur_freq) - return sprintf(buf, "%u\n", cur_freq); + return sysfs_emit(buf, "%u\n", cur_freq); - return sprintf(buf, "<unknown>\n"); + return sysfs_emit(buf, "<unknown>\n"); +} + +/* + * show_cpuinfo_avg_freq - average CPU frequency as detected by hardware + */ +static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy, + char *buf) +{ + int avg_freq = arch_freq_get_on_cpu(policy->cpu); + + if (avg_freq > 0) + return sysfs_emit(buf, "%u\n", avg_freq); + return avg_freq != 0 ? avg_freq : -EINVAL; } /* @@ -809,12 +792,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) { if (policy->policy == CPUFREQ_POLICY_POWERSAVE) - return sprintf(buf, "powersave\n"); + return sysfs_emit(buf, "powersave\n"); else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) - return sprintf(buf, "performance\n"); + return sysfs_emit(buf, "performance\n"); else if (policy->governor) - return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", - policy->governor->name); + return sysfs_emit(buf, "%s\n", policy->governor->name); return -EINVAL; } @@ -824,7 +806,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) static ssize_t store_scaling_governor(struct cpufreq_policy *policy, const char *buf, size_t count) { - char str_governor[16]; + char str_governor[CPUFREQ_NAME_LEN]; int ret; ret = sscanf(buf, "%15s", str_governor); @@ -873,7 +855,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, struct cpufreq_governor *t; if (!has_target()) { - i += sprintf(buf, "performance powersave"); + i += sysfs_emit(buf, "performance powersave"); goto out; } @@ -882,11 +864,11 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2))) break; - i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); + i += sysfs_emit_at(buf, i, "%s ", t->name); } mutex_unlock(&cpufreq_governor_mutex); out: - i += sprintf(&buf[i], "\n"); + i += sysfs_emit_at(buf, i, "\n"); return i; } @@ -896,7 +878,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) unsigned int cpu; for_each_cpu(cpu, mask) { - i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu); + i += sysfs_emit_at(buf, i, "%u ", cpu); if (i >= (PAGE_SIZE - 5)) break; } @@ -904,7 +886,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) /* Remove the extra space at the end */ i--; - i += sprintf(&buf[i], "\n"); + i += sysfs_emit_at(buf, i, "\n"); return i; } EXPORT_SYMBOL_GPL(cpufreq_show_cpus); @@ -935,9 +917,9 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, if (!policy->governor || !policy->governor->store_setspeed) return -EINVAL; - ret = sscanf(buf, "%u", &freq); - if (ret != 1) - return -EINVAL; + ret = kstrtouint(buf, 0, &freq); + if (ret) + return ret; 
policy->governor->store_setspeed(policy, freq); @@ -947,7 +929,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) { if (!policy->governor || !policy->governor->show_setspeed) - return sprintf(buf, "<unsupported>\n"); + return sysfs_emit(buf, "<unsupported>\n"); return policy->governor->show_setspeed(policy, buf); } @@ -961,11 +943,12 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) int ret; ret = cpufreq_driver->bios_limit(policy->cpu, &limit); if (!ret) - return sprintf(buf, "%u\n", limit); - return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); + return sysfs_emit(buf, "%u\n", limit); + return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq); } cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); +cpufreq_freq_attr_ro(cpuinfo_avg_freq); cpufreq_freq_attr_ro(cpuinfo_min_freq); cpufreq_freq_attr_ro(cpuinfo_max_freq); cpufreq_freq_attr_ro(cpuinfo_transition_latency); @@ -1003,17 +986,16 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); - ssize_t ret = -EBUSY; if (!fattr->show) return -EIO; - down_read(&policy->rwsem); + guard(cpufreq_policy_read)(policy); + if (likely(!policy_is_inactive(policy))) - ret = fattr->show(policy, buf); - up_read(&policy->rwsem); + return fattr->show(policy, buf); - return ret; + return -EBUSY; } static ssize_t store(struct kobject *kobj, struct attribute *attr, @@ -1021,17 +1003,16 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, { struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); - ssize_t ret = -EBUSY; if (!fattr->store) return -EIO; - down_write(&policy->rwsem); + guard(cpufreq_policy_write)(policy); + if (likely(!policy_is_inactive(policy))) - ret = fattr->store(policy, buf, count); - up_write(&policy->rwsem); + return fattr->store(policy, buf, count); - return ret; + return -EBUSY; } static void cpufreq_sysfs_release(struct kobject *kobj) @@ -1079,6 +1060,21 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) struct freq_attr **drv_attr; int ret = 0; + /* Attributes that need freq_table */ + if (policy->freq_table) { + ret = sysfs_create_file(&policy->kobj, + &cpufreq_freq_attr_scaling_available_freqs.attr); + if (ret) + return ret; + + if (cpufreq_boost_supported()) { + ret = sysfs_create_file(&policy->kobj, + &cpufreq_freq_attr_scaling_boost_freqs.attr); + if (ret) + return ret; + } + } + /* set up files for this cpu device */ drv_attr = cpufreq_driver->attr; while (drv_attr && *drv_attr) { @@ -1093,6 +1089,12 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) return ret; } + if (cpufreq_avg_freq_supported(policy)) { + ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr); + if (ret) + return ret; + } + ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); if (ret) return ret; @@ -1168,7 +1170,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp if (cpumask_test_cpu(cpu, policy->cpus)) return 0; - down_write(&policy->rwsem); + guard(cpufreq_policy_write)(policy); + if (has_target()) cpufreq_stop_governor(policy); @@ -1179,7 +1182,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp if (ret) pr_err("%s: Failed to start governor\n", __func__); } - up_write(&policy->rwsem); + return ret; } @@ -1199,9 +1202,10 @@ static void 
handle_update(struct work_struct *work) container_of(work, struct cpufreq_policy, update); pr_debug("handle_update for cpu %u called\n", policy->cpu); - down_write(&policy->rwsem); + + guard(cpufreq_policy_write)(policy); + refresh_frequency_limits(policy); - up_write(&policy->rwsem); } static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq, @@ -1227,11 +1231,11 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) struct kobject *kobj; struct completion *cmp; - down_write(&policy->rwsem); - cpufreq_stats_free_table(policy); - kobj = &policy->kobj; - cmp = &policy->kobj_unregister; - up_write(&policy->rwsem); + scoped_guard(cpufreq_policy_write, policy) { + cpufreq_stats_free_table(policy); + kobj = &policy->kobj; + cmp = &policy->kobj_unregister; + } kobject_put(kobj); /* @@ -1307,7 +1311,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) init_waitqueue_head(&policy->transition_wait); INIT_WORK(&policy->update, handle_update); - policy->cpu = cpu; return policy; err_min_qos_notifier: @@ -1376,35 +1379,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy) kfree(policy); } -static int cpufreq_online(unsigned int cpu) +static int cpufreq_policy_online(struct cpufreq_policy *policy, + unsigned int cpu, bool new_policy) { - struct cpufreq_policy *policy; - bool new_policy; unsigned long flags; unsigned int j; int ret; - pr_debug("%s: bringing CPU%u online\n", __func__, cpu); + guard(cpufreq_policy_write)(policy); - /* Check if this CPU already has a policy to manage it */ - policy = per_cpu(cpufreq_cpu_data, cpu); - if (policy) { - WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); - if (!policy_is_inactive(policy)) - return cpufreq_add_policy_cpu(policy, cpu); - - /* This is the only online CPU for the policy. Start over. */ - new_policy = false; - down_write(&policy->rwsem); - policy->cpu = cpu; - policy->governor = NULL; - } else { - new_policy = true; - policy = cpufreq_policy_alloc(cpu); - if (!policy) - return -ENOMEM; - down_write(&policy->rwsem); - } + policy->cpu = cpu; + policy->governor = NULL; if (!new_policy && cpufreq_driver->online) { /* Recover policy->cpus using related_cpus */ @@ -1427,12 +1412,9 @@ static int cpufreq_online(unsigned int cpu) if (ret) { pr_debug("%s: %d: initialization failed\n", __func__, __LINE__); - goto out_free_policy; + goto out_clear_policy; } - /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */ - policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy); - /* * The initialization has succeeded and the policy is online. * If there is a problem with its frequency table, take it @@ -1495,6 +1477,10 @@ static int cpufreq_online(unsigned int cpu) blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_CREATE_POLICY, policy); + } else { + ret = freq_qos_update_request(policy->max_freq_req, policy->max); + if (ret < 0) + goto out_destroy_policy; } if (cpufreq_driver->get && has_target()) { @@ -1540,7 +1526,7 @@ static int cpufreq_online(unsigned int cpu) * frequency for longer duration. Hence, a BUG_ON(). 
*/ BUG_ON(ret); - pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n", + pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n", __func__, policy->cpu, old_freq, policy->cur); } } @@ -1558,7 +1544,7 @@ static int cpufreq_online(unsigned int cpu) /* * Register with the energy model before - * sugov_eas_rebuild_sd() is called, which will result + * em_rebuild_sched_domains() is called, which will result * in rebuilding of the sched domains, which should only be done * once the energy model is properly initialized for the policy * first. @@ -1577,20 +1563,6 @@ static int cpufreq_online(unsigned int cpu) goto out_destroy_policy; } - up_write(&policy->rwsem); - - kobject_uevent(&policy->kobj, KOBJ_ADD); - - /* Callback for handling stuff after policy is ready */ - if (cpufreq_driver->ready) - cpufreq_driver->ready(policy); - - /* Register cpufreq cooling only for a new policy */ - if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver)) - policy->cdev = of_cpufreq_cooling_register(policy); - - pr_debug("initialization complete\n"); - return 0; out_destroy_policy: @@ -1605,14 +1577,72 @@ out_exit_policy: if (cpufreq_driver->exit) cpufreq_driver->exit(policy); -out_free_policy: +out_clear_policy: cpumask_clear(policy->cpus); - up_write(&policy->rwsem); - cpufreq_policy_free(policy); return ret; } +static int cpufreq_online(unsigned int cpu) +{ + struct cpufreq_policy *policy; + bool new_policy; + int ret; + + pr_debug("%s: bringing CPU%u online\n", __func__, cpu); + + /* Check if this CPU already has a policy to manage it */ + policy = per_cpu(cpufreq_cpu_data, cpu); + if (policy) { + WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); + if (!policy_is_inactive(policy)) + return cpufreq_add_policy_cpu(policy, cpu); + + /* This is the only online CPU for the policy. Start over. */ + new_policy = false; + } else { + new_policy = true; + policy = cpufreq_policy_alloc(cpu); + if (!policy) + return -ENOMEM; + } + + ret = cpufreq_policy_online(policy, cpu, new_policy); + if (ret) { + cpufreq_policy_free(policy); + return ret; + } + + kobject_uevent(&policy->kobj, KOBJ_ADD); + + /* Callback for handling stuff after policy is ready */ + if (cpufreq_driver->ready) + cpufreq_driver->ready(policy); + + /* Register cpufreq cooling only for a new policy */ + if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver)) + policy->cdev = of_cpufreq_cooling_register(policy); + + /* + * Let the per-policy boost flag mirror the cpufreq_driver boost during + * initialization for a new policy. For an existing policy, maintain the + * previous boost value unless global boost is disabled. + */ + if (cpufreq_driver->set_boost && policy->boost_supported && + (new_policy || !cpufreq_boost_enabled())) { + ret = policy_set_boost(policy, cpufreq_boost_enabled()); + if (ret) { + /* If the set_boost fails, the online operation is not affected */ + pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu, + str_enable_disable(cpufreq_boost_enabled())); + } + } + + pr_debug("initialization complete\n"); + + return 0; +} + /** * cpufreq_add_dev - the cpufreq interface for a CPU device. * @dev: CPU device. 
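Note on the boost handling in the reworked cpufreq_online() above: the new code syncs the per-policy boost flag with the global one only when the policy is brand new or when global boost has been turned off, so a re-onlined policy otherwise keeps its previous setting. The stand-alone C sketch below models just that decision with simplified stand-in types; it is not the kernel API, and the cpufreq_driver->set_boost callback check is omitted for brevity.

/*
 * Minimal userspace model of the boost sync at the end of the new
 * cpufreq_online(): new policies mirror the global boost flag, existing
 * policies are only touched when global boost is disabled.
 */
#include <stdbool.h>
#include <stdio.h>

struct policy { bool boost_supported; bool boost_enabled; };

static bool global_boost_enabled = true;

static void sync_boost(struct policy *p, bool new_policy)
{
	if (!p->boost_supported)
		return;
	/* New policy, or global boost disabled: follow the global flag. */
	if (new_policy || !global_boost_enabled)
		p->boost_enabled = global_boost_enabled;
}

int main(void)
{
	struct policy fresh = { .boost_supported = true, .boost_enabled = false };
	struct policy reonlined = { .boost_supported = true, .boost_enabled = true };

	sync_boost(&fresh, true);       /* new policy mirrors global flag -> enabled */
	global_boost_enabled = false;
	sync_boost(&reonlined, false);  /* global boost now off -> forced off */

	printf("%d %d\n", fresh.boost_enabled, reonlined.boost_enabled); /* prints: 1 0 */
	return 0;
}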
@@ -1679,10 +1709,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy) */ if (cpufreq_driver->offline) { cpufreq_driver->offline(policy); - } else if (cpufreq_driver->exit) { - cpufreq_driver->exit(policy); - policy->freq_table = NULL; + return; } + + if (cpufreq_driver->exit) + cpufreq_driver->exit(policy); + + policy->freq_table = NULL; } static int cpufreq_offline(unsigned int cpu) @@ -1697,11 +1730,10 @@ static int cpufreq_offline(unsigned int cpu) return 0; } - down_write(&policy->rwsem); + guard(cpufreq_policy_write)(policy); __cpufreq_offline(cpu, policy); - up_write(&policy->rwsem); return 0; } @@ -1718,33 +1750,29 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) if (!policy) return; - down_write(&policy->rwsem); + scoped_guard(cpufreq_policy_write, policy) { + if (cpu_online(cpu)) + __cpufreq_offline(cpu, policy); - if (cpu_online(cpu)) - __cpufreq_offline(cpu, policy); + remove_cpu_dev_symlink(policy, cpu, dev); - remove_cpu_dev_symlink(policy, cpu, dev); + if (!cpumask_empty(policy->real_cpus)) + return; - if (!cpumask_empty(policy->real_cpus)) { - up_write(&policy->rwsem); - return; - } + /* + * Unregister cpufreq cooling once all the CPUs of the policy + * are removed. + */ + if (cpufreq_thermal_control_enabled(cpufreq_driver)) { + cpufreq_cooling_unregister(policy->cdev); + policy->cdev = NULL; + } - /* - * Unregister cpufreq cooling once all the CPUs of the policy are - * removed. - */ - if (cpufreq_thermal_control_enabled(cpufreq_driver)) { - cpufreq_cooling_unregister(policy->cdev); - policy->cdev = NULL; + /* We did light-weight exit earlier, do full tear down now */ + if (cpufreq_driver->offline && cpufreq_driver->exit) + cpufreq_driver->exit(policy); } - /* We did light-weight exit earlier, do full tear down now */ - if (cpufreq_driver->offline) - cpufreq_driver->exit(policy); - - up_write(&policy->rwsem); - cpufreq_policy_free(policy); } @@ -1814,27 +1842,26 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b */ unsigned int cpufreq_quick_get(unsigned int cpu) { - struct cpufreq_policy *policy; - unsigned int ret_freq = 0; + struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL; unsigned long flags; read_lock_irqsave(&cpufreq_driver_lock, flags); if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) { - ret_freq = cpufreq_driver->get(cpu); + unsigned int ret_freq = cpufreq_driver->get(cpu); + read_unlock_irqrestore(&cpufreq_driver_lock, flags); + return ret_freq; } read_unlock_irqrestore(&cpufreq_driver_lock, flags); policy = cpufreq_cpu_get(cpu); - if (policy) { - ret_freq = policy->cur; - cpufreq_cpu_put(policy); - } + if (policy) + return policy->cur; - return ret_freq; + return 0; } EXPORT_SYMBOL(cpufreq_quick_get); @@ -1846,15 +1873,13 @@ EXPORT_SYMBOL(cpufreq_quick_get); */ unsigned int cpufreq_quick_get_max(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - unsigned int ret_freq = 0; + struct cpufreq_policy *policy __free(put_cpufreq_policy); - if (policy) { - ret_freq = policy->max; - cpufreq_cpu_put(policy); - } + policy = cpufreq_cpu_get(cpu); + if (policy) + return policy->max; - return ret_freq; + return 0; } EXPORT_SYMBOL(cpufreq_quick_get_max); @@ -1866,15 +1891,13 @@ EXPORT_SYMBOL(cpufreq_quick_get_max); */ __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - unsigned int ret_freq = 0; + struct cpufreq_policy *policy 
__free(put_cpufreq_policy); - if (policy) { - ret_freq = policy->cpuinfo.max_freq; - cpufreq_cpu_put(policy); - } + policy = cpufreq_cpu_get(cpu); + if (policy) + return policy->cpuinfo.max_freq; - return ret_freq; + return 0; } EXPORT_SYMBOL(cpufreq_get_hw_max_freq); @@ -1894,19 +1917,18 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy) */ unsigned int cpufreq_get(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - unsigned int ret_freq = 0; + struct cpufreq_policy *policy __free(put_cpufreq_policy); - if (policy) { - down_read(&policy->rwsem); - if (cpufreq_driver->get) - ret_freq = __cpufreq_get(policy); - up_read(&policy->rwsem); + policy = cpufreq_cpu_get(cpu); + if (!policy) + return 0; - cpufreq_cpu_put(policy); - } + guard(cpufreq_policy_read)(policy); + + if (cpufreq_driver->get) + return __cpufreq_get(policy); - return ret_freq; + return 0; } EXPORT_SYMBOL(cpufreq_get); @@ -1965,9 +1987,9 @@ void cpufreq_suspend(void) for_each_active_policy(policy) { if (has_target()) { - down_write(&policy->rwsem); - cpufreq_stop_governor(policy); - up_write(&policy->rwsem); + scoped_guard(cpufreq_policy_write, policy) { + cpufreq_stop_governor(policy); + } } if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) @@ -2008,9 +2030,9 @@ void cpufreq_resume(void) pr_err("%s: Failed to resume driver: %s\n", __func__, cpufreq_driver->name); } else if (has_target()) { - down_write(&policy->rwsem); - ret = cpufreq_start_governor(policy); - up_write(&policy->rwsem); + scoped_guard(cpufreq_policy_write, policy) { + ret = cpufreq_start_governor(policy); + } if (ret) pr_err("%s: Failed to start governor for CPU%u's policy\n", @@ -2340,7 +2362,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, if (cpufreq_disabled()) return -ENODEV; - target_freq = __resolve_freq(policy, target_freq, relation); + target_freq = __resolve_freq(policy, target_freq, policy->min, + policy->max, relation); pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", policy->cpu, target_freq, relation, old_target_freq); @@ -2377,15 +2400,9 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { - int ret; - - down_write(&policy->rwsem); + guard(cpufreq_policy_write)(policy); - ret = __cpufreq_driver_target(policy, target_freq, relation); - - up_write(&policy->rwsem); - - return ret; + return __cpufreq_driver_target(policy, target_freq, relation); } EXPORT_SYMBOL_GPL(cpufreq_driver_target); @@ -2557,30 +2574,39 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); * POLICY INTERFACE * *********************************************************************/ +DEFINE_PER_CPU(unsigned long, cpufreq_pressure); + /** - * cpufreq_get_policy - get the current cpufreq_policy - * @policy: struct cpufreq_policy into which the current cpufreq_policy - * is written - * @cpu: CPU to find the policy for + * cpufreq_update_pressure() - Update cpufreq pressure for CPUs + * @policy: cpufreq policy of the CPUs. * - * Reads the current cpufreq policy. + * Update the value of cpufreq pressure for all @cpus in the policy. 
*/ -int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) +static void cpufreq_update_pressure(struct cpufreq_policy *policy) { - struct cpufreq_policy *cpu_policy; - if (!policy) - return -EINVAL; + unsigned long max_capacity, capped_freq, pressure; + u32 max_freq; + int cpu; - cpu_policy = cpufreq_cpu_get(cpu); - if (!cpu_policy) - return -EINVAL; + cpu = cpumask_first(policy->related_cpus); + max_freq = arch_scale_freq_ref(cpu); + capped_freq = policy->max; - memcpy(policy, cpu_policy, sizeof(*policy)); + /* + * Handle properly the boost frequencies, which should simply clean + * the cpufreq pressure value. + */ + if (max_freq <= capped_freq) { + pressure = 0; + } else { + max_capacity = arch_scale_cpu_capacity(cpu); + pressure = max_capacity - + mult_frac(max_capacity, capped_freq, max_freq); + } - cpufreq_cpu_put(cpu_policy); - return 0; + for_each_cpu(cpu, policy->related_cpus) + WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure); } -EXPORT_SYMBOL(cpufreq_get_policy); /** * cpufreq_set_policy - Modify cpufreq policy parameters. @@ -2630,13 +2656,22 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, * Resolve policy min/max to available frequencies. It ensures * no frequency resolution will neither overshoot the requested maximum * nor undershoot the requested minimum. + * + * Avoid storing intermediate values in policy->max or policy->min and + * compiler optimizations around them because they may be accessed + * concurrently by cpufreq_driver_resolve_freq() during the update. */ - policy->min = new_data.min; - policy->max = new_data.max; - policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L); - policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H); + WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, + new_data.min, new_data.max, + CPUFREQ_RELATION_H)); + new_data.min = __resolve_freq(policy, new_data.min, new_data.min, + new_data.max, CPUFREQ_RELATION_L); + WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min); + trace_cpu_frequency_limits(policy); + cpufreq_update_pressure(policy); + policy->cached_target_freq = UINT_MAX; pr_debug("new min and max freqs are %u - %u kHz\n", @@ -2689,6 +2724,21 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, return ret; } +static void cpufreq_policy_refresh(struct cpufreq_policy *policy) +{ + guard(cpufreq_policy_write)(policy); + + /* + * BIOS might change freq behind our back + * -> ask driver for current freq and notify governors about a change + */ + if (cpufreq_driver->get && has_target() && + (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false)))) + return; + + refresh_frequency_limits(policy); +} + /** * cpufreq_update_policy - Re-evaluate an existing cpufreq policy. * @cpu: CPU to re-evaluate the policy for. 
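The cpufreq_update_pressure() helper added above converts a capped policy->max into a capacity "pressure" value with mult_frac(). The short userspace sketch below reproduces that arithmetic with made-up frequencies and a local model of the mult_frac() macro: capping a 2.8 GHz reference frequency to 2.1 GHz reports a quarter of the CPU capacity as pressure, while an uncapped or boosted max reports zero.

/* Illustrative arithmetic only; numbers and helpers are examples, not kernel code. */
#include <stdio.h>

static unsigned long mult_frac(unsigned long x, unsigned long num, unsigned long den)
{
	/* Same overflow-avoiding quotient/remainder split as the kernel macro. */
	unsigned long q = x / den, r = x % den;

	return q * num + r * num / den;
}

int main(void)
{
	unsigned long max_capacity = 1024;    /* arch_scale_cpu_capacity() */
	unsigned long max_freq = 2800000;     /* arch_scale_freq_ref(), kHz */
	unsigned long capped_freq = 2100000;  /* policy->max after a thermal cap */
	unsigned long pressure;

	if (max_freq <= capped_freq)
		pressure = 0; /* boost or uncapped max: no pressure */
	else
		pressure = max_capacity -
			   mult_frac(max_capacity, capped_freq, max_freq);

	printf("pressure = %lu of %lu\n", pressure, max_capacity); /* 256 of 1024 */
	return 0;
}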
@@ -2700,23 +2750,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, */ void cpufreq_update_policy(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy); + policy = cpufreq_cpu_get(cpu); if (!policy) return; - /* - * BIOS might change freq behind our back - * -> ask driver for current freq and notify governors about a change - */ - if (cpufreq_driver->get && has_target() && - (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false)))) - goto unlock; - - refresh_frequency_limits(policy); - -unlock: - cpufreq_cpu_release(policy); + cpufreq_policy_refresh(policy); } EXPORT_SYMBOL(cpufreq_update_policy); @@ -2725,21 +2765,27 @@ EXPORT_SYMBOL(cpufreq_update_policy); * @cpu: CPU to update the policy limits for. * * Invoke the driver's ->update_limits callback if present or call - * cpufreq_update_policy() for @cpu. + * cpufreq_policy_refresh() for @cpu. */ void cpufreq_update_limits(unsigned int cpu) { + struct cpufreq_policy *policy __free(put_cpufreq_policy); + + policy = cpufreq_cpu_get(cpu); + if (!policy) + return; + if (cpufreq_driver->update_limits) - cpufreq_driver->update_limits(cpu); + cpufreq_driver->update_limits(policy); else - cpufreq_update_policy(cpu); + cpufreq_policy_refresh(policy); } EXPORT_SYMBOL_GPL(cpufreq_update_limits); /********************************************************************* * BOOST * *********************************************************************/ -static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state) +int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state) { int ret; @@ -2758,15 +2804,18 @@ static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state) return 0; } +EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw); -int cpufreq_boost_trigger_state(int state) +static int cpufreq_boost_trigger_state(int state) { struct cpufreq_policy *policy; unsigned long flags; int ret = 0; - if (cpufreq_driver->boost_enabled == state) - return 0; + /* + * Don't compare 'cpufreq_driver->boost_enabled' with 'state' here to + * make sure all policies are in sync with global boost flag. + */ write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver->boost_enabled = state; @@ -2774,12 +2823,12 @@ int cpufreq_boost_trigger_state(int state) cpus_read_lock(); for_each_active_policy(policy) { - policy->boost_enabled = state; - ret = cpufreq_driver->set_boost(policy, state); - if (ret) { - policy->boost_enabled = !policy->boost_enabled; + if (!policy->boost_supported) + continue; + + ret = policy_set_boost(policy, state); + if (ret) goto err_reset_state; - } } cpus_read_unlock(); @@ -2793,7 +2842,7 @@ err_reset_state: write_unlock_irqrestore(&cpufreq_driver_lock, flags); pr_err("%s: Cannot %s BOOST\n", - __func__, state ? 
"enable" : "disable"); + __func__, str_enable_disable(state)); return ret; } @@ -2821,22 +2870,7 @@ static void remove_boost_sysfs_file(void) sysfs_remove_file(cpufreq_global_kobject, &boost.attr); } -int cpufreq_enable_boost_support(void) -{ - if (!cpufreq_driver) - return -EINVAL; - - if (cpufreq_boost_supported()) - return 0; - - cpufreq_driver->set_boost = cpufreq_boost_set_sw; - - /* This will get removed on driver unregister */ - return create_boost_sysfs_file(); -} -EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support); - -int cpufreq_boost_enabled(void) +bool cpufreq_boost_enabled(void) { return cpufreq_driver->boost_enabled; } @@ -3019,6 +3053,36 @@ static int __init cpufreq_core_init(void) return 0; } + +static bool cpufreq_policy_is_good_for_eas(unsigned int cpu) +{ + struct cpufreq_policy *policy __free(put_cpufreq_policy); + + policy = cpufreq_cpu_get(cpu); + if (!policy) { + pr_debug("cpufreq policy not set for CPU: %d\n", cpu); + return false; + } + + return sugov_is_governor(policy); +} + +bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask) +{ + unsigned int cpu; + + /* Do not attempt EAS if schedutil is not being used. */ + for_each_cpu(cpu, cpu_mask) { + if (!cpufreq_policy_is_good_for_eas(cpu)) { + pr_debug("rd %*pbl: schedutil is mandatory for EAS\n", + cpumask_pr_args(cpu_mask)); + return false; + } + } + + return true; +} + module_param(off, int, 0444); module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444); core_initcall(cpufreq_core_init); diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index af44ee6a6430..1a7fcaf39cc9 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy) time_elapsed = update_time - j_cdbs->prev_update_time; j_cdbs->prev_update_time = update_time; - idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; + /* + * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if + * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is + * off, where idle_time is calculated by the difference between + * time elapsed in jiffies and "busy time" obtained from CPU + * statistics. If a CPU is 100% busy, the time elapsed and busy + * time should grow with the same amount in two consecutive + * samples, but in practice there could be a tiny difference, + * making the accumulated idle time decrease sometimes. Hence, + * in this case, idle_time should be regarded as 0 in order to + * make the further process correct. + */ + if (cur_idle_time > j_cdbs->prev_cpu_idle) + idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; + else + idle_time = 0; + j_cdbs->prev_cpu_idle = cur_idle_time; if (ignore_nice) { @@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy) * calls, so the previous load value can be used then. */ load = j_cdbs->prev_load; - } else if (unlikely((int)idle_time > 2 * sampling_rate && + } else if (unlikely(idle_time > 2 * sampling_rate && j_cdbs->prev_load)) { /* * If the CPU had gone completely idle and a task has @@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy) load = j_cdbs->prev_load; j_cdbs->prev_load = 0; } else { - if (time_elapsed >= idle_time) { + if (time_elapsed > idle_time) load = 100 * (time_elapsed - idle_time) / time_elapsed; - } else { - /* - * That can happen if idle_time is returned by - * get_cpu_idle_time_jiffy(). 
In that case - * idle_time is roughly equal to the difference - * between time_elapsed and "busy time" obtained - * from CPU statistics. Then, the "busy time" - * can end up being greater than time_elapsed - * (for example, if jiffies_64 and the CPU - * statistics are updated by different CPUs), - * so idle_time may in fact be negative. That - * means, though, that the CPU was busy all - * the time (on the rough average) during the - * last sampling interval and 100 can be - * returned as the load. - */ - load = (int)idle_time < 0 ? 100 : 0; - } + else + load = 0; + j_cdbs->prev_load = load; } - if (unlikely((int)idle_time > 2 * sampling_rate)) { + if (unlikely(idle_time > 2 * sampling_rate)) { unsigned int periods = idle_time / sampling_rate; if (periods < idle_periods) diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index a7c38b8b3e78..0e65d37c9231 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -76,7 +76,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy, return freq_next; } - index = cpufreq_frequency_table_target(policy, freq_next, relation); + index = cpufreq_frequency_table_target(policy, freq_next, policy->min, + policy->max, relation); freq_req = freq_table[index].frequency; freq_reduc = freq_req * od_tuners->powersave_bias / 1000; freq_avg = freq_req - freq_reduc; diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c index 7d2754411d8c..2c277eb3795a 100644 --- a/drivers/cpufreq/davinci-cpufreq.c +++ b/drivers/cpufreq/davinci-cpufreq.c @@ -101,7 +101,6 @@ static struct cpufreq_driver davinci_driver = { .get = cpufreq_generic_get, .init = davinci_cpu_init, .name = "davinci", - .attr = cpufreq_generic_attr, }; static int __init davinci_cpufreq_probe(struct platform_device *pdev) @@ -145,7 +144,7 @@ static struct platform_driver davinci_cpufreq_driver = { .driver = { .name = "cpufreq-davinci", }, - .remove_new = __exit_p(davinci_cpufreq_remove), + .remove = __exit_p(davinci_cpufreq_remove), }; int __init davinci_cpufreq_init(void) diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index ab93bce8ae77..320a0af2266a 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c @@ -225,12 +225,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy) return -ENODEV; } /* Enable Enhanced PowerSaver */ - rdmsrl(MSR_IA32_MISC_ENABLE, val); + rdmsrq(MSR_IA32_MISC_ENABLE, val); if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; - wrmsrl(MSR_IA32_MISC_ENABLE, val); + wrmsrq(MSR_IA32_MISC_ENABLE, val); /* Can be locked at 0 */ - rdmsrl(MSR_IA32_MISC_ENABLE, val); + rdmsrq(MSR_IA32_MISC_ENABLE, val); if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { pr_info("Can't enable Enhanced PowerSaver\n"); return -ENODEV; @@ -360,14 +360,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy) return 0; } -static int eps_cpu_exit(struct cpufreq_policy *policy) +static void eps_cpu_exit(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; /* Bye */ kfree(eps_cpu[cpu]); eps_cpu[cpu] = NULL; - return 0; } static struct cpufreq_driver eps_driver = { @@ -377,7 +376,6 @@ static struct cpufreq_driver eps_driver = { .exit = eps_cpu_exit, .get = eps_get, .name = "e_powersaver", - .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index 4ce5eb35dc46..fc5a58088b35 100644 --- a/drivers/cpufreq/elanfreq.c 
+++ b/drivers/cpufreq/elanfreq.c @@ -21,7 +21,6 @@ #include <linux/cpufreq.h> #include <asm/cpu_device_id.h> -#include <asm/msr.h> #include <linux/timex.h> #include <linux/io.h> @@ -194,7 +193,6 @@ static struct cpufreq_driver elanfreq_driver = { .target_index = elanfreq_target, .init = elanfreq_cpu_init, .name = "elanfreq", - .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id elan_id[] = { diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index c17dc51a5a02..35de513af6c9 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -14,7 +14,7 @@ * FREQUENCY TABLE HELPERS * *********************************************************************/ -bool policy_has_boost_freq(struct cpufreq_policy *policy) +static bool policy_has_boost_freq(struct cpufreq_policy *policy) { struct cpufreq_frequency_table *pos, *table = policy->freq_table; @@ -27,7 +27,6 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy) return false; } -EXPORT_SYMBOL_GPL(policy_has_boost_freq); int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) @@ -70,7 +69,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, struct cpufreq_frequency_table *table) { struct cpufreq_frequency_table *pos; - unsigned int freq, next_larger = ~0; + unsigned int freq, prev_smaller = 0; bool found = false; pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", @@ -86,12 +85,12 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, break; } - if ((next_larger > freq) && (freq > policy->max)) - next_larger = freq; + if ((prev_smaller < freq) && (freq <= policy->max)) + prev_smaller = freq; } if (!found) { - policy->max = next_larger; + policy->max = prev_smaller; cpufreq_verify_within_cpu_limits(policy); } @@ -116,8 +115,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy) EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int target_freq, unsigned int min, + unsigned int max, unsigned int relation) { struct cpufreq_frequency_table optimal = { .driver_data = ~0, @@ -148,7 +147,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, cpufreq_for_each_valid_entry_idx(pos, table, i) { freq = pos->frequency; - if ((freq < policy->min) || (freq > policy->max)) + if (freq < min || freq > max) continue; if (freq == target_freq) { optimal.driver_data = i; @@ -194,7 +193,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, } if (optimal.driver_data > i) { if (suboptimal.driver_data > i) { - WARN(1, "Invalid frequency table: %d\n", policy->cpu); + WARN(1, "Invalid frequency table: %u\n", policy->cpu); return 0; } @@ -254,7 +253,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf, if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ)) continue; - count += sprintf(&buf[count], "%d ", pos->frequency); + count += sprintf(&buf[count], "%u ", pos->frequency); } count += sprintf(&buf[count], "\n"); @@ -276,7 +275,6 @@ static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy, return show_available_freqs(policy, buf, false); } cpufreq_attr_available_freq(scaling_available); -EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); /* * scaling_boost_frequencies_show - show available boost frequencies for @@ -288,13 +286,6 @@ static ssize_t 
scaling_boost_frequencies_show(struct cpufreq_policy *policy, return show_available_freqs(policy, buf, true); } cpufreq_attr_available_freq(scaling_boost); -EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs); - -struct freq_attr *cpufreq_generic_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; -EXPORT_SYMBOL_GPL(cpufreq_generic_attr); static int set_freq_table_sorted(struct cpufreq_policy *policy) { @@ -367,6 +358,10 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy) if (ret) return ret; + /* Driver's may have set this field already */ + if (policy_has_boost_freq(policy)) + policy->boost_supported = true; + return set_freq_table_sorted(policy); } diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c index 577bb9e2f112..1492c92ffc1a 100644 --- a/drivers/cpufreq/imx-cpufreq-dt.c +++ b/drivers/cpufreq/imx-cpufreq-dt.c @@ -183,7 +183,7 @@ static void imx_cpufreq_dt_remove(struct platform_device *pdev) static struct platform_driver imx_cpufreq_dt_driver = { .probe = imx_cpufreq_dt_probe, - .remove_new = imx_cpufreq_dt_remove, + .remove = imx_cpufreq_dt_remove, .driver = { .name = "imx-cpufreq-dt", }, diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index c20d3ecc5a81..db1c88e9d3f9 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -207,7 +207,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = { .init = imx6q_cpufreq_init, .register_em = cpufreq_register_em_with_opp, .name = "imx6q-cpufreq", - .attr = cpufreq_generic_attr, .suspend = cpufreq_generic_suspend, }; @@ -522,7 +521,7 @@ static struct platform_driver imx6q_cpufreq_platdrv = { .name = "imx6q-cpufreq", }, .probe = imx6q_cpufreq_probe, - .remove_new = imx6q_cpufreq_remove, + .remove = imx6q_cpufreq_remove, }; module_platform_driver(imx6q_cpufreq_platdrv); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index dbbf299f4219..64587d318267 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -16,6 +16,7 @@ #include <linux/tick.h> #include <linux/slab.h> #include <linux/sched/cpufreq.h> +#include <linux/sched/smt.h> #include <linux/list.h> #include <linux/cpu.h> #include <linux/cpufreq.h> @@ -27,6 +28,7 @@ #include <linux/pm_qos.h> #include <linux/bitfield.h> #include <trace/events/power.h> +#include <linux/units.h> #include <asm/cpu.h> #include <asm/div64.h> @@ -173,7 +175,6 @@ struct vid_data { * based on the MSR_IA32_MISC_ENABLE value and whether or * not the maximum reported turbo P-state is different from * the maximum reported non-turbo one. - * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq. * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo * P-state capacity. 
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo @@ -182,7 +183,6 @@ struct vid_data { struct global_params { bool no_turbo; bool turbo_disabled; - bool turbo_disabled_mf; int max_perf_pct; int min_perf_pct; }; @@ -213,13 +213,15 @@ struct global_params { * @epp_policy: Last saved policy used to set EPP/EPB * @epp_default: Power on default HWP energy performance * preference/bias - * @epp_cached Cached HWP energy-performance preference value + * @epp_cached: Cached HWP energy-performance preference value * @hwp_req_cached: Cached value of the last HWP Request MSR * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR * @last_io_update: Last time when IO wake flag was set + * @capacity_perf: Highest perf used for scale invariance * @sched_flags: Store scheduler flags for possible cross CPU update * @hwp_boost_min: Last HWP boosted min performance * @suspended: Whether or not the driver has been suspended. + * @pd_registered: Set when a perf domain is registered for this CPU. * @hwp_notify_work: workqueue for HWP notifications. * * This structure stores per CPU instance data for all CPUs. @@ -255,9 +257,13 @@ struct cpudata { u64 hwp_req_cached; u64 hwp_cap_cached; u64 last_io_update; + unsigned int capacity_perf; unsigned int sched_flags; u32 hwp_boost_min; bool suspended; +#ifdef CONFIG_ENERGY_MODEL + bool pd_registered; +#endif struct delayed_work hwp_notify_work; }; @@ -292,22 +298,25 @@ struct pstate_funcs { static struct pstate_funcs pstate_funcs __read_mostly; -static int hwp_active __read_mostly; -static int hwp_mode_bdw __read_mostly; -static bool per_cpu_limits __read_mostly; +static bool hwp_active __ro_after_init; +static int hwp_mode_bdw __ro_after_init; +static bool per_cpu_limits __ro_after_init; +static bool hwp_forced __ro_after_init; static bool hwp_boost __read_mostly; -static bool hwp_forced __read_mostly; +static bool hwp_is_hybrid; static struct cpufreq_driver *intel_pstate_driver __read_mostly; -#define HYBRID_SCALING_FACTOR 78741 +#define INTEL_PSTATE_CORE_SCALING 100000 +#define HYBRID_SCALING_FACTOR_ADL 78741 #define HYBRID_SCALING_FACTOR_MTL 80000 +#define HYBRID_SCALING_FACTOR_LNL 86957 -static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR; +static int hybrid_scaling_factor; static inline int core_get_scaling(void) { - return 100000; + return INTEL_PSTATE_CORE_SCALING; } #ifdef CONFIG_ACPI @@ -357,15 +366,14 @@ static void intel_pstate_set_itmt_prio(int cpu) int ret; ret = cppc_get_perf_caps(cpu, &cppc_perf); - if (ret) - return; - /* - * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff. - * In this case we can't use CPPC.highest_perf to enable ITMT. - * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide. + * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0]. + * + * Also, on some systems with overclocking enabled, CPPC.highest_perf is + * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT. + * Fall back to MSR_HWP_CAPABILITIES then too. 
*/ - if (cppc_perf.highest_perf == CPPC_MAX_PERF) + if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF) cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached)); /* @@ -412,18 +420,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu) static int intel_pstate_cppc_get_scaling(int cpu) { struct cppc_perf_caps cppc_perf; - int ret; - - ret = cppc_get_perf_caps(cpu, &cppc_perf); /* - * If the nominal frequency and the nominal performance are not - * zero and the ratio between them is not 100, return the hybrid - * scaling factor. + * Compute the perf-to-frequency scaling factor for the given CPU if + * possible, unless it would be 0. */ - if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq && - cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq) - return hybrid_scaling_factor; + if (!cppc_get_perf_caps(cpu, &cppc_perf) && + cppc_perf.nominal_perf && cppc_perf.nominal_freq) + return div_u64(cppc_perf.nominal_freq * KHZ_PER_MHZ, + cppc_perf.nominal_perf); return core_get_scaling(); } @@ -594,12 +599,16 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq); } -static inline void update_turbo_state(void) +static bool turbo_is_disabled(void) { u64 misc_en; - rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); - global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE; + if (!cpu_feature_enabled(X86_FEATURE_IDA)) + return true; + + rdmsrq(MSR_IA32_MISC_ENABLE, misc_en); + + return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); } static int min_perf_pct_min(void) @@ -619,7 +628,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data) if (!boot_cpu_has(X86_FEATURE_EPB)) return -ENXIO; - ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); + ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); if (ret) return (s16)ret; @@ -636,7 +645,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) * MSR_HWP_REQUEST, so need to read and get EPP. */ if (!hwp_req_data) { - epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, + epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &hwp_req_data); if (epp) return epp; @@ -658,12 +667,12 @@ static int intel_pstate_set_epb(int cpu, s16 pref) if (!boot_cpu_has(X86_FEATURE_EPB)) return -ENXIO; - ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); + ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); if (ret) return ret; epb = (epb & ~0x0f) | pref; - wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); + wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); return 0; } @@ -761,7 +770,7 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) * function, so it cannot run in parallel with the update below. 
*/ WRITE_ONCE(cpu->hwp_req_cached, value); - ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); if (!ret) cpu->epp_cached = epp; @@ -915,7 +924,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) if (ratio <= 0) { u64 cap; - rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); + rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); ratio = HWP_GUARANTEED_PERF(cap); } @@ -935,11 +944,276 @@ static struct freq_attr *hwp_cpufreq_attrs[] = { NULL, }; +static bool no_cas __ro_after_init; + +static struct cpudata *hybrid_max_perf_cpu __read_mostly; +/* + * Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata, + * and the x86 arch scale-invariance information from concurrent updates. + */ +static DEFINE_MUTEX(hybrid_capacity_lock); + +#ifdef CONFIG_ENERGY_MODEL +#define HYBRID_EM_STATE_COUNT 4 + +static int hybrid_active_power(struct device *dev, unsigned long *power, + unsigned long *freq) +{ + /* + * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100% + * of the maximum capacity such that two CPUs of the same type will be + * regarded as equally attractive if the utilization of each of them + * falls into the same bin, which should prevent tasks from being + * migrated between them too often. + * + * For this purpose, return the "frequency" of 2 for the first + * performance level and otherwise leave the value set by the caller. + */ + if (!*freq) + *freq = 2; + + /* No power information. */ + *power = EM_MAX_POWER; + + return 0; +} + +static int hybrid_get_cost(struct device *dev, unsigned long freq, + unsigned long *cost) +{ + struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate; + struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id); + + /* + * The smaller the perf-to-frequency scaling factor, the larger the IPC + * ratio between the given CPU and the least capable CPU in the system. + * Regard that IPC ratio as the primary cost component and assume that + * the scaling factors for different CPU types will differ by at least + * 5% and they will not be above INTEL_PSTATE_CORE_SCALING. + * + * Add the freq value to the cost, so that the cost of running on CPUs + * of the same type in different "utilization bins" is different. + */ + *cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq; + /* + * Increase the cost slightly for CPUs able to access L3 to avoid + * touching it in case some other CPUs of the same type can do the work + * without it. + */ + if (cacheinfo) { + unsigned int i; + + /* Check if L3 cache is there. */ + for (i = 0; i < cacheinfo->num_leaves; i++) { + if (cacheinfo->info_list[i].level == 3) { + *cost += 2; + break; + } + } + } + + return 0; +} + +static bool hybrid_register_perf_domain(unsigned int cpu) +{ + static const struct em_data_callback cb + = EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost); + struct cpudata *cpudata = all_cpu_data[cpu]; + struct device *cpu_dev; + + /* + * Registering EM perf domains without enabling asymmetric CPU capacity + * support is not really useful and one domain should not be registered + * more than once. 
+ */ + if (!hybrid_max_perf_cpu || cpudata->pd_registered) + return false; + + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + return false; + + if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb, + cpumask_of(cpu), false)) + return false; + + cpudata->pd_registered = true; + + return true; +} + +static void hybrid_register_all_perf_domains(void) +{ + unsigned int cpu; + + for_each_online_cpu(cpu) + hybrid_register_perf_domain(cpu); +} + +static void hybrid_update_perf_domain(struct cpudata *cpu) +{ + if (cpu->pd_registered) + em_adjust_cpu_capacity(cpu->cpu); +} +#else /* !CONFIG_ENERGY_MODEL */ +static inline bool hybrid_register_perf_domain(unsigned int cpu) { return false; } +static inline void hybrid_register_all_perf_domains(void) {} +static inline void hybrid_update_perf_domain(struct cpudata *cpu) {} +#endif /* CONFIG_ENERGY_MODEL */ + +static void hybrid_set_cpu_capacity(struct cpudata *cpu) +{ + arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf, + hybrid_max_perf_cpu->capacity_perf, + cpu->capacity_perf, + cpu->pstate.max_pstate_physical); + hybrid_update_perf_domain(cpu); + + topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu)); + + pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu, + cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf, + cpu->pstate.max_pstate_physical); +} + +static void hybrid_clear_cpu_capacity(unsigned int cpunum) +{ + arch_set_cpu_capacity(cpunum, 1, 1, 1, 1); +} + +static void hybrid_get_capacity_perf(struct cpudata *cpu) +{ + if (READ_ONCE(global.no_turbo)) { + cpu->capacity_perf = cpu->pstate.max_pstate_physical; + return; + } + + cpu->capacity_perf = HWP_HIGHEST_PERF(READ_ONCE(cpu->hwp_cap_cached)); +} + +static void hybrid_set_capacity_of_cpus(void) +{ + int cpunum; + + for_each_online_cpu(cpunum) { + struct cpudata *cpu = all_cpu_data[cpunum]; + + if (cpu) + hybrid_set_cpu_capacity(cpu); + } +} + +static void hybrid_update_cpu_capacity_scaling(void) +{ + struct cpudata *max_perf_cpu = NULL; + unsigned int max_cap_perf = 0; + int cpunum; + + for_each_online_cpu(cpunum) { + struct cpudata *cpu = all_cpu_data[cpunum]; + + if (!cpu) + continue; + + /* + * During initialization, CPU performance at full capacity needs + * to be determined. + */ + if (!hybrid_max_perf_cpu) + hybrid_get_capacity_perf(cpu); + + /* + * If hybrid_max_perf_cpu is not NULL at this point, it is + * being replaced, so don't take it into account when looking + * for the new one. + */ + if (cpu == hybrid_max_perf_cpu) + continue; + + if (cpu->capacity_perf > max_cap_perf) { + max_cap_perf = cpu->capacity_perf; + max_perf_cpu = cpu; + } + } + + if (max_perf_cpu) { + hybrid_max_perf_cpu = max_perf_cpu; + hybrid_set_capacity_of_cpus(); + } else { + pr_info("Found no CPUs with nonzero maximum performance\n"); + /* Revert to the flat CPU capacity structure. */ + for_each_online_cpu(cpunum) + hybrid_clear_cpu_capacity(cpunum); + } +} + +static void __hybrid_refresh_cpu_capacity_scaling(void) +{ + hybrid_max_perf_cpu = NULL; + hybrid_update_cpu_capacity_scaling(); +} + +static void hybrid_refresh_cpu_capacity_scaling(void) +{ + guard(mutex)(&hybrid_capacity_lock); + + __hybrid_refresh_cpu_capacity_scaling(); + /* + * Perf domains are not registered before setting hybrid_max_perf_cpu, + * so register them all after setting up CPU capacity scaling. + */ + hybrid_register_all_perf_domains(); +} + +static void hybrid_init_cpu_capacity_scaling(bool refresh) +{ + /* Bail out if enabling capacity-aware scheduling is prohibited. 
*/ + if (no_cas) + return; + + /* + * If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity + * scaling has been enabled already and the driver is just changing the + * operation mode. + */ + if (refresh) { + hybrid_refresh_cpu_capacity_scaling(); + return; + } + + /* + * On hybrid systems, use asym capacity instead of ITMT, but because + * the capacity of SMT threads is not deterministic even approximately, + * do not do that when SMT is in use. + */ + if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) { + hybrid_refresh_cpu_capacity_scaling(); + /* + * Disabling ITMT causes sched domains to be rebuilt to disable asym + * packing and enable asym capacity and EAS. + */ + sched_clear_itmt_support(); + } +} + +static bool hybrid_clear_max_perf_cpu(void) +{ + bool ret; + + guard(mutex)(&hybrid_capacity_lock); + + ret = !!hybrid_max_perf_cpu; + hybrid_max_perf_cpu = NULL; + + return ret; +} + static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) { u64 cap; - rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); + rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); WRITE_ONCE(cpu->hwp_cap_cached, cap); cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap); cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap); @@ -963,6 +1237,51 @@ static void intel_pstate_get_hwp_cap(struct cpudata *cpu) } } +static void hybrid_update_capacity(struct cpudata *cpu) +{ + unsigned int max_cap_perf; + + mutex_lock(&hybrid_capacity_lock); + + if (!hybrid_max_perf_cpu) + goto unlock; + + /* + * The maximum performance of the CPU may have changed, but assume + * that the performance of the other CPUs has not changed. + */ + max_cap_perf = hybrid_max_perf_cpu->capacity_perf; + + intel_pstate_get_hwp_cap(cpu); + + hybrid_get_capacity_perf(cpu); + /* Should hybrid_max_perf_cpu be replaced by this CPU? */ + if (cpu->capacity_perf > max_cap_perf) { + hybrid_max_perf_cpu = cpu; + hybrid_set_capacity_of_cpus(); + goto unlock; + } + + /* If this CPU is hybrid_max_perf_cpu, should it be replaced? */ + if (cpu == hybrid_max_perf_cpu && cpu->capacity_perf < max_cap_perf) { + hybrid_update_cpu_capacity_scaling(); + goto unlock; + } + + hybrid_set_cpu_capacity(cpu); + /* + * If the CPU was offline to start with and it is going online for the + * first time, a perf domain needs to be registered for it if hybrid + * capacity scaling has been enabled already. In that case, sched + * domains need to be rebuilt to take the new perf domain into account. 
+ */ + if (hybrid_register_perf_domain(cpu->cpu)) + em_rebuild_sched_domains(); + +unlock: + mutex_unlock(&hybrid_capacity_lock); +} + static void intel_pstate_hwp_set(unsigned int cpu) { struct cpudata *cpu_data = all_cpu_data[cpu]; @@ -976,7 +1295,7 @@ static void intel_pstate_hwp_set(unsigned int cpu) if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) min = max; - rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); + rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value); value &= ~HWP_MIN_PERF(~0L); value |= HWP_MIN_PERF(min); @@ -1023,7 +1342,7 @@ static void intel_pstate_hwp_set(unsigned int cpu) } skip_epp: WRITE_ONCE(cpu_data->hwp_req_cached, value); - wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value); } static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata); @@ -1070,7 +1389,23 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu) if (boot_cpu_has(X86_FEATURE_HWP_EPP)) value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + + mutex_lock(&hybrid_capacity_lock); + + if (!hybrid_max_perf_cpu) { + mutex_unlock(&hybrid_capacity_lock); + + return; + } + + if (hybrid_max_perf_cpu == cpu) + hybrid_update_cpu_capacity_scaling(); + + mutex_unlock(&hybrid_capacity_lock); + + /* Reset the capacity of the CPU going offline to the initial value. */ + hybrid_clear_cpu_capacity(cpu->cpu); } #define POWER_CTL_EE_ENABLE 1 @@ -1083,7 +1418,7 @@ static void set_power_ctl_ee_state(bool input) u64 power_ctl; mutex_lock(&intel_pstate_driver_lock); - rdmsrl(MSR_IA32_POWER_CTL, power_ctl); + rdmsrq(MSR_IA32_POWER_CTL, power_ctl); if (input) { power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE); power_ctl_ee_state = POWER_CTL_EE_ENABLE; @@ -1091,7 +1426,7 @@ static void set_power_ctl_ee_state(bool input) power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); power_ctl_ee_state = POWER_CTL_EE_DISABLE; } - wrmsrl(MSR_IA32_POWER_CTL, power_ctl); + wrmsrq(MSR_IA32_POWER_CTL, power_ctl); mutex_unlock(&intel_pstate_driver_lock); } @@ -1100,7 +1435,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata); static void intel_pstate_hwp_reenable(struct cpudata *cpu) { intel_pstate_hwp_enable(cpu); - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); } static int intel_pstate_suspend(struct cpufreq_policy *policy) @@ -1151,45 +1486,55 @@ static void intel_pstate_update_policies(void) cpufreq_update_policy(cpu); } -static void __intel_pstate_update_max_freq(struct cpudata *cpudata, - struct cpufreq_policy *policy) +static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy, + struct cpudata *cpudata) { - policy->cpuinfo.max_freq = global.turbo_disabled_mf ? + guard(cpufreq_policy_write)(policy); + + if (hwp_active) + intel_pstate_get_hwp_cap(cpudata); + + policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? 
cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; + refresh_frequency_limits(policy); } -static void intel_pstate_update_max_freq(unsigned int cpu) +static bool intel_pstate_update_max_freq(struct cpudata *cpudata) { - struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy); + policy = cpufreq_cpu_get(cpudata->cpu); if (!policy) - return; + return false; - __intel_pstate_update_max_freq(all_cpu_data[cpu], policy); + __intel_pstate_update_max_freq(policy, cpudata); - cpufreq_cpu_release(policy); + return true; } -static void intel_pstate_update_limits(unsigned int cpu) +static void intel_pstate_update_limits(struct cpufreq_policy *policy) { - mutex_lock(&intel_pstate_driver_lock); + struct cpudata *cpudata = all_cpu_data[policy->cpu]; - update_turbo_state(); - /* - * If turbo has been turned on or off globally, policy limits for - * all CPUs need to be updated to reflect that. - */ - if (global.turbo_disabled_mf != global.turbo_disabled) { - global.turbo_disabled_mf = global.turbo_disabled; - arch_set_max_freq_ratio(global.turbo_disabled); - for_each_possible_cpu(cpu) - intel_pstate_update_max_freq(cpu); - } else { - cpufreq_update_policy(cpu); - } + __intel_pstate_update_max_freq(policy, cpudata); - mutex_unlock(&intel_pstate_driver_lock); + hybrid_update_capacity(cpudata); +} + +static void intel_pstate_update_limits_for_all(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + intel_pstate_update_max_freq(all_cpu_data[cpu]); + + mutex_lock(&hybrid_capacity_lock); + + if (hybrid_max_perf_cpu) + __hybrid_refresh_cpu_capacity_scaling(); + + mutex_unlock(&hybrid_capacity_lock); } /************************** sysfs begin ************************/ @@ -1287,11 +1632,7 @@ static ssize_t show_no_turbo(struct kobject *kobj, return -EAGAIN; } - update_turbo_state(); - if (global.turbo_disabled) - ret = sprintf(buf, "%u\n", global.turbo_disabled); - else - ret = sprintf(buf, "%u\n", global.no_turbo); + ret = sprintf(buf, "%u\n", global.no_turbo); mutex_unlock(&intel_pstate_driver_lock); @@ -1302,32 +1643,39 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; - int ret; + bool no_turbo; - ret = sscanf(buf, "%u", &input); - if (ret != 1) + if (sscanf(buf, "%u", &input) != 1) return -EINVAL; mutex_lock(&intel_pstate_driver_lock); if (!intel_pstate_driver) { - mutex_unlock(&intel_pstate_driver_lock); - return -EAGAIN; + count = -EAGAIN; + goto unlock_driver; } - mutex_lock(&intel_pstate_limits_lock); + no_turbo = !!clamp_t(int, input, 0, 1); - update_turbo_state(); - if (global.turbo_disabled) { - pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); - mutex_unlock(&intel_pstate_limits_lock); - mutex_unlock(&intel_pstate_driver_lock); - return -EPERM; + WRITE_ONCE(global.turbo_disabled, turbo_is_disabled()); + if (global.turbo_disabled && !no_turbo) { + pr_notice("Turbo disabled by BIOS or unavailable on processor\n"); + count = -EPERM; + if (global.no_turbo) + goto unlock_driver; + else + no_turbo = 1; + } + + if (no_turbo == global.no_turbo) { + goto unlock_driver; } - global.no_turbo = clamp_t(int, input, 0, 1); + WRITE_ONCE(global.no_turbo, no_turbo); + + mutex_lock(&intel_pstate_limits_lock); - if (global.no_turbo) { + if (no_turbo) { struct cpudata *cpu = all_cpu_data[0]; int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; @@ -1338,9 +1686,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, 
mutex_unlock(&intel_pstate_limits_lock); - intel_pstate_update_policies(); - arch_set_max_freq_ratio(global.no_turbo); + intel_pstate_update_limits_for_all(); + arch_set_max_freq_ratio(no_turbo); +unlock_driver: mutex_unlock(&intel_pstate_driver_lock); return count; @@ -1481,7 +1830,7 @@ static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribut u64 power_ctl; int enable; - rdmsrl(MSR_IA32_POWER_CTL, power_ctl); + rdmsrq(MSR_IA32_POWER_CTL, power_ctl); enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); return sprintf(buf, "%d\n", !enable); } @@ -1618,98 +1967,97 @@ static void intel_pstate_notify_work(struct work_struct *work) { struct cpudata *cpudata = container_of(to_delayed_work(work), struct cpudata, hwp_notify_work); - struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu); - if (policy) { - intel_pstate_get_hwp_cap(cpudata); - __intel_pstate_update_max_freq(cpudata, policy); - - cpufreq_cpu_release(policy); + if (intel_pstate_update_max_freq(cpudata)) { + /* + * The driver will not be unregistered while this function is + * running, so update the capacity without acquiring the driver + * lock. + */ + hybrid_update_capacity(cpudata); } - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); } -static DEFINE_SPINLOCK(hwp_notify_lock); +static DEFINE_RAW_SPINLOCK(hwp_notify_lock); static cpumask_t hwp_intr_enable_mask; +#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0) +#define HWP_HIGHEST_PERF_CHANGE_STATUS BIT(3) + void notify_hwp_interrupt(void) { unsigned int this_cpu = smp_processor_id(); - struct cpudata *cpudata; + u64 value, status_mask; unsigned long flags; - u64 value; - if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; - rdmsrl_safe(MSR_HWP_STATUS, &value); - if (!(value & 0x01)) + status_mask = HWP_GUARANTEED_PERF_CHANGE_STATUS; + if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) + status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS; + + rdmsrq_safe(MSR_HWP_STATUS, &value); + if (!(value & status_mask)) return; - spin_lock_irqsave(&hwp_notify_lock, flags); + raw_spin_lock_irqsave(&hwp_notify_lock, flags); if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask)) goto ack_intr; - /* - * Currently we never free all_cpu_data. And we can't reach here - * without this allocated. But for safety for future changes, added - * check. - */ - if (unlikely(!READ_ONCE(all_cpu_data))) - goto ack_intr; - - /* - * The free is done during cleanup, when cpufreq registry is failed. - * We wouldn't be here if it fails on init or switch status. But for - * future changes, added check. 
- */ - cpudata = READ_ONCE(all_cpu_data[this_cpu]); - if (unlikely(!cpudata)) - goto ack_intr; - - schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10)); + schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work, + msecs_to_jiffies(10)); - spin_unlock_irqrestore(&hwp_notify_lock, flags); + raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); return; ack_intr: - wrmsrl_safe(MSR_HWP_STATUS, 0); - spin_unlock_irqrestore(&hwp_notify_lock, flags); + wrmsrq_safe(MSR_HWP_STATUS, 0); + raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); } static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) { - unsigned long flags; + bool cancel_work; - if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; - /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */ + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + + raw_spin_lock_irq(&hwp_notify_lock); + cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask); + raw_spin_unlock_irq(&hwp_notify_lock); - spin_lock_irqsave(&hwp_notify_lock, flags); - if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask)) - cancel_delayed_work(&cpudata->hwp_notify_work); - spin_unlock_irqrestore(&hwp_notify_lock, flags); + if (cancel_work) + cancel_delayed_work_sync(&cpudata->hwp_notify_work); } +#define HWP_GUARANTEED_PERF_CHANGE_REQ BIT(0) +#define HWP_HIGHEST_PERF_CHANGE_REQ BIT(2) + static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) { - /* Enable HWP notification interrupt for guaranteed performance change */ + /* Enable HWP notification interrupt for performance change */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { - unsigned long flags; + u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ; - spin_lock_irqsave(&hwp_notify_lock, flags); + raw_spin_lock_irq(&hwp_notify_lock); INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); - spin_unlock_irqrestore(&hwp_notify_lock, flags); + raw_spin_unlock_irq(&hwp_notify_lock); + + if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) + interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ; - /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); + /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */ + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask); + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); } } @@ -1748,9 +2096,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) { /* First disable HWP notification interrupt till we activate again */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); - wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); + wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); intel_pstate_enable_hwp_interrupt(cpudata); @@ -1764,7 +2112,7 @@ static int atom_get_min_pstate(int not_used) { u64 value; - rdmsrl(MSR_ATOM_CORE_RATIOS, value); + rdmsrq(MSR_ATOM_CORE_RATIOS, value); return (value >> 8) & 0x7F; } @@ -1772,7 +2120,7 @@ static int atom_get_max_pstate(int not_used) { u64 value; - rdmsrl(MSR_ATOM_CORE_RATIOS, value); + rdmsrq(MSR_ATOM_CORE_RATIOS, value); return (value >> 16) & 
0x7F; } @@ -1780,7 +2128,7 @@ static int atom_get_turbo_pstate(int not_used) { u64 value; - rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); + rdmsrq(MSR_ATOM_CORE_TURBO_RATIOS, value); return value & 0x7F; } @@ -1791,7 +2139,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) u32 vid; val = (u64)pstate << 8; - if (global.no_turbo && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -1815,7 +2163,7 @@ static int silvermont_get_scaling(void) static int silvermont_freq_table[] = { 83300, 100000, 133300, 116700, 80000}; - rdmsrl(MSR_FSB_FREQ, value); + rdmsrq(MSR_FSB_FREQ, value); i = value & 0x7; WARN_ON(i > 4); @@ -1831,7 +2179,7 @@ static int airmont_get_scaling(void) 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500}; - rdmsrl(MSR_FSB_FREQ, value); + rdmsrq(MSR_FSB_FREQ, value); i = value & 0xF; WARN_ON(i > 8); @@ -1842,7 +2190,7 @@ static void atom_get_vid(struct cpudata *cpudata) { u64 value; - rdmsrl(MSR_ATOM_CORE_VIDS, value); + rdmsrq(MSR_ATOM_CORE_VIDS, value); cpudata->vid.min = int_tofp((value >> 8) & 0x7f); cpudata->vid.max = int_tofp((value >> 16) & 0x7f); cpudata->vid.ratio = div_fp( @@ -1850,7 +2198,7 @@ static void atom_get_vid(struct cpudata *cpudata) int_tofp(cpudata->pstate.max_pstate - cpudata->pstate.min_pstate)); - rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); + rdmsrq(MSR_ATOM_CORE_TURBO_VIDS, value); cpudata->vid.turbo = value & 0x7f; } @@ -1858,7 +2206,7 @@ static int core_get_min_pstate(int cpu) { u64 value; - rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); + rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value); return (value >> 40) & 0xFF; } @@ -1866,7 +2214,7 @@ static int core_get_max_pstate_physical(int cpu) { u64 value; - rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); + rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value); return (value >> 8) & 0xFF; } @@ -1880,13 +2228,13 @@ static int core_get_tdp_ratio(int cpu, u64 plat_info) int err; /* Get the TDP level (0, 1, 2) to get ratios */ - err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); + err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); if (err) return err; /* TDP MSR are continuous starting at 0x648 */ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); - err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio); + err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio); if (err) return err; @@ -1911,7 +2259,7 @@ static int core_get_max_pstate(int cpu) int tdp_ratio; int err; - rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info); + rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info); max_pstate = (plat_info >> 8) & 0xFF; tdp_ratio = core_get_tdp_ratio(cpu, plat_info); @@ -1923,7 +2271,7 @@ static int core_get_max_pstate(int cpu) return tdp_ratio; } - err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar); + err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar); if (!err) { int tar_levels; @@ -1943,7 +2291,7 @@ static int core_get_turbo_pstate(int cpu) u64 value; int nont, ret; - rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); + rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); nont = core_get_max_pstate(cpu); ret = (value) & 255; if (ret <= nont) @@ -1956,7 +2304,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) u64 val; val = (u64)pstate << 8; - if (global.no_turbo && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; return val; @@ -1972,7 +2320,7 @@ static int 
knl_get_turbo_pstate(int cpu) u64 value; int nont, ret; - rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); + rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); nont = core_get_max_pstate(cpu); ret = (((value) >> 8) & 0xFF); if (ret <= nont) @@ -1980,33 +2328,31 @@ static int knl_get_turbo_pstate(int cpu) return ret; } -static void hybrid_get_type(void *data) -{ - u8 *cpu_type = data; - - *cpu_type = get_this_hybrid_cpu_type(); -} - static int hwp_get_cpu_scaling(int cpu) { - u8 cpu_type = 0; + if (hybrid_scaling_factor) { + struct cpuinfo_x86 *c = &cpu_data(cpu); + u8 cpu_type = c->topo.intel_type; + + /* + * Return the hybrid scaling factor for P-cores and use the + * default core scaling for E-cores. + */ + if (cpu_type == INTEL_CPU_TYPE_CORE) + return hybrid_scaling_factor; - smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1); - /* P-cores have a smaller perf level-to-freqency scaling factor. */ - if (cpu_type == 0x40) - return hybrid_scaling_factor; + if (cpu_type == INTEL_CPU_TYPE_ATOM) + return core_get_scaling(); + } - /* Use default core scaling for E-cores */ - if (cpu_type == 0x20) + /* Use core scaling on non-hybrid systems. */ + if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) return core_get_scaling(); /* - * If reached here, this system is either non-hybrid (like Tiger - * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with - * no E cores (in which case CPUID for hybrid support is 0). - * - * The CPPC nominal_frequency field is 0 for non-hybrid systems, - * so the default core scaling will be used for them. + * The system is hybrid, but the hybrid scaling factor is not known or + * the CPU type is not one of the above, so use CPPC to compute the + * scaling factor for this CPU. */ return intel_pstate_cppc_get_scaling(cpu); } @@ -2020,7 +2366,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) * the CPU being updated, so force the register update to run on the * right CPU. */ - wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, + wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); } @@ -2029,14 +2375,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu) intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); } -static void intel_pstate_max_within_limits(struct cpudata *cpu) -{ - int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); - - update_turbo_state(); - intel_pstate_set_pstate(cpu, pstate); -} - static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu); @@ -2051,11 +2389,18 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) if (pstate_funcs.get_cpu_scaling) { cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu); - if (cpu->pstate.scaling != perf_ctl_scaling) + if (cpu->pstate.scaling != perf_ctl_scaling) { intel_pstate_hybrid_hwp_adjust(cpu); + hwp_is_hybrid = true; + } } else { cpu->pstate.scaling = perf_ctl_scaling; } + /* + * If the CPU is going online for the first time and it was + * offline initially, asym capacity scaling needs to be updated. 
+ */ + hybrid_update_capacity(cpu); } else { cpu->pstate.scaling = perf_ctl_scaling; cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu); @@ -2128,7 +2473,7 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu) return; hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; - wrmsrl(MSR_HWP_REQUEST, hwp_req); + wrmsrq(MSR_HWP_REQUEST, hwp_req); cpu->last_update = cpu->sample.time; } @@ -2141,7 +2486,7 @@ static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu) expired = time_after64(cpu->sample.time, cpu->last_update + hwp_boost_hold_time_ns); if (expired) { - wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached); + wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached); cpu->hwp_boost_min = 0; } } @@ -2202,8 +2547,8 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) u64 tsc; local_irq_save(flags); - rdmsrl(MSR_IA32_APERF, aperf); - rdmsrl(MSR_IA32_MPERF, mperf); + rdmsrq(MSR_IA32_APERF, aperf); + rdmsrq(MSR_IA32_MPERF, mperf); tsc = rdtsc(); if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { local_irq_restore(flags); @@ -2262,7 +2607,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu) sample->busy_scaled = busy_frac * 100; - target = global.no_turbo || global.turbo_disabled ? + target = READ_ONCE(global.no_turbo) ? cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; target += target >> 2; target = mul_fp(target, busy_frac); @@ -2297,7 +2642,7 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) return; cpu->pstate.current_pstate = pstate; - wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); + wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); } static void intel_pstate_adjust_pstate(struct cpudata *cpu) @@ -2306,8 +2651,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu) struct sample *sample; int target_pstate; - update_turbo_state(); - target_pstate = get_target_pstate(cpu); target_pstate = intel_pstate_prepare_request(cpu, target_pstate); trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); @@ -2402,52 +2745,58 @@ static const struct pstate_funcs knl_funcs = { .get_val = core_get_val, }; -#define X86_MATCH(model, policy) \ - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ - X86_FEATURE_APERFMPERF, &policy) +#define X86_MATCH(vfm, policy) \ + X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, &policy) static const struct x86_cpu_id intel_pstate_cpu_ids[] = { - X86_MATCH(SANDYBRIDGE, core_funcs), - X86_MATCH(SANDYBRIDGE_X, core_funcs), - X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), - X86_MATCH(IVYBRIDGE, core_funcs), - X86_MATCH(HASWELL, core_funcs), - X86_MATCH(BROADWELL, core_funcs), - X86_MATCH(IVYBRIDGE_X, core_funcs), - X86_MATCH(HASWELL_X, core_funcs), - X86_MATCH(HASWELL_L, core_funcs), - X86_MATCH(HASWELL_G, core_funcs), - X86_MATCH(BROADWELL_G, core_funcs), - X86_MATCH(ATOM_AIRMONT, airmont_funcs), - X86_MATCH(SKYLAKE_L, core_funcs), - X86_MATCH(BROADWELL_X, core_funcs), - X86_MATCH(SKYLAKE, core_funcs), - X86_MATCH(BROADWELL_D, core_funcs), - X86_MATCH(XEON_PHI_KNL, knl_funcs), - X86_MATCH(XEON_PHI_KNM, knl_funcs), - X86_MATCH(ATOM_GOLDMONT, core_funcs), - X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), - X86_MATCH(SKYLAKE_X, core_funcs), - X86_MATCH(COMETLAKE, core_funcs), - X86_MATCH(ICELAKE_X, core_funcs), - X86_MATCH(TIGERLAKE, core_funcs), - X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), - X86_MATCH(EMERALDRAPIDS_X, core_funcs), + X86_MATCH(INTEL_SANDYBRIDGE, core_funcs), + X86_MATCH(INTEL_SANDYBRIDGE_X, core_funcs), + 
X86_MATCH(INTEL_ATOM_SILVERMONT, silvermont_funcs), + X86_MATCH(INTEL_IVYBRIDGE, core_funcs), + X86_MATCH(INTEL_HASWELL, core_funcs), + X86_MATCH(INTEL_BROADWELL, core_funcs), + X86_MATCH(INTEL_IVYBRIDGE_X, core_funcs), + X86_MATCH(INTEL_HASWELL_X, core_funcs), + X86_MATCH(INTEL_HASWELL_L, core_funcs), + X86_MATCH(INTEL_HASWELL_G, core_funcs), + X86_MATCH(INTEL_BROADWELL_G, core_funcs), + X86_MATCH(INTEL_ATOM_AIRMONT, airmont_funcs), + X86_MATCH(INTEL_SKYLAKE_L, core_funcs), + X86_MATCH(INTEL_BROADWELL_X, core_funcs), + X86_MATCH(INTEL_SKYLAKE, core_funcs), + X86_MATCH(INTEL_BROADWELL_D, core_funcs), + X86_MATCH(INTEL_XEON_PHI_KNL, knl_funcs), + X86_MATCH(INTEL_XEON_PHI_KNM, knl_funcs), + X86_MATCH(INTEL_ATOM_GOLDMONT, core_funcs), + X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS, core_funcs), + X86_MATCH(INTEL_SKYLAKE_X, core_funcs), + X86_MATCH(INTEL_COMETLAKE, core_funcs), + X86_MATCH(INTEL_ICELAKE_X, core_funcs), + X86_MATCH(INTEL_TIGERLAKE, core_funcs), + X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); +#ifdef CONFIG_ACPI static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { - X86_MATCH(BROADWELL_D, core_funcs), - X86_MATCH(BROADWELL_X, core_funcs), - X86_MATCH(SKYLAKE_X, core_funcs), - X86_MATCH(ICELAKE_X, core_funcs), - X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(INTEL_BROADWELL_D, core_funcs), + X86_MATCH(INTEL_BROADWELL_X, core_funcs), + X86_MATCH(INTEL_SKYLAKE_X, core_funcs), + X86_MATCH(INTEL_ICELAKE_X, core_funcs), + X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs), + X86_MATCH(INTEL_GRANITERAPIDS_D, core_funcs), + X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs), + X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs), + X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs), {} }; +#endif static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { - X86_MATCH(KABYLAKE, core_funcs), + X86_MATCH(INTEL_KABYLAKE, core_funcs), {} }; @@ -2484,7 +2833,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum) } cpu->epp_powersave = -EINVAL; - cpu->epp_policy = 0; + cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN; intel_pstate_get_cpu_pstates(cpu); @@ -2526,7 +2875,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) static int intel_pstate_get_max_freq(struct cpudata *cpu) { - return global.turbo_disabled || global.no_turbo ? + return READ_ONCE(global.no_turbo) ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; } @@ -2611,12 +2960,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) intel_pstate_update_perf_limits(cpu, policy->min, policy->max); if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { + int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); + /* * NOHZ_FULL CPUs need this as the governor callback may not * be invoked on them. */ intel_pstate_clear_update_util_hook(policy->cpu); - intel_pstate_max_within_limits(cpu); + intel_pstate_set_pstate(cpu, pstate); } else { intel_pstate_set_update_util_hook(policy->cpu); } @@ -2659,10 +3010,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, { int max_freq; - update_turbo_state(); if (hwp_active) { intel_pstate_get_hwp_cap(cpu); - max_freq = global.no_turbo || global.turbo_disabled ? + max_freq = READ_ONCE(global.no_turbo) ? 
cpu->pstate.max_freq : cpu->pstate.turbo_freq; } else { max_freq = intel_pstate_get_max_freq(cpu); @@ -2719,6 +3069,8 @@ static int intel_pstate_cpu_online(struct cpufreq_policy *policy) */ intel_pstate_hwp_reenable(cpu); cpu->suspended = false; + + hybrid_update_capacity(cpu); } return 0; @@ -2731,13 +3083,11 @@ static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) return intel_cpufreq_cpu_offline(policy); } -static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) +static void intel_pstate_cpu_exit(struct cpufreq_policy *policy) { pr_debug("CPU %d exiting\n", policy->cpu); policy->fast_switch_possible = false; - - return 0; } static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) @@ -2756,9 +3106,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_freq; - update_turbo_state(); - global.turbo_disabled_mf = global.turbo_disabled; - policy->cpuinfo.max_freq = global.turbo_disabled ? + policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; policy->min = policy->cpuinfo.min_freq; @@ -2874,19 +3222,19 @@ static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, WRITE_ONCE(cpu->hwp_req_cached, value); if (fast_switch) - wrmsrl(MSR_HWP_REQUEST, value); + wrmsrq(MSR_HWP_REQUEST, value); else - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); } static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, u32 target_pstate, bool fast_switch) { if (fast_switch) - wrmsrl(MSR_IA32_PERF_CTL, + wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, target_pstate)); else - wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, + wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, target_pstate)); } @@ -2923,8 +3271,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int target_pstate; - update_turbo_state(); - freqs.old = policy->cur; freqs.new = target_freq; @@ -2946,8 +3292,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, struct cpudata *cpu = all_cpu_data[policy->cpu]; int target_pstate; - update_turbo_state(); - target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq); target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); @@ -2965,9 +3309,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum, int old_pstate = cpu->pstate.current_pstate; int cap_pstate, min_pstate, max_pstate, target_pstate; - update_turbo_state(); - cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : - HWP_HIGHEST_PERF(hwp_cap); + cap_pstate = READ_ONCE(global.no_turbo) ? + HWP_GUARANTEED_PERF(hwp_cap) : + HWP_HIGHEST_PERF(hwp_cap); /* Optimization: Avoid unnecessary divisions. 
*/ @@ -3034,7 +3378,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) intel_pstate_get_hwp_cap(cpu); - rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); + rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); WRITE_ONCE(cpu->hwp_req_cached, value); cpu->epp_cached = intel_pstate_get_epp(cpu, value); @@ -3074,7 +3418,7 @@ pstate_exit: return ret; } -static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct freq_qos_request *req; @@ -3084,7 +3428,7 @@ static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) freq_qos_remove_request(req); kfree(req); - return intel_pstate_cpu_exit(policy); + intel_pstate_cpu_exit(policy); } static int intel_cpufreq_suspend(struct cpufreq_policy *policy) @@ -3101,7 +3445,7 @@ static int intel_cpufreq_suspend(struct cpufreq_policy *policy) * written by it may not be suitable. */ value &= ~HWP_DESIRED_PERF(~0L); - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); WRITE_ONCE(cpu->hwp_req_cached, value); } @@ -3135,10 +3479,8 @@ static void intel_pstate_driver_cleanup(void) if (intel_pstate_driver == &intel_pstate) intel_pstate_clear_update_util_hook(cpu); - spin_lock(&hwp_notify_lock); kfree(all_cpu_data[cpu]); WRITE_ONCE(all_cpu_data[cpu], NULL); - spin_unlock(&hwp_notify_lock); } } cpus_read_unlock(); @@ -3148,6 +3490,7 @@ static void intel_pstate_driver_cleanup(void) static int intel_pstate_register_driver(struct cpufreq_driver *driver) { + bool refresh_cpu_cap_scaling; int ret; if (driver == &intel_pstate) @@ -3155,6 +3498,12 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) memset(&global, 0, sizeof(global)); global.max_perf_pct = 100; + global.turbo_disabled = turbo_is_disabled(); + global.no_turbo = global.turbo_disabled; + + arch_set_max_freq_ratio(global.turbo_disabled); + + refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu(); intel_pstate_driver = driver; ret = cpufreq_register_driver(intel_pstate_driver); @@ -3165,6 +3514,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) global.min_perf_pct = min_perf_pct_min(); + hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling); + return 0; } @@ -3344,7 +3695,7 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) id = x86_match_cpu(intel_pstate_cpu_oob_ids); if (id) { - rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); + rdmsrq(MSR_MISC_PWR_MGMT, misc_pwr); if (misc_pwr & BITMASK_OOB) { pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); @@ -3386,14 +3737,13 @@ static inline void intel_pstate_request_control_from_smm(void) {} #define INTEL_PSTATE_HWP_BROADWELL 0x01 -#define X86_MATCH_HWP(model, hwp_mode) \ - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ - X86_FEATURE_HWP, hwp_mode) +#define X86_MATCH_HWP(vfm, hwp_mode) \ + X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_HWP, hwp_mode) static const struct x86_cpu_id hwp_support_ids[] __initconst = { - X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), - X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), - X86_MATCH_HWP(ANY, 0), + X86_MATCH_HWP(INTEL_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), + X86_MATCH_HWP(INTEL_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), + X86_MATCH_HWP(INTEL_ANY, 0), {} }; @@ -3401,7 +3751,7 @@ static bool intel_pstate_hwp_is_enabled(void) { u64 value; - rdmsrl(MSR_PM_ENABLE, value); + rdmsrq(MSR_PM_ENABLE, value); return !!(value 
& 0x1); } @@ -3426,15 +3776,26 @@ static const struct x86_cpu_id intel_epp_default[] = { * which can result in one core turbo frequency for * AlderLake Mobile CPUs. */ - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)), - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, - HWP_EPP_BALANCE_POWERSAVE, 115, 16)), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)), + X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), + X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), + X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), + X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, HWP_SET_DEF_BALANCE_PERF_EPP(32)), + X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, + 179, 64, 16)), + X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, + 179, 64, 16)), {} }; static const struct x86_cpu_id intel_hybrid_scaling_factor[] = { - X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL), + X86_MATCH_VFM(INTEL_ALDERLAKE, HYBRID_SCALING_FACTOR_ADL), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, HYBRID_SCALING_FACTOR_ADL), + X86_MATCH_VFM(INTEL_RAPTORLAKE, HYBRID_SCALING_FACTOR_ADL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, HYBRID_SCALING_FACTOR_ADL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, HYBRID_SCALING_FACTOR_ADL), + X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL), + X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL), {} }; @@ -3447,6 +3808,15 @@ static int __init intel_pstate_init(void) if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return -ENODEV; + /* + * The Intel pstate driver will be ignored if the platform + * firmware has its own power management modes. + */ + if (intel_pstate_platform_pwr_mgmt_exists()) { + pr_info("P-states controlled by the platform\n"); + return -ENODEV; + } + id = x86_match_cpu(hwp_support_ids); if (id) { hwp_forced = intel_pstate_hwp_is_enabled(); @@ -3466,7 +3836,7 @@ static int __init intel_pstate_init(void) * deal with it. */ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { - WRITE_ONCE(hwp_active, 1); + hwp_active = true; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; intel_cpufreq.attr = hwp_cpufreq_attrs; @@ -3502,15 +3872,6 @@ static int __init intel_pstate_init(void) default_driver = &intel_cpufreq; hwp_cpu_matched: - /* - * The Intel pstate driver will be ignored if the platform - * firmware has its own power management modes. 
- */ - if (intel_pstate_platform_pwr_mgmt_exists()) { - pr_info("P-states controlled by the platform\n"); - return -ENODEV; - } - if (!hwp_active && hwp_only) return -ENOTSUPP; @@ -3594,6 +3955,9 @@ static int __init intel_pstate_setup(char *str) if (!strcmp(str, "no_hwp")) no_hwp = 1; + if (!strcmp(str, "no_cas")) + no_cas = true; + if (!strcmp(str, "force")) force_load = 1; if (!strcmp(str, "hwp_only")) diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index fd20b986d1f2..24b285cbeb8d 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -96,7 +96,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = { .target_index = kirkwood_cpufreq_target, .init = kirkwood_cpufreq_cpu_init, .name = "kirkwood-cpufreq", - .attr = cpufreq_generic_attr, }; static int kirkwood_cpufreq_probe(struct platform_device *pdev) @@ -189,7 +188,7 @@ static void kirkwood_cpufreq_remove(struct platform_device *pdev) static struct platform_driver kirkwood_cpufreq_platform_driver = { .probe = kirkwood_cpufreq_probe, - .remove_new = kirkwood_cpufreq_remove, + .remove = kirkwood_cpufreq_remove, .driver = { .name = "kirkwood-cpufreq", }, diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 4c57c6725c13..ba0e08c8486a 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -136,7 +136,7 @@ static void do_longhaul1(unsigned int mults_index) { union msr_bcr2 bcr2; - rdmsrl(MSR_VIA_BCR2, bcr2.val); + rdmsrq(MSR_VIA_BCR2, bcr2.val); /* Enable software clock multiplier */ bcr2.bits.ESOFTBF = 1; bcr2.bits.CLOCKMUL = mults_index & 0xff; @@ -144,16 +144,16 @@ static void do_longhaul1(unsigned int mults_index) /* Sync to timer tick */ safe_halt(); /* Change frequency on next halt or sleep */ - wrmsrl(MSR_VIA_BCR2, bcr2.val); + wrmsrq(MSR_VIA_BCR2, bcr2.val); /* Invoke transition */ ACPI_FLUSH_CPU_CACHE(); halt(); /* Disable software clock multiplier */ local_irq_disable(); - rdmsrl(MSR_VIA_BCR2, bcr2.val); + rdmsrq(MSR_VIA_BCR2, bcr2.val); bcr2.bits.ESOFTBF = 0; - wrmsrl(MSR_VIA_BCR2, bcr2.val); + wrmsrq(MSR_VIA_BCR2, bcr2.val); } /* For processor with Longhaul MSR */ @@ -164,7 +164,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, union msr_longhaul longhaul; u32 t; - rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); + rdmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Setup new frequency */ if (!revid_errata) longhaul.bits.RevisionKey = longhaul.bits.RevisionID; @@ -180,7 +180,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, /* Raise voltage if necessary */ if (can_scale_voltage && dir) { longhaul.bits.EnableSoftVID = 1; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); @@ -194,12 +194,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index, t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); } /* Change frequency on next halt or sleep */ longhaul.bits.EnableSoftBusRatio = 1; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); @@ -212,12 +212,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index, } /* Disable bus ratio bit */ longhaul.bits.EnableSoftBusRatio = 0; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Reduce 
voltage if necessary */ if (can_scale_voltage && !dir) { longhaul.bits.EnableSoftVID = 1; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); @@ -231,13 +231,14 @@ static void do_powersaver(int cx_address, unsigned int mults_index, t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); } } /** - * longhaul_set_cpu_frequency() - * @mults_index : bitpattern of the new multiplier. + * longhaul_setstate() + * @policy: cpufreq_policy structure containing the current policy. + * @table_index: index of the frequency within the cpufreq_frequency_table. * * Sets a new clock ratio. */ @@ -533,7 +534,7 @@ static void longhaul_setup_voltagescaling(void) unsigned int j, speed, pos, kHz_step, numvscales; int min_vid_speed; - rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); + rdmsrq(MSR_VIA_LONGHAUL, longhaul.val); if (!(longhaul.bits.RevisionID & 1)) { pr_info("Voltage scaling not supported by CPU\n"); return; @@ -905,7 +906,6 @@ static struct cpufreq_driver longhaul_driver = { .get = longhaul_get, .init = longhaul_cpu_init, .name = "longhaul", - .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id longhaul_id[] = { diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index afc59b292153..39a6c4315a60 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -85,19 +85,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) -{ - return 0; -} - static struct cpufreq_driver loongson2_cpufreq_driver = { .name = "loongson2", .init = loongson2_cpufreq_cpu_init, .verify = cpufreq_generic_frequency_table_verify, .target_index = loongson2_cpufreq_target, .get = cpufreq_generic_get, - .exit = loongson2_cpufreq_exit, - .attr = cpufreq_generic_attr, }; static const struct platform_device_id platform_device_ids[] = { @@ -154,7 +147,9 @@ static int __init cpufreq_init(void) ret = cpufreq_register_driver(&loongson2_cpufreq_driver); - if (!ret && !nowait) { + if (ret) { + platform_driver_unregister(&platform_driver); + } else if (!nowait) { saved_cpu_wait = cpu_wait; cpu_wait = loongson2_cpu_wait; } diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c new file mode 100644 index 000000000000..1e8715ea1b77 --- /dev/null +++ b/drivers/cpufreq/loongson3_cpufreq.c @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CPUFreq driver for the Loongson-3 processors. + * + * All revisions of Loongson-3 processor support cpu_has_scalefreq feature. 
+ * + * Author: Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ +#include <linux/cpufreq.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/units.h> + +#include <asm/idle.h> +#include <asm/loongarch.h> +#include <asm/loongson.h> + +/* Message */ +union smc_message { + u32 value; + struct { + u32 id : 4; + u32 info : 4; + u32 val : 16; + u32 cmd : 6; + u32 extra : 1; + u32 complete : 1; + }; +}; + +/* Command return values */ +#define CMD_OK 0 /* No error */ +#define CMD_ERROR 1 /* Regular error */ +#define CMD_NOCMD 2 /* Command does not support */ +#define CMD_INVAL 3 /* Invalid Parameter */ + +/* Version commands */ +/* + * CMD_GET_VERSION - Get interface version + * Input: none + * Output: version + */ +#define CMD_GET_VERSION 0x1 + +/* Feature commands */ +/* + * CMD_GET_FEATURE - Get feature state + * Input: feature ID + * Output: feature flag + */ +#define CMD_GET_FEATURE 0x2 + +/* + * CMD_SET_FEATURE - Set feature state + * Input: feature ID, feature flag + * output: none + */ +#define CMD_SET_FEATURE 0x3 + +/* Feature IDs */ +#define FEATURE_SENSOR 0 +#define FEATURE_FAN 1 +#define FEATURE_DVFS 2 + +/* Sensor feature flags */ +#define FEATURE_SENSOR_ENABLE BIT(0) +#define FEATURE_SENSOR_SAMPLE BIT(1) + +/* Fan feature flags */ +#define FEATURE_FAN_ENABLE BIT(0) +#define FEATURE_FAN_AUTO BIT(1) + +/* DVFS feature flags */ +#define FEATURE_DVFS_ENABLE BIT(0) +#define FEATURE_DVFS_BOOST BIT(1) +#define FEATURE_DVFS_AUTO BIT(2) +#define FEATURE_DVFS_SINGLE_BOOST BIT(3) + +/* Sensor commands */ +/* + * CMD_GET_SENSOR_NUM - Get number of sensors + * Input: none + * Output: number + */ +#define CMD_GET_SENSOR_NUM 0x4 + +/* + * CMD_GET_SENSOR_STATUS - Get sensor status + * Input: sensor ID, type + * Output: sensor status + */ +#define CMD_GET_SENSOR_STATUS 0x5 + +/* Sensor types */ +#define SENSOR_INFO_TYPE 0 +#define SENSOR_INFO_TYPE_TEMP 1 + +/* Fan commands */ +/* + * CMD_GET_FAN_NUM - Get number of fans + * Input: none + * Output: number + */ +#define CMD_GET_FAN_NUM 0x6 + +/* + * CMD_GET_FAN_INFO - Get fan status + * Input: fan ID, type + * Output: fan info + */ +#define CMD_GET_FAN_INFO 0x7 + +/* + * CMD_SET_FAN_INFO - Set fan status + * Input: fan ID, type, value + * Output: none + */ +#define CMD_SET_FAN_INFO 0x8 + +/* Fan types */ +#define FAN_INFO_TYPE_LEVEL 0 + +/* DVFS commands */ +/* + * CMD_GET_FREQ_LEVEL_NUM - Get number of freq levels + * Input: CPU ID + * Output: number + */ +#define CMD_GET_FREQ_LEVEL_NUM 0x9 + +/* + * CMD_GET_FREQ_BOOST_LEVEL - Get the first boost level + * Input: CPU ID + * Output: number + */ +#define CMD_GET_FREQ_BOOST_LEVEL 0x10 + +/* + * CMD_GET_FREQ_LEVEL_INFO - Get freq level info + * Input: CPU ID, level ID + * Output: level info + */ +#define CMD_GET_FREQ_LEVEL_INFO 0x11 + +/* + * CMD_GET_FREQ_INFO - Get freq info + * Input: CPU ID, type + * Output: freq info + */ +#define CMD_GET_FREQ_INFO 0x12 + +/* + * CMD_SET_FREQ_INFO - Set freq info + * Input: CPU ID, type, value + * Output: none + */ +#define CMD_SET_FREQ_INFO 0x13 + +/* Freq types */ +#define FREQ_INFO_TYPE_FREQ 0 +#define FREQ_INFO_TYPE_LEVEL 1 + +#define FREQ_MAX_LEVEL 16 + +struct loongson3_freq_data { + unsigned int def_freq_level; + struct cpufreq_frequency_table table[]; +}; + +static struct mutex cpufreq_mutex[MAX_PACKAGES]; +static struct cpufreq_driver loongson3_cpufreq_driver; +static DEFINE_PER_CPU(struct loongson3_freq_data *, freq_data); + +static inline int 
do_service_request(u32 id, u32 info, u32 cmd, u32 val, u32 extra) +{ + int retries; + unsigned int cpu = raw_smp_processor_id(); + unsigned int package = cpu_data[cpu].package; + union smc_message msg, last; + + mutex_lock(&cpufreq_mutex[package]); + + last.value = iocsr_read32(LOONGARCH_IOCSR_SMCMBX); + if (!last.complete) { + mutex_unlock(&cpufreq_mutex[package]); + return -EPERM; + } + + msg.id = id; + msg.info = info; + msg.cmd = cmd; + msg.val = val; + msg.extra = extra; + msg.complete = 0; + + iocsr_write32(msg.value, LOONGARCH_IOCSR_SMCMBX); + iocsr_write32(iocsr_read32(LOONGARCH_IOCSR_MISC_FUNC) | IOCSR_MISC_FUNC_SOFT_INT, + LOONGARCH_IOCSR_MISC_FUNC); + + for (retries = 0; retries < 10000; retries++) { + msg.value = iocsr_read32(LOONGARCH_IOCSR_SMCMBX); + if (msg.complete) + break; + + usleep_range(8, 12); + } + + if (!msg.complete || msg.cmd != CMD_OK) { + mutex_unlock(&cpufreq_mutex[package]); + return -EPERM; + } + + mutex_unlock(&cpufreq_mutex[package]); + + return msg.val; +} + +static unsigned int loongson3_cpufreq_get(unsigned int cpu) +{ + int ret; + + ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_INFO, 0, 0); + + return ret * KILO; +} + +static int loongson3_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) +{ + int ret; + + ret = do_service_request(cpu_data[policy->cpu].core, + FREQ_INFO_TYPE_LEVEL, CMD_SET_FREQ_INFO, index, 0); + + return (ret >= 0) ? 0 : ret; +} + +static int configure_freq_table(int cpu) +{ + int i, ret, boost_level, max_level, freq_level; + struct platform_device *pdev = cpufreq_get_driver_data(); + struct loongson3_freq_data *data; + + if (per_cpu(freq_data, cpu)) + return 0; + + ret = do_service_request(cpu, 0, CMD_GET_FREQ_LEVEL_NUM, 0, 0); + if (ret < 0) + return ret; + max_level = ret; + + ret = do_service_request(cpu, 0, CMD_GET_FREQ_BOOST_LEVEL, 0, 0); + if (ret < 0) + return ret; + boost_level = ret; + + freq_level = min(max_level, FREQ_MAX_LEVEL); + data = devm_kzalloc(&pdev->dev, struct_size(data, table, freq_level + 1), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->def_freq_level = boost_level - 1; + + for (i = 0; i < freq_level; i++) { + ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_LEVEL_INFO, i, 0); + if (ret < 0) { + devm_kfree(&pdev->dev, data); + return ret; + } + + data->table[i].frequency = ret * KILO; + data->table[i].flags = (i >= boost_level) ? 
CPUFREQ_BOOST_FREQ : 0; + } + + data->table[freq_level].flags = 0; + data->table[freq_level].frequency = CPUFREQ_TABLE_END; + + per_cpu(freq_data, cpu) = data; + + return 0; +} + +static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + int i, ret, cpu = policy->cpu; + + ret = configure_freq_table(cpu); + if (ret < 0) + return ret; + + policy->cpuinfo.transition_latency = 10000; + policy->freq_table = per_cpu(freq_data, cpu)->table; + policy->suspend_freq = policy->freq_table[per_cpu(freq_data, cpu)->def_freq_level].frequency; + cpumask_copy(policy->cpus, topology_sibling_cpumask(cpu)); + + for_each_cpu(i, policy->cpus) { + if (i != cpu) + per_cpu(freq_data, i) = per_cpu(freq_data, cpu); + } + + return 0; +} + +static void loongson3_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + int cpu = policy->cpu; + + loongson3_cpufreq_target(policy, per_cpu(freq_data, cpu)->def_freq_level); +} + +static int loongson3_cpufreq_cpu_online(struct cpufreq_policy *policy) +{ + return 0; +} + +static int loongson3_cpufreq_cpu_offline(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct cpufreq_driver loongson3_cpufreq_driver = { + .name = "loongson3", + .flags = CPUFREQ_CONST_LOOPS, + .init = loongson3_cpufreq_cpu_init, + .exit = loongson3_cpufreq_cpu_exit, + .online = loongson3_cpufreq_cpu_online, + .offline = loongson3_cpufreq_cpu_offline, + .get = loongson3_cpufreq_get, + .target_index = loongson3_cpufreq_target, + .verify = cpufreq_generic_frequency_table_verify, + .set_boost = cpufreq_boost_set_sw, + .suspend = cpufreq_generic_suspend, +}; + +static int loongson3_cpufreq_probe(struct platform_device *pdev) +{ + int i, ret; + + for (i = 0; i < MAX_PACKAGES; i++) { + ret = devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]); + if (ret) + return ret; + } + + ret = do_service_request(0, 0, CMD_GET_VERSION, 0, 0); + if (ret <= 0) + return -EPERM; + + ret = do_service_request(FEATURE_DVFS, 0, CMD_SET_FEATURE, + FEATURE_DVFS_ENABLE | FEATURE_DVFS_BOOST, 0); + if (ret < 0) + return -EPERM; + + loongson3_cpufreq_driver.driver_data = pdev; + + ret = cpufreq_register_driver(&loongson3_cpufreq_driver); + if (ret) + return ret; + + pr_info("cpufreq: Loongson-3 CPU frequency driver.\n"); + + return 0; +} + +static void loongson3_cpufreq_remove(struct platform_device *pdev) +{ + cpufreq_unregister_driver(&loongson3_cpufreq_driver); +} + +static struct platform_device_id cpufreq_id_table[] = { + { "loongson3_cpufreq", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, cpufreq_id_table); + +static struct platform_driver loongson3_platform_driver = { + .driver = { + .name = "loongson3_cpufreq", + }, + .id_table = cpufreq_id_table, + .probe = loongson3_cpufreq_probe, + .remove = loongson3_cpufreq_remove, +}; +module_platform_driver(loongson3_platform_driver); + +MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>"); +MODULE_DESCRIPTION("CPUFreq driver for Loongson-3 processors"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c deleted file mode 100644 index f9306410a07f..000000000000 --- a/drivers/cpufreq/maple-cpufreq.c +++ /dev/null @@ -1,241 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2011 Dmitry Eremin-Solenikov - * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> - * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de> - * - * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs, - * that is iMac G5 and latest single CPU desktop. 
- */ - -#undef DEBUG - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/cpufreq.h> -#include <linux/init.h> -#include <linux/completion.h> -#include <linux/mutex.h> -#include <linux/time.h> -#include <linux/of.h> - -#define DBG(fmt...) pr_debug(fmt) - -/* see 970FX user manual */ - -#define SCOM_PCR 0x0aa001 /* PCR scom addr */ - -#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */ -#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */ -#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */ -#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */ -#define PCR_SPEED_MASK 0x000e0000U /* speed mask */ -#define PCR_SPEED_SHIFT 17 -#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */ -#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */ -#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */ -#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */ -#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */ -#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */ - -#define SCOM_PSR 0x408001 /* PSR scom addr */ -/* warning: PSR is a 64 bits register */ -#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */ -#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */ -#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */ -#define PSR_CUR_SPEED_SHIFT (56) - -/* - * The G5 only supports two frequencies (Quarter speed is not supported) - */ -#define CPUFREQ_HIGH 0 -#define CPUFREQ_LOW 1 - -static struct cpufreq_frequency_table maple_cpu_freqs[] = { - {0, CPUFREQ_HIGH, 0}, - {0, CPUFREQ_LOW, 0}, - {0, 0, CPUFREQ_TABLE_END}, -}; - -/* Power mode data is an array of the 32 bits PCR values to use for - * the various frequencies, retrieved from the device-tree - */ -static int maple_pmode_cur; - -static const u32 *maple_pmode_data; -static int maple_pmode_max; - -/* - * SCOM based frequency switching for 970FX rev3 - */ -static int maple_scom_switch_freq(int speed_mode) -{ - unsigned long flags; - int to; - - local_irq_save(flags); - - /* Clear PCR high */ - scom970_write(SCOM_PCR, 0); - /* Clear PCR low */ - scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0); - /* Set PCR low */ - scom970_write(SCOM_PCR, PCR_HILO_SELECT | - maple_pmode_data[speed_mode]); - - /* Wait for completion */ - for (to = 0; to < 10; to++) { - unsigned long psr = scom970_read(SCOM_PSR); - - if ((psr & PSR_CMD_RECEIVED) == 0 && - (((psr >> PSR_CUR_SPEED_SHIFT) ^ - (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3) - == 0) - break; - if (psr & PSR_CMD_COMPLETED) - break; - udelay(100); - } - - local_irq_restore(flags); - - maple_pmode_cur = speed_mode; - ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul; - - return 0; -} - -static int maple_scom_query_freq(void) -{ - unsigned long psr = scom970_read(SCOM_PSR); - int i; - - for (i = 0; i <= maple_pmode_max; i++) - if ((((psr >> PSR_CUR_SPEED_SHIFT) ^ - (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0) - break; - return i; -} - -/* - * Common interface to the cpufreq core - */ - -static int maple_cpufreq_target(struct cpufreq_policy *policy, - unsigned int index) -{ - return maple_scom_switch_freq(index); -} - -static unsigned int maple_cpufreq_get_speed(unsigned int cpu) -{ - return maple_cpu_freqs[maple_pmode_cur].frequency; -} - -static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - 
cpufreq_generic_init(policy, maple_cpu_freqs, 12000); - return 0; -} - -static struct cpufreq_driver maple_cpufreq_driver = { - .name = "maple", - .flags = CPUFREQ_CONST_LOOPS, - .init = maple_cpufreq_cpu_init, - .verify = cpufreq_generic_frequency_table_verify, - .target_index = maple_cpufreq_target, - .get = maple_cpufreq_get_speed, - .attr = cpufreq_generic_attr, -}; - -static int __init maple_cpufreq_init(void) -{ - struct device_node *cpunode; - unsigned int psize; - unsigned long max_freq; - const u32 *valp; - u32 pvr_hi; - int rc = -ENODEV; - - /* - * Behave here like powermac driver which checks machine compatibility - * to ease merging of two drivers in future. - */ - if (!of_machine_is_compatible("Momentum,Maple") && - !of_machine_is_compatible("Momentum,Apache")) - return 0; - - /* Get first CPU node */ - cpunode = of_cpu_device_node_get(0); - if (cpunode == NULL) { - pr_err("Can't find any CPU 0 node\n"); - goto bail_noprops; - } - - /* Check 970FX for now */ - /* we actually don't care on which CPU to access PVR */ - pvr_hi = PVR_VER(mfspr(SPRN_PVR)); - if (pvr_hi != 0x3c && pvr_hi != 0x44) { - pr_err("Unsupported CPU version (%x)\n", pvr_hi); - goto bail_noprops; - } - - /* Look for the powertune data in the device-tree */ - /* - * On Maple this property is provided by PIBS in dual-processor config, - * not provided by PIBS in CPU0 config and also not provided by SLOF, - * so YMMV - */ - maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize); - if (!maple_pmode_data) { - DBG("No power-mode-data !\n"); - goto bail_noprops; - } - maple_pmode_max = psize / sizeof(u32) - 1; - - /* - * From what I see, clock-frequency is always the maximal frequency. - * The current driver can not slew sysclk yet, so we really only deal - * with powertune steps for now. We also only implement full freq and - * half freq in this version. So far, I haven't yet seen a machine - * supporting anything else. - */ - valp = of_get_property(cpunode, "clock-frequency", NULL); - if (!valp) - goto bail_noprops; - max_freq = (*valp)/1000; - maple_cpu_freqs[0].frequency = max_freq; - maple_cpu_freqs[1].frequency = max_freq/2; - - /* Force apply current frequency to make sure everything is in - * sync (voltage is right for example). Firmware may leave us with - * a strange setting ... 
- */ - msleep(10); - maple_pmode_cur = -1; - maple_scom_switch_freq(maple_scom_query_freq()); - - pr_info("Registering Maple CPU frequency driver\n"); - pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", - maple_cpu_freqs[1].frequency/1000, - maple_cpu_freqs[0].frequency/1000, - maple_cpu_freqs[maple_pmode_cur].frequency/1000); - - rc = cpufreq_register_driver(&maple_cpufreq_driver); - -bail_noprops: - of_node_put(cpunode); - - return rc; -} - -module_init(maple_cpufreq_init); - - -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index 8d097dcddda4..74f1b4c796e4 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -62,7 +62,7 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW, policy = cpufreq_cpu_get_raw(cpu_dev->id); if (!policy) - return 0; + return -EINVAL; data = policy->driver_data; @@ -260,7 +260,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) return 0; } -static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) +static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) { struct mtk_cpufreq_data *data = policy->driver_data; struct resource *res = data->res; @@ -270,8 +270,6 @@ static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]); iounmap(base); release_mem_region(res->start, resource_size(res)); - - return 0; } static void mtk_cpufreq_register_em(struct cpufreq_policy *policy) @@ -295,7 +293,6 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = { .register_em = mtk_cpufreq_register_em, .fast_switch = mtk_cpufreq_hw_fast_switch, .name = "mtk-cpufreq-hw", - .attr = cpufreq_generic_attr, }; static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev) @@ -306,7 +303,7 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev) struct regulator *cpu_reg; /* Make sure that all CPU supplies are available before proceeding. 
*/ - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { cpu_dev = get_cpu_device(cpu); if (!cpu_dev) return dev_err_probe(&pdev->dev, -EPROBE_DEFER, @@ -346,7 +343,7 @@ MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match); static struct platform_driver mtk_cpufreq_hw_driver = { .probe = mtk_cpufreq_hw_driver_probe, - .remove_new = mtk_cpufreq_hw_driver_remove, + .remove = mtk_cpufreq_hw_driver_remove, .driver = { .name = "mtk-cpufreq-hw", .of_match_table = mtk_cpufreq_hw_match, diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index a0a61919bc4c..f3f02c4b6888 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -390,28 +390,23 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) int ret; cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) { - dev_err(cpu_dev, "failed to get cpu%d device\n", cpu); - return -ENODEV; - } + if (!cpu_dev) + return dev_err_probe(cpu_dev, -ENODEV, "failed to get cpu%d device\n", cpu); info->cpu_dev = cpu_dev; info->ccifreq_bound = false; if (info->soc_data->ccifreq_supported) { info->cci_dev = of_get_cci(info->cpu_dev); - if (IS_ERR(info->cci_dev)) { - ret = PTR_ERR(info->cci_dev); - dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu); - return -ENODEV; - } + if (IS_ERR(info->cci_dev)) + return dev_err_probe(cpu_dev, PTR_ERR(info->cci_dev), + "cpu%d: failed to get cci device\n", + cpu); } info->cpu_clk = clk_get(cpu_dev, "cpu"); - if (IS_ERR(info->cpu_clk)) { - ret = PTR_ERR(info->cpu_clk); - return dev_err_probe(cpu_dev, ret, + if (IS_ERR(info->cpu_clk)) + return dev_err_probe(cpu_dev, PTR_ERR(info->cpu_clk), "cpu%d: failed to get cpu clk\n", cpu); - } info->inter_clk = clk_get(cpu_dev, "intermediate"); if (IS_ERR(info->inter_clk)) { @@ -431,7 +426,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) ret = regulator_enable(info->proc_reg); if (ret) { - dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu); + dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vproc\n", cpu); goto out_free_proc_reg; } @@ -439,14 +434,17 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) info->sram_reg = regulator_get_optional(cpu_dev, "sram"); if (IS_ERR(info->sram_reg)) { ret = PTR_ERR(info->sram_reg); - if (ret == -EPROBE_DEFER) + if (ret == -EPROBE_DEFER) { + dev_err_probe(cpu_dev, ret, + "cpu%d: Failed to get sram regulator\n", cpu); goto out_disable_proc_reg; + } info->sram_reg = NULL; } else { ret = regulator_enable(info->sram_reg); if (ret) { - dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu); + dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vsram\n", cpu); goto out_free_sram_reg; } } @@ -454,31 +452,34 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) /* Get OPP-sharing information from "operating-points-v2" bindings */ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus); if (ret) { - dev_err(cpu_dev, + dev_err_probe(cpu_dev, ret, "cpu%d: failed to get OPP-sharing information\n", cpu); goto out_disable_sram_reg; } ret = dev_pm_opp_of_cpumask_add_table(&info->cpus); if (ret) { - dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu); + dev_err_probe(cpu_dev, ret, "cpu%d: no OPP table\n", cpu); goto out_disable_sram_reg; } ret = clk_prepare_enable(info->cpu_clk); - if (ret) + if (ret) { + dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable cpu clk\n", cpu); goto out_free_opp_table; + } ret = clk_prepare_enable(info->inter_clk); - if (ret) + if (ret) { + 
dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable inter clk\n", cpu); goto out_disable_mux_clock; + } if (info->soc_data->ccifreq_supported) { info->vproc_on_boot = regulator_get_voltage(info->proc_reg); if (info->vproc_on_boot < 0) { - ret = info->vproc_on_boot; - dev_err(info->cpu_dev, - "invalid Vproc value: %d\n", info->vproc_on_boot); + ret = dev_err_probe(info->cpu_dev, info->vproc_on_boot, + "invalid Vproc value\n"); goto out_disable_inter_clock; } } @@ -487,8 +488,8 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) rate = clk_get_rate(info->inter_clk); opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate); if (IS_ERR(opp)) { - dev_err(cpu_dev, "cpu%d: failed to get intermediate opp\n", cpu); - ret = PTR_ERR(opp); + ret = dev_err_probe(cpu_dev, PTR_ERR(opp), + "cpu%d: failed to get intermediate opp\n", cpu); goto out_disable_inter_clock; } info->intermediate_voltage = dev_pm_opp_get_voltage(opp); @@ -501,7 +502,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) info->opp_nb.notifier_call = mtk_cpufreq_opp_notifier; ret = dev_pm_opp_register_notifier(cpu_dev, &info->opp_nb); if (ret) { - dev_err(cpu_dev, "cpu%d: failed to register opp notifier\n", cpu); + dev_err_probe(cpu_dev, ret, "cpu%d: failed to register opp notifier\n", cpu); goto out_disable_inter_clock; } @@ -599,13 +600,11 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) return 0; } -static int mtk_cpufreq_exit(struct cpufreq_policy *policy) +static void mtk_cpufreq_exit(struct cpufreq_policy *policy) { struct mtk_cpu_dvfs_info *info = policy->driver_data; dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table); - - return 0; } static struct cpufreq_driver mtk_cpufreq_driver = { @@ -619,7 +618,6 @@ static struct cpufreq_driver mtk_cpufreq_driver = { .exit = mtk_cpufreq_exit, .register_em = cpufreq_register_em_with_opp, .name = "mtk-cpufreq", - .attr = cpufreq_generic_attr, }; static int mtk_cpufreq_probe(struct platform_device *pdev) @@ -629,38 +627,33 @@ static int mtk_cpufreq_probe(struct platform_device *pdev) int cpu, ret; data = dev_get_platdata(&pdev->dev); - if (!data) { - dev_err(&pdev->dev, - "failed to get mtk cpufreq platform data\n"); - return -ENODEV; - } + if (!data) + return dev_err_probe(&pdev->dev, -ENODEV, + "failed to get mtk cpufreq platform data\n"); - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { info = mtk_cpu_dvfs_info_lookup(cpu); if (info) continue; info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) { - ret = -ENOMEM; + ret = dev_err_probe(&pdev->dev, -ENOMEM, + "Failed to allocate dvfs_info\n"); goto release_dvfs_info_list; } info->soc_data = data; ret = mtk_cpu_dvfs_info_init(info, cpu); - if (ret) { - dev_err(&pdev->dev, - "failed to initialize dvfs info for cpu%d\n", - cpu); + if (ret) goto release_dvfs_info_list; - } list_add(&info->list_head, &dvfs_info_list); } ret = cpufreq_register_driver(&mtk_cpufreq_driver); if (ret) { - dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n"); + dev_err_probe(&pdev->dev, ret, "failed to register mtk cpufreq driver\n"); goto release_dvfs_info_list; } @@ -707,6 +700,15 @@ static const struct mtk_cpufreq_platform_data mt7623_platform_data = { .ccifreq_supported = false, }; +static const struct mtk_cpufreq_platform_data mt7988_platform_data = { + .min_volt_shift = 100000, + .max_volt_shift = 200000, + .proc_max_volt = 900000, + .sram_min_volt = 0, + .sram_max_volt = 1150000, + .ccifreq_supported = true, +}; + static const struct 
mtk_cpufreq_platform_data mt8183_platform_data = { .min_volt_shift = 100000, .max_volt_shift = 200000, @@ -735,11 +737,12 @@ static const struct mtk_cpufreq_platform_data mt8516_platform_data = { }; /* List of machines supported by this driver */ -static const struct of_device_id mtk_cpufreq_machines[] __initconst = { +static const struct of_device_id mtk_cpufreq_machines[] __initconst __maybe_unused = { { .compatible = "mediatek,mt2701", .data = &mt2701_platform_data }, { .compatible = "mediatek,mt2712", .data = &mt2701_platform_data }, { .compatible = "mediatek,mt7622", .data = &mt7622_platform_data }, { .compatible = "mediatek,mt7623", .data = &mt7623_platform_data }, + { .compatible = "mediatek,mt7988a", .data = &mt7988_platform_data }, { .compatible = "mediatek,mt8167", .data = &mt8516_platform_data }, { .compatible = "mediatek,mt817x", .data = &mt2701_platform_data }, { .compatible = "mediatek,mt8173", .data = &mt2701_platform_data }, diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c index 7f3cfe668f30..2aad4c04673c 100644 --- a/drivers/cpufreq/mvebu-cpufreq.c +++ b/drivers/cpufreq/mvebu-cpufreq.c @@ -56,7 +56,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void) * it), and registers the clock notifier that will take care * of doing the PMSU part of a frequency transition. */ - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { struct device *cpu_dev; struct clk *clk; int ret; diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 895690856665..bbb01d93b54b 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -28,9 +28,6 @@ #include <linux/platform_device.h> #include <linux/regulator/consumer.h> -#include <asm/smp_plat.h> -#include <asm/cpu.h> - /* OPP tolerance in percentage */ #define OPP_TOLERANCE 4 @@ -135,11 +132,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy) return 0; } -static int omap_cpu_exit(struct cpufreq_policy *policy) +static void omap_cpu_exit(struct cpufreq_policy *policy) { freq_table_free(); clk_put(policy->clk); - return 0; } static struct cpufreq_driver omap_driver = { @@ -151,7 +147,6 @@ static struct cpufreq_driver omap_driver = { .exit = omap_cpu_exit, .register_em = cpufreq_register_em_with_opp, .name = "omap", - .attr = cpufreq_generic_attr, }; static int omap_cpufreq_probe(struct platform_device *pdev) @@ -192,7 +187,7 @@ static struct platform_driver omap_cpufreq_platdrv = { .name = "omap-cpufreq", }, .probe = omap_cpufreq_probe, - .remove_new = omap_cpufreq_remove, + .remove = omap_cpufreq_remove, }; module_platform_driver(omap_cpufreq_platdrv); diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index ef0a3216a386..69c19233fcd4 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -227,7 +227,6 @@ static struct cpufreq_driver p4clockmod_driver = { .init = cpufreq_p4_cpu_init, .get = cpufreq_p4_get, .name = "p4-clockmod", - .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id cpufreq_p4_id[] = { diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index 039a66bbe1be..a3931349360f 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c @@ -204,21 +204,19 @@ out: return err; } -static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) { /* * We don't support CPU hotplug. Don't unmap after the system * has already made it to a running state. 
*/ if (system_state >= SYSTEM_RUNNING) - return 0; + return; if (sdcasr_mapbase) iounmap(sdcasr_mapbase); if (sdcpwr_mapbase) iounmap(sdcpwr_mapbase); - - return 0; } static int pas_cpufreq_target(struct cpufreq_policy *policy, @@ -247,7 +245,6 @@ static struct cpufreq_driver pas_cpufreq_driver = { .exit = pas_cpufreq_cpu_exit, .verify = cpufreq_generic_frequency_table_verify, .target_index = pas_cpufreq_target, - .attr = cpufreq_generic_attr, }; /* @@ -271,5 +268,6 @@ static void __exit pas_cpufreq_exit(void) module_init(pas_cpufreq_init); module_exit(pas_cpufreq_exit); +MODULE_DESCRIPTION("cpufreq driver for PA Semi PWRficient"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>"); diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 6f8b5ea7aeae..ac2e90a65f0c 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -562,18 +562,12 @@ out: return result; } -static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - return 0; -} - static struct cpufreq_driver pcc_cpufreq_driver = { .flags = CPUFREQ_CONST_LOOPS, .get = pcc_get_freq, .verify = pcc_cpufreq_verify, .target = pcc_cpufreq_target, .init = pcc_cpufreq_cpu_init, - .exit = pcc_cpufreq_cpu_exit, .name = "pcc-cpufreq", }; @@ -621,7 +615,7 @@ static struct platform_driver pcc_cpufreq_platdrv = { .driver = { .name = "pcc-cpufreq", }, - .remove_new = pcc_cpufreq_remove, + .remove = pcc_cpufreq_remove, }; static int __init pcc_cpufreq_init(void) diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index df3567c1e93b..a22c22bd693a 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -120,9 +120,9 @@ static int cpu_750fx_cpu_speed(int low_speed) /* tweak L2 for high voltage */ if (has_cpu_l2lve) { - hid2 = mfspr(SPRN_HID2); + hid2 = mfspr(SPRN_HID2_750FX); hid2 &= ~0x2000; - mtspr(SPRN_HID2, hid2); + mtspr(SPRN_HID2_750FX, hid2); } } #ifdef CONFIG_PPC_BOOK3S_32 @@ -131,9 +131,9 @@ static int cpu_750fx_cpu_speed(int low_speed) if (low_speed == 1) { /* tweak L2 for low voltage */ if (has_cpu_l2lve) { - hid2 = mfspr(SPRN_HID2); + hid2 = mfspr(SPRN_HID2_750FX); hid2 |= 0x2000; - mtspr(SPRN_HID2, hid2); + mtspr(SPRN_HID2_750FX, hid2); } /* ramping down, set voltage last */ @@ -439,7 +439,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = { .suspend = pmac_cpufreq_suspend, .resume = pmac_cpufreq_resume, .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, - .attr = cpufreq_generic_attr, .name = "powermac", }; diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c index 2cd2b06849a2..80897ec8f00e 100644 --- a/drivers/cpufreq/pmac64-cpufreq.c +++ b/drivers/cpufreq/pmac64-cpufreq.c @@ -332,7 +332,6 @@ static struct cpufreq_driver g5_cpufreq_driver = { .verify = cpufreq_generic_frequency_table_verify, .target_index = g5_cpufreq_target, .get = g5_cpufreq_get_speed, - .attr = cpufreq_generic_attr, }; @@ -505,7 +504,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) continue; if (strcmp(loc, "CPU CLOCK")) continue; - if (!of_get_property(hwclock, "platform-get-frequency", NULL)) + if (!of_property_present(hwclock, "platform-get-frequency")) continue; break; } @@ -671,4 +670,5 @@ static int __init g5_cpufreq_init(void) module_init(g5_cpufreq_init); +MODULE_DESCRIPTION("cpufreq driver for SMU & 970FX based G5 Macs"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index 
41eefef95d87..99d2244e03b0 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c @@ -219,7 +219,7 @@ have_busfreq: } -static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) +static void powernow_k6_cpu_exit(struct cpufreq_policy *policy) { unsigned int i; @@ -234,10 +234,9 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) cpufreq_freq_transition_begin(policy, &freqs); powernow_k6_target(policy, i); cpufreq_freq_transition_end(policy, &freqs, 0); - break; + return; } } - return 0; } static unsigned int powernow_k6_get(unsigned int cpu) @@ -254,7 +253,6 @@ static struct cpufreq_driver powernow_k6_driver = { .exit = powernow_k6_cpu_exit, .get = powernow_k6_get, .name = "powernow-k6", - .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id powernow_k6_ids[] = { diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 5d515fc34836..31039330a3ba 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -219,13 +219,13 @@ static void change_FID(int fid) { union msr_fidvidctl fidvidctl; - rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.FID != fid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.FID = fid; fidvidctl.bits.VIDC = 0; fidvidctl.bits.FIDC = 1; - wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); } } @@ -234,13 +234,13 @@ static void change_VID(int vid) { union msr_fidvidctl fidvidctl; - rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.VID != vid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.VID = vid; fidvidctl.bits.FIDC = 0; fidvidctl.bits.VIDC = 1; - wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); } } @@ -260,7 +260,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index) fid = powernow_table[index].driver_data & 0xFF; vid = (powernow_table[index].driver_data & 0xFF00) >> 8; - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; freqs.old = fsb * fid_codes[cfid] / 10; @@ -557,7 +557,7 @@ static unsigned int powernow_get(unsigned int cpu) if (cpu) return 0; - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; return fsb * fid_codes[cfid] / 10; @@ -598,7 +598,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy) if (policy->cpu != 0) return -ENODEV; - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); recalibrate_cpu_khz(); @@ -644,7 +644,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy) return 0; } -static int powernow_cpu_exit(struct cpufreq_policy *policy) +static void powernow_cpu_exit(struct cpufreq_policy *policy) { #ifdef CONFIG_X86_POWERNOW_K7_ACPI if (acpi_processor_perf) { @@ -655,7 +655,6 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy) #endif kfree(powernow_table); - return 0; } static struct cpufreq_driver powernow_driver = { @@ -668,7 +667,6 @@ static struct cpufreq_driver powernow_driver = { .init = powernow_cpu_init, .exit = powernow_cpu_exit, .name = "powernow-k7", - .attr = cpufreq_generic_attr, }; static int __init powernow_init(void) diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index b10f7a1b77f1..f7512b4e923e 100644 --- a/drivers/cpufreq/powernow-k8.c +++ 
b/drivers/cpufreq/powernow-k8.c @@ -482,7 +482,7 @@ static void check_supported_cpu(void *_rc) cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) { - pr_info("Power state transitions not supported\n"); + pr_info_once("Power state transitions not supported\n"); return; } *rc = 0; @@ -1089,13 +1089,13 @@ err_out: return -ENODEV; } -static int powernowk8_cpu_exit(struct cpufreq_policy *pol) +static void powernowk8_cpu_exit(struct cpufreq_policy *pol) { struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); int cpu; if (!data) - return -EINVAL; + return; powernow_k8_cpu_exit_acpi(data); @@ -1104,8 +1104,6 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol) /* pol->cpus will be empty here, use related_cpus instead. */ for_each_cpu(cpu, pol->related_cpus) per_cpu(powernow_data, cpu) = NULL; - - return 0; } static void query_values_on_cpu(void *_err) @@ -1145,7 +1143,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = { .exit = powernowk8_cpu_exit, .get = powernowk8_get, .name = "powernow-k8", - .attr = cpufreq_generic_attr, }; static void __request_acpi_cpufreq(void) diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index fddbd1ea1635..afe5abf89d33 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -18,6 +18,7 @@ #include <linux/of.h> #include <linux/reboot.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/cpu.h> #include <linux/hashtable.h> #include <trace/events/power.h> @@ -281,7 +282,7 @@ next: pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min, pstate_nominal, pstate_max); pr_info("Workload Optimized Frequency is %s in the platform\n", - (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled"); + str_enabled_disabled(powernv_pstate_info.wof_enabled)); pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids); if (!pstate_ids) { @@ -385,12 +386,8 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy, static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq = __ATTR_RO(cpuinfo_nominal_freq); -#define SCALING_BOOST_FREQS_ATTR_INDEX 2 - static struct freq_attr *powernv_cpu_freq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, &cpufreq_freq_attr_cpuinfo_nominal_freq, - &cpufreq_freq_attr_scaling_boost_freqs, NULL, }; @@ -692,7 +689,7 @@ static void gpstate_timer_handler(struct timer_list *t) } /* - * If PMCR was last updated was using fast_swtich then + * If PMCR was last updated was using fast_switch then * We may have wrong in gpstate->last_lpstate_idx * value. Hence, read from PMCR to get correct data. 
*/ @@ -805,7 +802,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, if (gpstate_idx != new_index) queue_gpstate_timer(gpstates); else - del_timer_sync(&gpstates->timer); + timer_delete_sync(&gpstates->timer); gpstates_done: freq_data.gpstate_id = idx_to_pstate(gpstate_idx); @@ -874,7 +871,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct powernv_smp_call_data freq_data; struct global_pstate_info *gpstates = policy->driver_data; @@ -883,11 +880,9 @@ static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy) freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min); smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1); if (gpstates) - del_timer_sync(&gpstates->timer); + timer_delete_sync(&gpstates->timer); kfree(policy->driver_data); - - return 0; } static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb, @@ -1129,9 +1124,7 @@ static int __init powernv_cpufreq_init(void) goto out; if (powernv_pstate_info.wof_enabled) - powernv_cpufreq_driver.boost_enabled = true; - else - powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL; + powernv_cpufreq_driver.set_boost = cpufreq_boost_set_sw; rc = cpufreq_register_driver(&powernv_cpufreq_driver); if (rc) { @@ -1139,9 +1132,6 @@ static int __init powernv_cpufreq_init(void) goto cleanup; } - if (powernv_pstate_info.wof_enabled) - cpufreq_enable_boost_support(); - register_reboot_notifier(&powernv_cpufreq_reboot_nb); opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb); @@ -1162,5 +1152,6 @@ static void __exit powernv_cpufreq_exit(void) } module_exit(powernv_cpufreq_exit); +MODULE_DESCRIPTION("cpufreq driver for IBM/OpenPOWER powernv systems"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>"); diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c deleted file mode 100644 index 88afc49941b7..000000000000 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ /dev/null @@ -1,173 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * cpufreq driver for the cell processor - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/cpufreq.h> -#include <linux/module.h> -#include <linux/of.h> - -#include <asm/machdep.h> -#include <asm/cell-regs.h> - -#include "ppc_cbe_cpufreq.h" - -/* the CBE supports an 8 step frequency scaling */ -static struct cpufreq_frequency_table cbe_freqs[] = { - {0, 1, 0}, - {0, 2, 0}, - {0, 3, 0}, - {0, 4, 0}, - {0, 5, 0}, - {0, 6, 0}, - {0, 8, 0}, - {0, 10, 0}, - {0, 0, CPUFREQ_TABLE_END}, -}; - -/* - * hardware specific functions - */ - -static int set_pmode(unsigned int cpu, unsigned int slow_mode) -{ - int rc; - - if (cbe_cpufreq_has_pmi) - rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode); - else - rc = cbe_cpufreq_set_pmode(cpu, slow_mode); - - pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu)); - - return rc; -} - -/* - * cpufreq functions - */ - -static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - struct cpufreq_frequency_table *pos; - const u32 *max_freqp; - u32 max_freq; - int cur_pmode; - struct device_node *cpu; - - cpu = of_get_cpu_node(policy->cpu, NULL); - - if (!cpu) - return -ENODEV; - - pr_debug("init cpufreq on CPU %d\n", policy->cpu); - - /* - * Let's check we can 
actually get to the CELL regs - */ - if (!cbe_get_cpu_pmd_regs(policy->cpu) || - !cbe_get_cpu_mic_tm_regs(policy->cpu)) { - pr_info("invalid CBE regs pointers for cpufreq\n"); - of_node_put(cpu); - return -EINVAL; - } - - max_freqp = of_get_property(cpu, "clock-frequency", NULL); - - of_node_put(cpu); - - if (!max_freqp) - return -EINVAL; - - /* we need the freq in kHz */ - max_freq = *max_freqp / 1000; - - pr_debug("max clock-frequency is at %u kHz\n", max_freq); - pr_debug("initializing frequency table\n"); - - /* initialize frequency table */ - cpufreq_for_each_entry(pos, cbe_freqs) { - pos->frequency = max_freq / pos->driver_data; - pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency); - } - - /* if DEBUG is enabled set_pmode() measures the latency - * of a transition */ - policy->cpuinfo.transition_latency = 25000; - - cur_pmode = cbe_cpufreq_get_pmode(policy->cpu); - pr_debug("current pmode is at %d\n",cur_pmode); - - policy->cur = cbe_freqs[cur_pmode].frequency; - -#ifdef CONFIG_SMP - cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); -#endif - - policy->freq_table = cbe_freqs; - cbe_cpufreq_pmi_policy_init(policy); - return 0; -} - -static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cbe_cpufreq_pmi_policy_exit(policy); - return 0; -} - -static int cbe_cpufreq_target(struct cpufreq_policy *policy, - unsigned int cbe_pmode_new) -{ - pr_debug("setting frequency for cpu %d to %d kHz, " \ - "1/%d of max frequency\n", - policy->cpu, - cbe_freqs[cbe_pmode_new].frequency, - cbe_freqs[cbe_pmode_new].driver_data); - - return set_pmode(policy->cpu, cbe_pmode_new); -} - -static struct cpufreq_driver cbe_cpufreq_driver = { - .verify = cpufreq_generic_frequency_table_verify, - .target_index = cbe_cpufreq_target, - .init = cbe_cpufreq_cpu_init, - .exit = cbe_cpufreq_cpu_exit, - .name = "cbe-cpufreq", - .flags = CPUFREQ_CONST_LOOPS, -}; - -/* - * module init and destoy - */ - -static int __init cbe_cpufreq_init(void) -{ - int ret; - - if (!machine_is(cell)) - return -ENODEV; - - cbe_cpufreq_pmi_init(); - - ret = cpufreq_register_driver(&cbe_cpufreq_driver); - if (ret) - cbe_cpufreq_pmi_exit(); - - return ret; -} - -static void __exit cbe_cpufreq_exit(void) -{ - cpufreq_unregister_driver(&cbe_cpufreq_driver); - cbe_cpufreq_pmi_exit(); -} - -module_init(cbe_cpufreq_init); -module_exit(cbe_cpufreq_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h deleted file mode 100644 index 00cd8633b0d9..000000000000 --- a/drivers/cpufreq/ppc_cbe_cpufreq.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * ppc_cbe_cpufreq.h - * - * This file contains the definitions used by the cbe_cpufreq driver. 
- * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - * - */ - -#include <linux/cpufreq.h> -#include <linux/types.h> - -int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode); -int cbe_cpufreq_get_pmode(int cpu); - -int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode); - -#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI) -extern bool cbe_cpufreq_has_pmi; -void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy); -void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy); -void cbe_cpufreq_pmi_init(void); -void cbe_cpufreq_pmi_exit(void); -#else -#define cbe_cpufreq_has_pmi (0) -static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {} -static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {} -static inline void cbe_cpufreq_pmi_init(void) {} -static inline void cbe_cpufreq_pmi_exit(void) {} -#endif diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c deleted file mode 100644 index 04830cd95333..000000000000 --- a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c +++ /dev/null @@ -1,102 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * pervasive backend for the cbe_cpufreq driver - * - * This driver makes use of the pervasive unit to - * engage the desired frequency. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/time.h> -#include <asm/machdep.h> -#include <asm/hw_irq.h> -#include <asm/cell-regs.h> - -#include "ppc_cbe_cpufreq.h" - -/* to write to MIC register */ -static u64 MIC_Slow_Fast_Timer_table[] = { - [0 ... 7] = 0x007fc00000000000ull, -}; - -/* more values for the MIC */ -static u64 MIC_Slow_Next_Timer_table[] = { - 0x0000240000000000ull, - 0x0000268000000000ull, - 0x000029C000000000ull, - 0x00002D0000000000ull, - 0x0000300000000000ull, - 0x0000334000000000ull, - 0x000039C000000000ull, - 0x00003FC000000000ull, -}; - - -int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode) -{ - struct cbe_pmd_regs __iomem *pmd_regs; - struct cbe_mic_tm_regs __iomem *mic_tm_regs; - unsigned long flags; - u64 value; -#ifdef DEBUG - long time; -#endif - - local_irq_save(flags); - - mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu); - pmd_regs = cbe_get_cpu_pmd_regs(cpu); - -#ifdef DEBUG - time = jiffies; -#endif - - out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]); - out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]); - - out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]); - out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]); - - value = in_be64(&pmd_regs->pmcr); - /* set bits to zero */ - value &= 0xFFFFFFFFFFFFFFF8ull; - /* set bits to next pmode */ - value |= pmode; - - out_be64(&pmd_regs->pmcr, value); - -#ifdef DEBUG - /* wait until new pmode appears in status register */ - value = in_be64(&pmd_regs->pmsr) & 0x07; - while (value != pmode) { - cpu_relax(); - value = in_be64(&pmd_regs->pmsr) & 0x07; - } - - time = jiffies - time; - time = jiffies_to_msecs(time); - pr_debug("had to wait %lu ms for a transition using " \ - "pervasive unit\n", time); -#endif - local_irq_restore(flags); - - return 0; -} - - -int cbe_cpufreq_get_pmode(int cpu) -{ - int ret; - struct cbe_pmd_regs __iomem *pmd_regs; - - pmd_regs = cbe_get_cpu_pmd_regs(cpu); - ret = in_be64(&pmd_regs->pmsr) & 0x07; - - return ret; 
-} - diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c deleted file mode 100644 index 6f0c32592416..000000000000 --- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c +++ /dev/null @@ -1,150 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * pmi backend for the cbe_cpufreq driver - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/timer.h> -#include <linux/init.h> -#include <linux/pm_qos.h> -#include <linux/slab.h> - -#include <asm/processor.h> -#include <asm/pmi.h> -#include <asm/cell-regs.h> - -#ifdef DEBUG -#include <asm/time.h> -#endif - -#include "ppc_cbe_cpufreq.h" - -bool cbe_cpufreq_has_pmi = false; -EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi); - -/* - * hardware specific functions - */ - -int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode) -{ - int ret; - pmi_message_t pmi_msg; -#ifdef DEBUG - long time; -#endif - pmi_msg.type = PMI_TYPE_FREQ_CHANGE; - pmi_msg.data1 = cbe_cpu_to_node(cpu); - pmi_msg.data2 = pmode; - -#ifdef DEBUG - time = jiffies; -#endif - pmi_send_message(pmi_msg); - -#ifdef DEBUG - time = jiffies - time; - time = jiffies_to_msecs(time); - pr_debug("had to wait %lu ms for a transition using " \ - "PMI\n", time); -#endif - ret = pmi_msg.data2; - pr_debug("PMI returned slow mode %d\n", ret); - - return ret; -} -EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi); - - -static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) -{ - struct cpufreq_policy *policy; - struct freq_qos_request *req; - u8 node, slow_mode; - int cpu, ret; - - BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE); - - node = pmi_msg.data1; - slow_mode = pmi_msg.data2; - - cpu = cbe_node_to_cpu(node); - - pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); - - policy = cpufreq_cpu_get(cpu); - if (!policy) { - pr_warn("cpufreq policy not found cpu%d\n", cpu); - return; - } - - req = policy->driver_data; - - ret = freq_qos_update_request(req, - policy->freq_table[slow_mode].frequency); - if (ret < 0) - pr_warn("Failed to update freq constraint: %d\n", ret); - else - pr_debug("limiting node %d to slow mode %d\n", node, slow_mode); - - cpufreq_cpu_put(policy); -} - -static struct pmi_handler cbe_pmi_handler = { - .type = PMI_TYPE_FREQ_CHANGE, - .handle_pmi_message = cbe_cpufreq_handle_pmi, -}; - -void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) -{ - struct freq_qos_request *req; - int ret; - - if (!cbe_cpufreq_has_pmi) - return; - - req = kzalloc(sizeof(*req), GFP_KERNEL); - if (!req) - return; - - ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX, - policy->freq_table[0].frequency); - if (ret < 0) { - pr_err("Failed to add freq constraint (%d)\n", ret); - kfree(req); - return; - } - - policy->driver_data = req; -} -EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init); - -void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) -{ - struct freq_qos_request *req = policy->driver_data; - - if (cbe_cpufreq_has_pmi) { - freq_qos_remove_request(req); - kfree(req); - } -} -EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit); - -void cbe_cpufreq_pmi_init(void) -{ - if (!pmi_register_handler(&cbe_pmi_handler)) - cbe_cpufreq_has_pmi = true; -} -EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init); - -void cbe_cpufreq_pmi_exit(void) -{ - pmi_unregister_handler(&cbe_pmi_handler); - cbe_cpufreq_has_pmi = false; -} -EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit); diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c 
b/drivers/cpufreq/qcom-cpufreq-hw.c index 70b0f21968a0..8422704a3b10 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -9,6 +9,7 @@ #include <linux/init.h> #include <linux/interconnect.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> @@ -142,14 +143,12 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data) } /* Get the frequency requested by the cpufreq core for the CPU */ -static unsigned int qcom_cpufreq_get_freq(unsigned int cpu) +static unsigned int qcom_cpufreq_get_freq(struct cpufreq_policy *policy) { struct qcom_cpufreq_data *data; const struct qcom_cpufreq_soc_data *soc_data; - struct cpufreq_policy *policy; unsigned int index; - policy = cpufreq_cpu_get_raw(cpu); if (!policy) return 0; @@ -162,12 +161,10 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu) return policy->freq_table[index].frequency; } -static unsigned int qcom_cpufreq_hw_get(unsigned int cpu) +static unsigned int __qcom_cpufreq_hw_get(struct cpufreq_policy *policy) { struct qcom_cpufreq_data *data; - struct cpufreq_policy *policy; - policy = cpufreq_cpu_get_raw(cpu); if (!policy) return 0; @@ -176,7 +173,12 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu) if (data->throttle_irq >= 0) return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ; - return qcom_cpufreq_get_freq(cpu); + return qcom_cpufreq_get_freq(policy); +} + +static unsigned int qcom_cpufreq_hw_get(unsigned int cpu) +{ + return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu)); } static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, @@ -304,7 +306,7 @@ static void qcom_get_related_cpus(int index, struct cpumask *m) struct of_phandle_args args; int cpu, ret; - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { cpu_np = of_cpu_device_node_get(cpu); if (!cpu_np) continue; @@ -347,8 +349,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) throttled_freq = freq_hz / HZ_PER_KHZ; - /* Update thermal pressure (the boost frequencies are accepted) */ - arch_update_thermal_pressure(policy->related_cpus, throttled_freq); + /* Update HW pressure (the boost frequencies are accepted) */ + arch_update_hw_pressure(policy->related_cpus, throttled_freq); /* * In the unlikely case policy is unregistered do not enable @@ -362,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) * If h/w throttled frequency is higher than what cpufreq has requested * for, then stop polling and switch back to interrupt mechanism. 
*/ - if (throttled_freq >= qcom_cpufreq_get_freq(cpu)) + if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu))) enable_irq(data->throttle_irq); else mod_delayed_work(system_highpri_wq, &data->throttle_work, @@ -440,7 +442,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index) return data->throttle_irq; data->cancel_throttle = false; - data->policy = policy; mutex_init(&data->throttle_lock); INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll); @@ -551,6 +552,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) policy->driver_data = data; policy->dvfs_possible_from_any_cpu = true; + data->policy = policy; ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy); if (ret) { @@ -564,16 +566,10 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) return -ENODEV; } - if (policy_has_boost_freq(policy)) { - ret = cpufreq_enable_boost_support(); - if (ret) - dev_warn(cpu_dev, "failed to enable boost: %d\n", ret); - } - return qcom_cpufreq_hw_lmh_init(policy, index); } -static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) +static void qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) { struct device *cpu_dev = get_cpu_device(policy->cpu); struct qcom_cpufreq_data *data = policy->driver_data; @@ -583,8 +579,6 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) qcom_cpufreq_hw_lmh_exit(data); kfree(policy->freq_table); kfree(data); - - return 0; } static void qcom_cpufreq_ready(struct cpufreq_policy *policy) @@ -595,12 +589,6 @@ static void qcom_cpufreq_ready(struct cpufreq_policy *policy) enable_irq(data->throttle_irq); } -static struct freq_attr *qcom_cpufreq_hw_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - &cpufreq_freq_attr_scaling_boost_freqs, - NULL -}; - static struct cpufreq_driver cpufreq_qcom_hw_driver = { .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | @@ -615,19 +603,32 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = { .register_em = cpufreq_register_em_with_opp, .fast_switch = qcom_cpufreq_hw_fast_switch, .name = "qcom-cpufreq-hw", - .attr = qcom_cpufreq_hw_attr, .ready = qcom_cpufreq_ready, + .set_boost = cpufreq_boost_set_sw, }; static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk); - return qcom_lmh_get_throttle_freq(data); + return __qcom_cpufreq_hw_get(data->policy) * HZ_PER_KHZ; +} + +/* + * Since we cannot determine the closest rate of the target rate, let's just + * return the actual rate at which the clock is running at. This is needed to + * make clk_set_rate() API work properly. 
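A consumer-side sketch of what the determine_rate()/recalc_rate() pair above implies (hypothetical caller, not taken from this patch): because determine_rate() rounds every request back to the rate the cpufreq hardware is already running, clk_set_rate() on this clock succeeds without actually steering anything, while clk_get_rate() keeps tracking the hardware.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Hypothetical consumer; assumes the CPU clock can be looked up normally. */
	static int example_probe_cpu_clk(struct device *dev)
	{
		struct clk *cpu_clk = devm_clk_get(dev, NULL);
		unsigned long cur;

		if (IS_ERR(cpu_clk))
			return PTR_ERR(cpu_clk);

		cur = clk_get_rate(cpu_clk);	/* rate reported via recalc_rate() */

		/*
		 * Any request is rounded back to 'cur' by determine_rate(), so this
		 * returns 0 without changing the clock -- which is the point of the op.
		 */
		return clk_set_rate(cpu_clk, cur * 2);
	}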
+ */ +static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) +{ + req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0); + + return 0; } static const struct clk_ops qcom_cpufreq_hw_clk_ops = { .recalc_rate = qcom_cpufreq_hw_recalc_rate, + .determine_rate = qcom_cpufreq_hw_determine_rate, }; static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev) @@ -737,7 +738,7 @@ static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev) static struct platform_driver qcom_cpufreq_hw_driver = { .probe = qcom_cpufreq_hw_driver_probe, - .remove_new = qcom_cpufreq_hw_driver_remove, + .remove = qcom_cpufreq_hw_driver_remove, .driver = { .name = "qcom-cpufreq-hw", .of_match_table = qcom_cpufreq_hw_match, diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index ea05d9d67490..54f8117103c8 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -52,12 +52,13 @@ struct qcom_cpufreq_match_data { struct nvmem_cell *speedbin_nvmem, char **pvs_name, struct qcom_cpufreq_drv *drv); - const char **genpd_names; + const char **pd_names; + unsigned int num_pd_names; }; struct qcom_cpufreq_drv_cpu { int opp_token; - struct device **virt_devs; + struct dev_pm_domain_list *pd_list; }; struct qcom_cpufreq_drv { @@ -191,6 +192,7 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev, case QCOM_ID_IPQ5312: case QCOM_ID_IPQ5302: case QCOM_ID_IPQ5300: + case QCOM_ID_IPQ5321: case QCOM_ID_IPQ9514: case QCOM_ID_IPQ9550: case QCOM_ID_IPQ9554: @@ -394,8 +396,6 @@ static int qcom_cpufreq_ipq8074_name_version(struct device *cpu_dev, return 0; } -static const char *generic_genpd_names[] = { "perf", NULL }; - static const struct qcom_cpufreq_match_data match_data_kryo = { .get_version = qcom_cpufreq_kryo_name_version, }; @@ -406,13 +406,13 @@ static const struct qcom_cpufreq_match_data match_data_krait = { static const struct qcom_cpufreq_match_data match_data_msm8909 = { .get_version = qcom_cpufreq_simple_get_version, - .genpd_names = generic_genpd_names, + .pd_names = (const char *[]) { "perf" }, + .num_pd_names = 1, }; -static const char *qcs404_genpd_names[] = { "cpr", NULL }; - static const struct qcom_cpufreq_match_data match_data_qcs404 = { - .genpd_names = qcs404_genpd_names, + .pd_names = (const char *[]) { "cpr" }, + .num_pd_names = 1, }; static const struct qcom_cpufreq_match_data match_data_ipq6018 = { @@ -427,35 +427,22 @@ static const struct qcom_cpufreq_match_data match_data_ipq8074 = { .get_version = qcom_cpufreq_ipq8074_name_version, }; -static void qcom_cpufreq_suspend_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu) -{ - const char * const *name = drv->data->genpd_names; - int i; - - if (!drv->cpus[cpu].virt_devs) - return; - - for (i = 0; *name; i++, name++) - device_set_awake_path(drv->cpus[cpu].virt_devs[i]); -} - -static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu) +static void qcom_cpufreq_suspend_pd_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu) { - const char * const *name = drv->data->genpd_names; + struct dev_pm_domain_list *pd_list = drv->cpus[cpu].pd_list; int i; - if (!drv->cpus[cpu].virt_devs) + if (!pd_list) return; - for (i = 0; *name; i++, name++) - pm_runtime_put(drv->cpus[cpu].virt_devs[i]); + for (i = 0; i < pd_list->num_pds; i++) + device_set_awake_path(pd_list->pd_devs[i]); } static int qcom_cpufreq_probe(struct platform_device *pdev) { struct qcom_cpufreq_drv *drv; struct nvmem_cell *speedbin_nvmem; - 
struct device_node *np; struct device *cpu_dev; char pvs_name_buffer[] = "speedXX-pvsXX-vXX"; char *pvs_name = pvs_name_buffer; @@ -467,16 +454,15 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) if (!cpu_dev) return -ENODEV; - np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + struct device_node *np __free(device_node) = + dev_pm_opp_of_get_opp_desc_node(cpu_dev); if (!np) return -ENOENT; ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu") || of_device_is_compatible(np, "operating-points-v2-krait-cpu"); - if (!ret) { - of_node_put(np); + if (!ret) return -ENOENT; - } drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()), GFP_KERNEL); @@ -502,10 +488,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) } nvmem_cell_put(speedbin_nvmem); } - of_node_put(np); - for_each_possible_cpu(cpu) { - struct device **virt_devs = NULL; + for_each_present_cpu(cpu) { struct dev_pm_opp_config config = { .supported_hw = NULL, }; @@ -524,12 +508,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) config.prop_name = pvs_name; } - if (drv->data->genpd_names) { - config.genpd_names = drv->data->genpd_names; - config.virt_devs = &virt_devs; - } - - if (config.supported_hw || config.genpd_names) { + if (config.supported_hw) { drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config); if (drv->cpus[cpu].opp_token < 0) { ret = drv->cpus[cpu].opp_token; @@ -538,25 +517,18 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) } } - if (virt_devs) { - const char * const *name = config.genpd_names; - int i, j; - - for (i = 0; *name; i++, name++) { - ret = pm_runtime_resume_and_get(virt_devs[i]); - if (ret) { - dev_err(cpu_dev, "failed to resume %s: %d\n", - *name, ret); - - /* Rollback previous PM runtime calls */ - name = config.genpd_names; - for (j = 0; *name && j < i; j++, name++) - pm_runtime_put(virt_devs[j]); - - goto free_opp; - } - } - drv->cpus[cpu].virt_devs = virt_devs; + if (drv->data->pd_names) { + struct dev_pm_domain_attach_data attach_data = { + .pd_names = drv->data->pd_names, + .num_pd_names = drv->data->num_pd_names, + .pd_flags = PD_FLAG_DEV_LINK_ON | + PD_FLAG_REQUIRED_OPP, + }; + + ret = dev_pm_domain_attach_list(cpu_dev, &attach_data, + &drv->cpus[cpu].pd_list); + if (ret < 0) + goto free_opp; } } @@ -571,8 +543,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) dev_err(cpu_dev, "Failed to register platform device\n"); free_opp: - for_each_possible_cpu(cpu) { - qcom_cpufreq_put_virt_devs(drv, cpu); + for_each_present_cpu(cpu) { + dev_pm_domain_detach_list(drv->cpus[cpu].pd_list); dev_pm_opp_clear_config(drv->cpus[cpu].opp_token); } return ret; @@ -585,8 +557,8 @@ static void qcom_cpufreq_remove(struct platform_device *pdev) platform_device_unregister(cpufreq_dt_pdev); - for_each_possible_cpu(cpu) { - qcom_cpufreq_put_virt_devs(drv, cpu); + for_each_present_cpu(cpu) { + dev_pm_domain_detach_list(drv->cpus[cpu].pd_list); dev_pm_opp_clear_config(drv->cpus[cpu].opp_token); } } @@ -596,8 +568,8 @@ static int qcom_cpufreq_suspend(struct device *dev) struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev); unsigned int cpu; - for_each_possible_cpu(cpu) - qcom_cpufreq_suspend_virt_devs(drv, cpu); + for_each_present_cpu(cpu) + qcom_cpufreq_suspend_pd_devs(drv, cpu); return 0; } @@ -606,14 +578,14 @@ static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL) static struct platform_driver qcom_cpufreq_driver = { .probe = qcom_cpufreq_probe, - .remove_new = qcom_cpufreq_remove, + .remove 
= qcom_cpufreq_remove, .driver = { .name = "qcom-cpufreq-nvmem", .pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops), }, }; -static const struct of_device_id qcom_cpufreq_match_list[] __initconst = { +static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_unused = { { .compatible = "qcom,apq8096", .data = &match_data_kryo }, { .compatible = "qcom,msm8909", .data = &match_data_msm8909 }, { .compatible = "qcom,msm8996", .data = &match_data_kryo }, @@ -638,7 +610,7 @@ MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list); */ static int __init qcom_cpufreq_init(void) { - struct device_node *np = of_find_node_by_path("/"); + struct device_node *np __free(device_node) = of_find_node_by_path("/"); const struct of_device_id *match; int ret; @@ -646,7 +618,6 @@ static int __init qcom_cpufreq_init(void) return -ENODEV; match = of_match_node(qcom_cpufreq_match_list, np); - of_node_put(np); if (!match) return -ENODEV; diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 0aecaecbb0e6..8d1f5ac59132 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -225,7 +225,7 @@ err_np: return -ENODEV; } -static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct cpu_data *data = policy->driver_data; @@ -233,8 +233,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) kfree(data->table); kfree(data); policy->driver_data = NULL; - - return 0; } static int qoriq_cpufreq_target(struct cpufreq_policy *policy, @@ -256,7 +254,6 @@ static struct cpufreq_driver qoriq_cpufreq_driver = { .verify = cpufreq_generic_frequency_table_verify, .target_index = qoriq_cpufreq_target, .get = cpufreq_generic_get, - .attr = cpufreq_generic_attr, }; static const struct of_device_id qoriq_cpufreq_blacklist[] = { @@ -298,7 +295,7 @@ static struct platform_driver qoriq_cpufreq_platform_driver = { .name = "qoriq-cpufreq", }, .probe = qoriq_cpufreq_probe, - .remove_new = qoriq_cpufreq_remove, + .remove = qoriq_cpufreq_remove, }; module_platform_driver(qoriq_cpufreq_platform_driver); diff --git a/drivers/cpufreq/raspberrypi-cpufreq.c b/drivers/cpufreq/raspberrypi-cpufreq.c index e0705cc9a57d..5050932954e3 100644 --- a/drivers/cpufreq/raspberrypi-cpufreq.c +++ b/drivers/cpufreq/raspberrypi-cpufreq.c @@ -85,7 +85,7 @@ static struct platform_driver raspberrypi_cpufreq_driver = { .name = "raspberrypi-cpufreq", }, .probe = raspberrypi_cpufreq_probe, - .remove_new = raspberrypi_cpufreq_remove, + .remove = raspberrypi_cpufreq_remove, }; module_platform_driver(raspberrypi_cpufreq_driver); diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs new file mode 100644 index 000000000000..94ed81644fe1 --- /dev/null +++ b/drivers/cpufreq/rcpufreq_dt.rs @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Rust based implementation of the cpufreq-dt driver. + +use kernel::{ + c_str, + clk::Clk, + cpu, cpufreq, + cpumask::CpumaskVar, + device::{Core, Device}, + error::code::*, + fmt, + macros::vtable, + module_platform_driver, of, opp, platform, + prelude::*, + str::CString, + sync::Arc, +}; + +/// Finds exact supply name from the OF node. +fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> { + let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?; + dev.property_present(&prop_name) + .then(|| CString::try_from_fmt(fmt!("{name}")).ok()) + .flatten() +} + +/// Finds supply name for the CPU from DT. 
+fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> { + // Try "cpu0" for older DTs, fallback to "cpu". + let name = (cpu == 0) + .then(|| find_supply_name_exact(dev, "cpu0")) + .flatten() + .or_else(|| find_supply_name_exact(dev, "cpu"))?; + + let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?; + list.push(name, GFP_KERNEL).ok()?; + + Some(list) +} + +/// Represents the cpufreq dt device. +struct CPUFreqDTDevice { + opp_table: opp::Table, + freq_table: opp::FreqTable, + _mask: CpumaskVar, + _token: Option<opp::ConfigToken>, + _clk: Clk, +} + +#[derive(Default)] +struct CPUFreqDTDriver; + +#[vtable] +impl opp::ConfigOps for CPUFreqDTDriver {} + +#[vtable] +impl cpufreq::Driver for CPUFreqDTDriver { + const NAME: &'static CStr = c_str!("cpufreq-dt"); + const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV; + const BOOST_ENABLED: bool = true; + + type PData = Arc<CPUFreqDTDevice>; + + fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> { + let cpu = policy.cpu(); + // SAFETY: The CPU device is only used during init; it won't get hot-unplugged. The cpufreq + // core registers with CPU notifiers and the cpufreq core/driver won't use the CPU device, + // once the CPU is hot-unplugged. + let dev = unsafe { cpu::from_cpu(cpu)? }; + let mut mask = CpumaskVar::new_zero(GFP_KERNEL)?; + + mask.set(cpu); + + let token = find_supply_names(dev, cpu) + .map(|names| { + opp::Config::<Self>::new() + .set_regulator_names(names)? + .set(dev) + }) + .transpose()?; + + // Get OPP-sharing information from "operating-points-v2" bindings. + let fallback = match opp::Table::of_sharing_cpus(dev, &mut mask) { + Ok(()) => false, + Err(e) if e == ENOENT => { + // "operating-points-v2" not supported. If the platform hasn't + // set sharing CPUs, fallback to all CPUs share the `Policy` + // for backward compatibility. + opp::Table::sharing_cpus(dev, &mut mask).is_err() + } + Err(e) => return Err(e), + }; + + // Initialize OPP tables for all policy cpus. + // + // For platforms not using "operating-points-v2" bindings, we do this + // before updating policy cpus. Otherwise, we will end up creating + // duplicate OPPs for the CPUs. + // + // OPPs might be populated at runtime, don't fail for error here unless + // it is -EPROBE_DEFER. + let mut opp_table = match opp::Table::from_of_cpumask(dev, &mut mask) { + Ok(table) => table, + Err(e) => { + if e == EPROBE_DEFER { + return Err(e); + } + + // The table is added dynamically ? + opp::Table::from_dev(dev)? + } + }; + + // The OPP table must be initialized, statically or dynamically, by this point. + opp_table.opp_count()?; + + // Set sharing cpus for fallback scenario. + if fallback { + mask.setall(); + opp_table.set_sharing_cpus(&mut mask)?; + } + + let mut transition_latency = opp_table.max_transition_latency_ns() as u32; + if transition_latency == 0 { + transition_latency = cpufreq::ETERNAL_LATENCY_NS; + } + + policy + .set_dvfs_possible_from_any_cpu(true) + .set_suspend_freq(opp_table.suspend_freq()) + .set_transition_latency_ns(transition_latency); + + let freq_table = opp_table.cpufreq_table()?; + // SAFETY: The `freq_table` is not dropped while it is getting used by the C code. + unsafe { policy.set_freq_table(&freq_table) }; + + // SAFETY: The returned `clk` is not dropped while it is getting used by the C code. + let clk = unsafe { policy.set_clk(dev, None)? 
}; + + mask.copy(policy.cpus()); + + Ok(Arc::new( + CPUFreqDTDevice { + opp_table, + freq_table, + _mask: mask, + _token: token, + _clk: clk, + }, + GFP_KERNEL, + )?) + } + + fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result { + Ok(()) + } + + fn online(_policy: &mut cpufreq::Policy) -> Result { + // We did light-weight tear down earlier, nothing to do here. + Ok(()) + } + + fn offline(_policy: &mut cpufreq::Policy) -> Result { + // Preserve policy->data and don't free resources on light-weight + // tear down. + Ok(()) + } + + fn suspend(policy: &mut cpufreq::Policy) -> Result { + policy.generic_suspend() + } + + fn verify(data: &mut cpufreq::PolicyData) -> Result { + data.generic_verify() + } + + fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result { + let Some(data) = policy.data::<Self::PData>() else { + return Err(ENOENT); + }; + + let freq = data.freq_table.freq(index)?; + data.opp_table.set_rate(freq) + } + + fn get(policy: &mut cpufreq::Policy) -> Result<u32> { + policy.generic_get() + } + + fn set_boost(_policy: &mut cpufreq::Policy, _state: i32) -> Result { + Ok(()) + } + + fn register_em(policy: &mut cpufreq::Policy) { + policy.register_em_opp() + } +} + +kernel::of_device_table!( + OF_TABLE, + MODULE_OF_TABLE, + <CPUFreqDTDriver as platform::Driver>::IdInfo, + [(of::DeviceId::new(c_str!("operating-points-v2")), ())] +); + +impl platform::Driver for CPUFreqDTDriver { + type IdInfo = (); + const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE); + + fn probe( + pdev: &platform::Device<Core>, + _id_info: Option<&Self::IdInfo>, + ) -> Result<Pin<KBox<Self>>> { + cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?; + Ok(KBox::new(Self {}, GFP_KERNEL)?.into()) + } +} + +module_platform_driver! 
{ + type: CPUFreqDTDriver, + name: "cpufreq-dt", + author: "Viresh Kumar <viresh.kumar@linaro.org>", + description: "Generic CPUFreq DT driver", + license: "GPL v2", +} diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c index c6bdfc308e99..9cef71528076 100644 --- a/drivers/cpufreq/s3c64xx-cpufreq.c +++ b/drivers/cpufreq/s3c64xx-cpufreq.c @@ -24,6 +24,7 @@ struct s3c64xx_dvfs { unsigned int vddarm_max; }; +#ifdef CONFIG_REGULATOR static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = { [0] = { 1000000, 1150000 }, [1] = { 1050000, 1150000 }, @@ -31,6 +32,7 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = { [3] = { 1200000, 1350000 }, [4] = { 1300000, 1350000 }, }; +#endif static struct cpufreq_frequency_table s3c64xx_freq_table[] = { { 0, 0, 66000 }, @@ -51,15 +53,16 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = { static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) { - struct s3c64xx_dvfs *dvfs; - unsigned int old_freq, new_freq; + unsigned int new_freq = s3c64xx_freq_table[index].frequency; int ret; +#ifdef CONFIG_REGULATOR + struct s3c64xx_dvfs *dvfs; + unsigned int old_freq; + old_freq = clk_get_rate(policy->clk) / 1000; - new_freq = s3c64xx_freq_table[index].frequency; dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data]; -#ifdef CONFIG_REGULATOR if (vddarm && new_freq > old_freq) { ret = regulator_set_voltage(vddarm, dvfs->vddarm_min, diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index 330c8d6cf93c..b360f03a116f 100644 --- a/drivers/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c @@ -21,7 +21,6 @@ #include <linux/io.h> #include <asm/cpu_device_id.h> -#include <asm/msr.h> #define MMCR_BASE 0xfffef000 /* The default base address */ #define OFFS_CPUCTL 0x2 /* CPU Control Register */ @@ -92,7 +91,6 @@ static struct cpufreq_driver sc520_freq_driver = { .target_index = sc520_freq_target, .init = sc520_freq_cpu_init, .name = "sc520_freq", - .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id sc520_ids[] = { diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 3b4f6bfb2f4c..ef078426bfd5 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/module.h> #include <linux/pm_opp.h> +#include <linux/pm_qos.h> #include <linux/slab.h> #include <linux/scmi_protocol.h> #include <linux/types.h> @@ -26,6 +27,8 @@ struct scmi_data { int nr_opp; struct device *cpu_dev; cpumask_var_t opp_shared_cpus; + struct notifier_block limit_notify_nb; + struct freq_qos_request limits_freq_req; }; static struct scmi_protocol_handle *ph; @@ -34,11 +37,17 @@ static struct cpufreq_driver scmi_cpufreq_driver; static unsigned int scmi_cpufreq_get_rate(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - struct scmi_data *priv = policy->driver_data; + struct cpufreq_policy *policy; + struct scmi_data *priv; unsigned long rate; int ret; + policy = cpufreq_cpu_get_raw(cpu); + if (unlikely(!policy)) + return 0; + + priv = policy->driver_data; + ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false); if (ret) return 0; @@ -63,9 +72,9 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { struct scmi_data *priv = policy->driver_data; + unsigned long freq = target_freq; - if (!perf_ops->freq_set(ph, priv->domain_id, - target_freq * 1000, true)) + if (!perf_ops->freq_set(ph, priv->domain_id, 
freq * 1000, true)) return target_freq; return 0; @@ -101,7 +110,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, int domain, int cpu, tdomain; struct device *tcpu_dev; - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { if (cpu == cpu_dev->id) continue; @@ -168,11 +177,21 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch) return rate_limit; } -static struct freq_attr *scmi_cpufreq_hw_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, - NULL, -}; +static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data) +{ + struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb); + struct scmi_perf_limits_report *limit_notify = data; + unsigned int limit_freq_khz; + int ret; + + limit_freq_khz = limit_notify->range_max_freq / HZ_PER_KHZ; + + ret = freq_qos_update_request(&priv->limits_freq_req, limit_freq_khz); + if (ret < 0) + pr_warn("failed to update freq constraint: %d\n", ret); + + return NOTIFY_OK; +} static int scmi_cpufreq_init(struct cpufreq_policy *policy) { @@ -181,6 +200,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) struct device *cpu_dev; struct scmi_data *priv; struct cpufreq_frequency_table *freq_table; + struct scmi_device *sdev = cpufreq_get_driver_data(); cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { @@ -283,19 +303,27 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) policy->transition_delay_us = scmi_get_rate_limit(domain, policy->fast_switch_possible); - if (policy_has_boost_freq(policy)) { - ret = cpufreq_enable_boost_support(); - if (ret) { - dev_warn(cpu_dev, "failed to enable boost: %d\n", ret); - goto out_free_opp; - } else { - scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; - scmi_cpufreq_driver.boost_enabled = true; - } + ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX, + FREQ_QOS_MAX_DEFAULT_VALUE); + if (ret < 0) { + dev_err(cpu_dev, "failed to add qos limits request: %d\n", ret); + goto out_free_table; } + priv->limit_notify_nb.notifier_call = scmi_limit_notify_cb; + ret = sdev->handle->notify_ops->event_notifier_register(sdev->handle, SCMI_PROTOCOL_PERF, + SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED, + &priv->domain_id, + &priv->limit_notify_nb); + if (ret) + dev_warn(&sdev->dev, + "failed to register for limits change notifier for domain %d\n", + priv->domain_id); + return 0; +out_free_table: + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); out_free_opp: dev_pm_opp_remove_all_dynamic(cpu_dev); @@ -308,16 +336,20 @@ out_free_priv: return ret; } -static int scmi_cpufreq_exit(struct cpufreq_policy *policy) +static void scmi_cpufreq_exit(struct cpufreq_policy *policy) { struct scmi_data *priv = policy->driver_data; + struct scmi_device *sdev = cpufreq_get_driver_data(); + sdev->handle->notify_ops->event_notifier_unregister(sdev->handle, SCMI_PROTOCOL_PERF, + SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED, + &priv->domain_id, + &priv->limit_notify_nb); + freq_qos_remove_request(&priv->limits_freq_req); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); dev_pm_opp_remove_all_dynamic(priv->cpu_dev); free_cpumask_var(priv->opp_shared_cpus); kfree(priv); - - return 0; } static void scmi_cpufreq_register_em(struct cpufreq_policy *policy) @@ -352,15 +384,49 @@ static struct cpufreq_driver scmi_cpufreq_driver = { CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, - .attr = scmi_cpufreq_hw_attr, .target_index = scmi_cpufreq_set_target, 
.fast_switch = scmi_cpufreq_fast_switch, .get = scmi_cpufreq_get_rate, .init = scmi_cpufreq_init, .exit = scmi_cpufreq_exit, .register_em = scmi_cpufreq_register_em, + .set_boost = cpufreq_boost_set_sw, }; +static bool scmi_dev_used_by_cpus(struct device *scmi_dev) +{ + struct device_node *scmi_np = dev_of_node(scmi_dev); + struct device_node *cpu_np, *np; + struct device *cpu_dev; + int cpu, idx; + + if (!scmi_np) + return false; + + for_each_possible_cpu(cpu) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + continue; + + cpu_np = dev_of_node(cpu_dev); + + np = of_parse_phandle(cpu_np, "clocks", 0); + of_node_put(np); + + if (np == scmi_np) + return true; + + idx = of_property_match_string(cpu_np, "power-domain-names", "perf"); + np = of_parse_phandle(cpu_np, "power-domains", idx); + of_node_put(np); + + if (np == scmi_np) + return true; + } + + return false; +} + static int scmi_cpufreq_probe(struct scmi_device *sdev) { int ret; @@ -369,9 +435,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev) handle = sdev->handle; - if (!handle) + if (!handle || !scmi_dev_used_by_cpus(dev)) return -ENODEV; + scmi_cpufreq_driver.driver_data = sdev; + perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph); if (IS_ERR(perf_ops)) return PTR_ERR(perf_ops); diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index d33be56983ed..dcbb0ae7dd47 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops; static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - struct scpi_data *priv = policy->driver_data; - unsigned long rate = clk_get_rate(priv->clk); + struct cpufreq_policy *policy; + struct scpi_data *priv; + unsigned long rate; + + policy = cpufreq_cpu_get_raw(cpu); + if (unlikely(!policy)) + return 0; + + priv = policy->driver_data; + rate = clk_get_rate(priv->clk); return rate / 1000; } @@ -39,8 +46,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) static int scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) { - u64 rate = policy->freq_table[index].frequency * 1000; + unsigned long freq_khz = policy->freq_table[index].frequency; struct scpi_data *priv = policy->driver_data; + unsigned long rate = freq_khz * 1000; int ret; ret = clk_set_rate(priv->clk, rate); @@ -48,7 +56,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) if (ret) return ret; - if (clk_get_rate(priv->clk) != rate) + if (clk_get_rate(priv->clk) / 1000 != freq_khz) return -EIO; return 0; @@ -64,7 +72,7 @@ scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) if (domain < 0) return domain; - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { if (cpu == cpu_dev->id) continue; @@ -167,7 +175,7 @@ out_free_opp: return ret; } -static int scpi_cpufreq_exit(struct cpufreq_policy *policy) +static void scpi_cpufreq_exit(struct cpufreq_policy *policy) { struct scpi_data *priv = policy->driver_data; @@ -175,8 +183,6 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); dev_pm_opp_remove_all_dynamic(priv->cpu_dev); kfree(priv); - - return 0; } static struct cpufreq_driver scpi_cpufreq_driver = { @@ -185,7 +191,6 @@ static struct cpufreq_driver scpi_cpufreq_driver = { CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, - .attr = 
cpufreq_generic_attr, .get = scpi_cpufreq_get_rate, .init = scpi_cpufreq_init, .exit = scpi_cpufreq_exit, @@ -219,7 +224,7 @@ static struct platform_driver scpi_cpufreq_platdrv = { .name = "scpi-cpufreq", }, .probe = scpi_cpufreq_probe, - .remove_new = scpi_cpufreq_remove, + .remove = scpi_cpufreq_remove, }; module_platform_driver(scpi_cpufreq_platdrv); diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index b8704232c27b..9c0b01e00508 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -135,14 +135,12 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) +static void sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); clk_put(cpuclk); - - return 0; } static struct cpufreq_driver sh_cpufreq_driver = { @@ -153,7 +151,6 @@ static struct cpufreq_driver sh_cpufreq_driver = { .verify = sh_cpufreq_verify, .init = sh_cpufreq_cpu_init, .exit = sh_cpufreq_cpu_exit, - .attr = cpufreq_generic_attr, }; static int __init sh_cpufreq_module_init(void) diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c index 2783d3d55fce..15899dd77c08 100644 --- a/drivers/cpufreq/sparc-us2e-cpufreq.c +++ b/drivers/cpufreq/sparc-us2e-cpufreq.c @@ -296,10 +296,9 @@ static int us2e_freq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) +static void us2e_freq_cpu_exit(struct cpufreq_policy *policy) { us2e_freq_target(policy, 0); - return 0; } static struct cpufreq_driver cpufreq_us2e_driver = { @@ -324,7 +323,7 @@ static int __init us2e_freq_init(void) impl = ((ver >> 32) & 0xffff); if (manuf == 0x17 && impl == 0x13) { - us2e_freq_table = kzalloc(NR_CPUS * sizeof(*us2e_freq_table), + us2e_freq_table = kcalloc(NR_CPUS, sizeof(*us2e_freq_table), GFP_KERNEL); if (!us2e_freq_table) return -ENOMEM; diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c index 6c3657679a88..de50a2f3b124 100644 --- a/drivers/cpufreq/sparc-us3-cpufreq.c +++ b/drivers/cpufreq/sparc-us3-cpufreq.c @@ -140,10 +140,9 @@ static int us3_freq_cpu_init(struct cpufreq_policy *policy) return 0; } -static int us3_freq_cpu_exit(struct cpufreq_policy *policy) +static void us3_freq_cpu_exit(struct cpufreq_policy *policy) { us3_freq_target(policy, 0); - return 0; } static struct cpufreq_driver cpufreq_us3_driver = { @@ -172,7 +171,7 @@ static int __init us3_freq_init(void) impl == CHEETAH_PLUS_IMPL || impl == JAGUAR_IMPL || impl == PANTHER_IMPL)) { - us3_freq_table = kzalloc(NR_CPUS * sizeof(*us3_freq_table), + us3_freq_table = kcalloc(NR_CPUS, sizeof(*us3_freq_table), GFP_KERNEL); if (!us3_freq_table) return -ENOMEM; diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 78b875db6b66..707c71090cc3 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c @@ -165,16 +165,14 @@ static struct cpufreq_driver spear_cpufreq_driver = { .target_index = spear_cpufreq_target, .get = cpufreq_generic_get, .init = spear_cpufreq_init, - .attr = cpufreq_generic_attr, }; static int spear_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; - const struct property *prop; struct cpufreq_frequency_table *freq_tbl; - const __be32 *val; - int cnt, i, ret; + u32 val; + int cnt, ret, i = 0; np = of_cpu_device_node_get(0); if (!np) { @@ -186,26 +184,23 @@ static int 
spear_cpufreq_probe(struct platform_device *pdev) &spear_cpufreq.transition_latency)) spear_cpufreq.transition_latency = CPUFREQ_ETERNAL; - prop = of_find_property(np, "cpufreq_tbl", NULL); - if (!prop || !prop->value) { + cnt = of_property_count_u32_elems(np, "cpufreq_tbl"); + if (cnt <= 0) { pr_err("Invalid cpufreq_tbl\n"); ret = -ENODEV; goto out_put_node; } - cnt = prop->length / sizeof(u32); - val = prop->value; - freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL); if (!freq_tbl) { ret = -ENOMEM; goto out_put_node; } - for (i = 0; i < cnt; i++) - freq_tbl[i].frequency = be32_to_cpup(val++); + of_property_for_each_u32(np, "cpufreq_tbl", val) + freq_tbl[i++].frequency = val; - freq_tbl[i].frequency = CPUFREQ_TABLE_END; + freq_tbl[cnt].frequency = CPUFREQ_TABLE_END; spear_cpufreq.freq_tbl = freq_tbl; diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 75b10ecdb60f..3e6e85a92212 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -400,16 +400,12 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) return 0; } -static int centrino_cpu_exit(struct cpufreq_policy *policy) +static void centrino_cpu_exit(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; - if (!per_cpu(centrino_model, cpu)) - return -ENODEV; - - per_cpu(centrino_model, cpu) = NULL; - - return 0; + if (per_cpu(centrino_model, cpu)) + per_cpu(centrino_model, cpu) = NULL; } /** @@ -511,7 +507,6 @@ static struct cpufreq_driver centrino_driver = { .verify = cpufreq_generic_frequency_table_verify, .target_index = centrino_target, .get = get_cur_freq, - .attr = cpufreq_generic_attr, }; /* @@ -520,10 +515,10 @@ static struct cpufreq_driver centrino_driver = { * or ASCII model IDs. 
*/ static const struct x86_cpu_id centrino_ids[] = { - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 9, X86_FEATURE_EST, NULL), - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 13, X86_FEATURE_EST, NULL), - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 3, X86_FEATURE_EST, NULL), - X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 4, X86_FEATURE_EST, NULL), + X86_MATCH_VFM_FEATURE(IFM( 6, 9), X86_FEATURE_EST, NULL), + X86_MATCH_VFM_FEATURE(IFM( 6, 13), X86_FEATURE_EST, NULL), + X86_MATCH_VFM_FEATURE(IFM(15, 3), X86_FEATURE_EST, NULL), + X86_MATCH_VFM_FEATURE(IFM(15, 4), X86_FEATURE_EST, NULL), {} }; diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index f2076d72bf39..262cfbde9ca7 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c @@ -315,7 +315,6 @@ static struct cpufreq_driver speedstep_driver = { .target_index = speedstep_target, .init = speedstep_cpu_init, .get = speedstep_get, - .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id ss_smi_ids[] = { diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index 0ce9d4b6dfcc..39265884c3f1 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c @@ -295,7 +295,6 @@ static struct cpufreq_driver speedstep_driver = { .init = speedstep_cpu_init, .get = speedstep_get, .resume = speedstep_resume, - .attr = cpufreq_generic_attr, }; static const struct x86_cpu_id ss_smi_ids[] = { diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index 9c542e723a15..b15b3142b5fe 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -18,7 +18,7 @@ #include <linux/regmap.h> #define VERSION_ELEMENTS 3 -#define MAX_PCODE_NAME_LEN 7 +#define MAX_PCODE_NAME_LEN 16 #define VERSION_SHIFT 28 #define HW_INFO_INDEX 1 @@ -267,7 +267,7 @@ static int __init sti_cpufreq_init(void) goto skip_voltage_scaling; } - if (!of_get_property(ddata.cpu->of_node, "operating-points-v2", NULL)) { + if (!of_property_present(ddata.cpu->of_node, "operating-points-v2")) { dev_err(ddata.cpu, "OPP-v2 not supported\n"); goto skip_voltage_scaling; } @@ -293,6 +293,7 @@ module_init(sti_cpufreq_init); static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = { { .compatible = "st,stih407" }, { .compatible = "st,stih410" }, + { .compatible = "st,stih418" }, { }, }; MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match); diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 32a9c88f8ff6..744312a44279 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/arm-smccc.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/nvmem-consumer.h> @@ -18,25 +19,183 @@ #include <linux/pm_opp.h> #include <linux/slab.h> -#define MAX_NAME_LEN 7 - #define NVMEM_MASK 0x7 #define NVMEM_SHIFT 5 +#define SUN50I_A100_NVMEM_MASK 0xf +#define SUN50I_A100_NVMEM_SHIFT 12 + static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev; +struct sunxi_cpufreq_data { + u32 (*efuse_xlate)(u32 speedbin); +}; + +static u32 sun50i_h6_efuse_xlate(u32 speedbin) +{ + u32 efuse_value; + + efuse_value = (speedbin >> NVMEM_SHIFT) & NVMEM_MASK; + + /* + * We treat unexpected efuse values as if the SoC was from + * the slowest bin. Expected efuse values are 1-3, slowest + * to fastest. 
+ */ + if (efuse_value >= 1 && efuse_value <= 3) + return efuse_value - 1; + else + return 0; +} + +static u32 sun50i_a100_efuse_xlate(u32 speedbin) +{ + u32 efuse_value; + + efuse_value = (speedbin >> SUN50I_A100_NVMEM_SHIFT) & + SUN50I_A100_NVMEM_MASK; + + switch (efuse_value) { + case 0b100: + return 2; + case 0b010: + return 1; + default: + return 0; + } +} + +static int get_soc_id_revision(void) +{ +#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY + return arm_smccc_get_soc_id_revision(); +#else + return SMCCC_RET_NOT_SUPPORTED; +#endif +} + +/* + * Judging by the OPP tables in the vendor BSP, the quality order of the + * returned speedbin index is 4 -> 0/2 -> 3 -> 1, from worst to best. + * 0 and 2 seem identical from the OPP tables' point of view. + */ +static u32 sun50i_h616_efuse_xlate(u32 speedbin) +{ + int ver_bits = get_soc_id_revision(); + u32 value = 0; + + switch (speedbin & 0xffff) { + case 0x2000: + value = 0; + break; + case 0x2400: + case 0x7400: + case 0x2c00: + case 0x7c00: + if (ver_bits != SMCCC_RET_NOT_SUPPORTED && ver_bits <= 1) { + /* ic version A/B */ + value = 1; + } else { + /* ic version C and later version */ + value = 2; + } + break; + case 0x5000: + case 0x5400: + case 0x6000: + value = 3; + break; + case 0x5c00: + value = 4; + break; + case 0x5d00: + value = 0; + break; + case 0x6c00: + value = 5; + break; + default: + pr_warn("sun50i-cpufreq-nvmem: unknown speed bin 0x%x, using default bin 0\n", + speedbin & 0xffff); + value = 0; + break; + } + + return value; +} + +static struct sunxi_cpufreq_data sun50i_h6_cpufreq_data = { + .efuse_xlate = sun50i_h6_efuse_xlate, +}; + +static struct sunxi_cpufreq_data sun50i_a100_cpufreq_data = { + .efuse_xlate = sun50i_a100_efuse_xlate, +}; + +static struct sunxi_cpufreq_data sun50i_h616_cpufreq_data = { + .efuse_xlate = sun50i_h616_efuse_xlate, +}; + +static const struct of_device_id cpu_opp_match_list[] = { + { .compatible = "allwinner,sun50i-h6-operating-points", + .data = &sun50i_h6_cpufreq_data, + }, + { .compatible = "allwinner,sun50i-a100-operating-points", + .data = &sun50i_a100_cpufreq_data, + }, + { .compatible = "allwinner,sun50i-h616-operating-points", + .data = &sun50i_h616_cpufreq_data, + }, + {} +}; + +/** + * dt_has_supported_hw() - Check if any OPPs use opp-supported-hw + * + * If we ask the cpufreq framework to use the opp-supported-hw feature, it + * will ignore every OPP node without that DT property. If none of the OPPs + * have it, the driver will fail probing, due to the lack of OPPs. + * + * Returns true if we have at least one OPP with the opp-supported-hw property. + */ +static bool dt_has_supported_hw(void) +{ + bool has_opp_supported_hw = false; + struct device *cpu_dev; + + cpu_dev = get_cpu_device(0); + if (!cpu_dev) + return false; + + struct device_node *np __free(device_node) = + dev_pm_opp_of_get_opp_desc_node(cpu_dev); + if (!np) + return false; + + for_each_child_of_node_scoped(np, opp) { + if (of_property_present(opp, "opp-supported-hw")) { + has_opp_supported_hw = true; + break; + } + } + + return has_opp_supported_hw; +} + /** * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value - * @versions: Set to the value parsed from efuse * - * Returns 0 if success. + * Returns non-negative speed bin index on success, a negative error + * value otherwise. 
*/ -static int sun50i_cpufreq_get_efuse(u32 *versions) +static int sun50i_cpufreq_get_efuse(void) { + const struct sunxi_cpufreq_data *opp_data; struct nvmem_cell *speedbin_nvmem; - struct device_node *np; + const struct of_device_id *match; struct device *cpu_dev; - u32 *speedbin, efuse_value; + void *speedbin_ptr; + u32 speedbin = 0; size_t len; int ret; @@ -44,50 +203,45 @@ static int sun50i_cpufreq_get_efuse(u32 *versions) if (!cpu_dev) return -ENODEV; - np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + struct device_node *np __free(device_node) = + dev_pm_opp_of_get_opp_desc_node(cpu_dev); if (!np) return -ENOENT; - ret = of_device_is_compatible(np, - "allwinner,sun50i-h6-operating-points"); - if (!ret) { - of_node_put(np); + match = of_match_node(cpu_opp_match_list, np); + if (!match) return -ENOENT; - } + + opp_data = match->data; speedbin_nvmem = of_nvmem_cell_get(np, NULL); - of_node_put(np); if (IS_ERR(speedbin_nvmem)) return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), "Could not get nvmem cell\n"); - speedbin = nvmem_cell_read(speedbin_nvmem, &len); + speedbin_ptr = nvmem_cell_read(speedbin_nvmem, &len); nvmem_cell_put(speedbin_nvmem); - if (IS_ERR(speedbin)) - return PTR_ERR(speedbin); + if (IS_ERR(speedbin_ptr)) + return PTR_ERR(speedbin_ptr); - efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK; + if (len <= 4) + memcpy(&speedbin, speedbin_ptr, len); + speedbin = le32_to_cpu(speedbin); - /* - * We treat unexpected efuse values as if the SoC was from - * the slowest bin. Expected efuse values are 1-3, slowest - * to fastest. - */ - if (efuse_value >= 1 && efuse_value <= 3) - *versions = efuse_value - 1; - else - *versions = 0; + ret = opp_data->efuse_xlate(speedbin); - kfree(speedbin); - return 0; + kfree(speedbin_ptr); + + return ret; }; static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) { int *opp_tokens; - char name[MAX_NAME_LEN]; - unsigned int cpu; - u32 speed = 0; + char name[] = "speedXXXXXXXXXXX"; /* Integers can take 11 chars max */ + unsigned int cpu, supported_hw; + struct dev_pm_opp_config config = {}; + int speed; int ret; opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens), @@ -95,15 +249,26 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) if (!opp_tokens) return -ENOMEM; - ret = sun50i_cpufreq_get_efuse(&speed); - if (ret) { + speed = sun50i_cpufreq_get_efuse(); + if (speed < 0) { kfree(opp_tokens); - return ret; + return speed; + } + + /* + * We need at least one OPP with the "opp-supported-hw" property, + * or else the upper layers will ignore every OPP and will bail out. 
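The "speedbin index to supported_hw mask" flow below leans on the OPP core's opp-supported-hw matching. As far as I understand it (a hedged sketch, not the OPP core's literal code), an OPP is kept only if each word of its opp-supported-hw property shares at least one set bit with the mask the driver supplies; with supported_hw = 1U << speed and a single word, exactly the OPPs tagged for that speed bin remain.

	/* Sketch of the assumed matching rule; the function name is made up. */
	static bool opp_matches_supported_hw(const u32 *opp_prop, const u32 *drv_mask,
					     unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++) {
			if (!(opp_prop[i] & drv_mask[i]))
				return false;	/* no common bit: OPP is ignored */
		}

		return true;
	}

Per the comment above, OPP nodes without the property are skipped entirely once a mask is supplied, which is why the dt_has_supported_hw() guard is needed before setting config.supported_hw.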
+ */ + if (dt_has_supported_hw()) { + supported_hw = 1U << speed; + config.supported_hw = &supported_hw; + config.supported_hw_count = 1; } - snprintf(name, MAX_NAME_LEN, "speed%d", speed); + snprintf(name, sizeof(name), "speed%d", speed); + config.prop_name = name; - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { struct device *cpu_dev = get_cpu_device(cpu); if (!cpu_dev) { @@ -111,12 +276,11 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) goto free_opp; } - opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name); - if (opp_tokens[cpu] < 0) { - ret = opp_tokens[cpu]; - pr_err("Failed to set prop name\n"); + ret = dev_pm_opp_set_config(cpu_dev, &config); + if (ret < 0) goto free_opp; - } + + opp_tokens[cpu] = ret; } cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1, @@ -130,8 +294,8 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) pr_err("Failed to register platform device\n"); free_opp: - for_each_possible_cpu(cpu) - dev_pm_opp_put_prop_name(opp_tokens[cpu]); + for_each_present_cpu(cpu) + dev_pm_opp_clear_config(opp_tokens[cpu]); kfree(opp_tokens); return ret; @@ -144,15 +308,15 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev) platform_device_unregister(cpufreq_dt_pdev); - for_each_possible_cpu(cpu) - dev_pm_opp_put_prop_name(opp_tokens[cpu]); + for_each_present_cpu(cpu) + dev_pm_opp_clear_config(opp_tokens[cpu]); kfree(opp_tokens); } static struct platform_driver sun50i_cpufreq_driver = { .probe = sun50i_cpufreq_nvmem_probe, - .remove_new = sun50i_cpufreq_nvmem_remove, + .remove = sun50i_cpufreq_nvmem_remove, .driver = { .name = "sun50i-cpufreq-nvmem", }, @@ -160,20 +324,19 @@ static struct platform_driver sun50i_cpufreq_driver = { static const struct of_device_id sun50i_cpufreq_match_list[] = { { .compatible = "allwinner,sun50i-h6" }, + { .compatible = "allwinner,sun50i-a100" }, + { .compatible = "allwinner,sun50i-h616" }, + { .compatible = "allwinner,sun50i-h618" }, + { .compatible = "allwinner,sun50i-h700" }, {} }; MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list); static const struct of_device_id *sun50i_cpufreq_match_node(void) { - const struct of_device_id *match; - struct device_node *np; - - np = of_find_node_by_path("/"); - match = of_match_node(sun50i_cpufreq_match_list, np); - of_node_put(np); + struct device_node *np __free(device_node) = of_find_node_by_path("/"); - return match; + return of_match_node(sun50i_cpufreq_match_list, np); } /* diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c index aae951d4e77c..514146d98bca 100644 --- a/drivers/cpufreq/tegra124-cpufreq.c +++ b/drivers/cpufreq/tegra124-cpufreq.c @@ -52,12 +52,15 @@ out: static int tegra124_cpufreq_probe(struct platform_device *pdev) { + struct device_node *np __free(device_node) = of_cpu_device_node_get(0); struct tegra124_cpufreq_priv *priv; - struct device_node *np; struct device *cpu_dev; struct platform_device_info cpufreq_dt_devinfo = {}; int ret; + if (!np) + return -ENODEV; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -66,15 +69,9 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) if (!cpu_dev) return -ENODEV; - np = of_cpu_device_node_get(0); - if (!np) - return -ENODEV; - priv->cpu_clk = of_clk_get_by_name(np, "cpu_g"); - if (IS_ERR(priv->cpu_clk)) { - ret = PTR_ERR(priv->cpu_clk); - goto out_put_np; - } + if (IS_ERR(priv->cpu_clk)) + return PTR_ERR(priv->cpu_clk); priv->dfll_clk = of_clk_get_by_name(np, 
"dfll"); if (IS_ERR(priv->dfll_clk)) { @@ -110,8 +107,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - of_node_put(np); - return 0; out_put_pllp_clk: @@ -122,8 +117,6 @@ out_put_dfll_clk: clk_put(priv->dfll_clk); out_put_cpu_clk: clk_put(priv->cpu_clk); -out_put_np: - of_node_put(np); return ret; } diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 7b8fcfa55038..cbabb726c664 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy) { struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id; + u32 cpu; policy->freq_table = data->clusters[cluster].table; policy->cpuinfo.transition_latency = 300 * 1000; policy->driver_data = NULL; + /* set same policy for all cpus in a cluster */ + for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) { + if (data->cpus[cpu].bpmp_cluster_id == cluster) + cpumask_set_cpu(cpu, policy->cpus); + } + return 0; } @@ -123,7 +130,6 @@ static struct cpufreq_driver tegra186_cpufreq_driver = { .verify = cpufreq_generic_frequency_table_verify, .target_index = tegra186_cpufreq_set_target, .init = tegra186_cpufreq_init, - .attr = cpufreq_generic_attr, }; static struct cpufreq_frequency_table *init_vhint_table( @@ -276,7 +282,7 @@ static struct platform_driver tegra186_cpufreq_platform_driver = { .of_match_table = tegra186_cpufreq_of_match, }, .probe = tegra186_cpufreq_probe, - .remove_new = tegra186_cpufreq_remove, + .remove = tegra186_cpufreq_remove, }; module_platform_driver(tegra186_cpufreq_platform_driver); diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index 59865ea455a8..9b4f516f313e 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -551,14 +551,12 @@ static int tegra194_cpufreq_offline(struct cpufreq_policy *policy) return 0; } -static int tegra194_cpufreq_exit(struct cpufreq_policy *policy) +static void tegra194_cpufreq_exit(struct cpufreq_policy *policy) { struct device *cpu_dev = get_cpu_device(policy->cpu); dev_pm_opp_remove_all_dynamic(cpu_dev); dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); - - return 0; } static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy, @@ -591,7 +589,6 @@ static struct cpufreq_driver tegra194_cpufreq_driver = { .exit = tegra194_cpufreq_exit, .online = tegra194_cpufreq_online, .offline = tegra194_cpufreq_offline, - .attr = cpufreq_generic_attr, }; static struct tegra_cpufreq_ops tegra194_cpufreq_ops = { @@ -820,7 +817,7 @@ static struct platform_driver tegra194_ccplex_driver = { .of_match_table = tegra194_cpufreq_of_match, }, .probe = tegra194_cpufreq_probe, - .remove_new = tegra194_cpufreq_remove, + .remove = tegra194_cpufreq_remove, }; module_platform_driver(tegra194_ccplex_driver); diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 46c41e2ca727..5a5147277cd0 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -16,6 +16,7 @@ #include <linux/pm_opp.h> #include <linux/regmap.h> #include <linux/slab.h> +#include <linux/sys_soc.h> #define REVISION_MASK 0xF #define REVISION_SHIFT 28 @@ -47,6 +48,35 @@ #define AM625_SUPPORT_S_MPU_OPP BIT(1) #define AM625_SUPPORT_T_MPU_OPP BIT(2) +enum { + AM62A7_EFUSE_M_MPU_OPP = 13, + AM62A7_EFUSE_N_MPU_OPP, + AM62A7_EFUSE_O_MPU_OPP, + AM62A7_EFUSE_P_MPU_OPP, + 
AM62A7_EFUSE_Q_MPU_OPP, + AM62A7_EFUSE_R_MPU_OPP, + AM62A7_EFUSE_S_MPU_OPP, + /* + * The V, U, and T speed grade numbering is out of order + * to align with the AM625 more uniformly. I promise I know + * my ABCs ;) + */ + AM62A7_EFUSE_V_MPU_OPP, + AM62A7_EFUSE_U_MPU_OPP, + AM62A7_EFUSE_T_MPU_OPP, +}; + +#define AM62A7_SUPPORT_N_MPU_OPP BIT(0) +#define AM62A7_SUPPORT_R_MPU_OPP BIT(1) +#define AM62A7_SUPPORT_V_MPU_OPP BIT(2) + +#define AM62P5_EFUSE_O_MPU_OPP 15 +#define AM62P5_EFUSE_S_MPU_OPP 19 +#define AM62P5_EFUSE_U_MPU_OPP 21 + +#define AM62P5_SUPPORT_O_MPU_OPP BIT(0) +#define AM62P5_SUPPORT_U_MPU_OPP BIT(2) + #define VERSION_COUNT 2 struct ti_cpufreq_data; @@ -61,6 +91,11 @@ struct ti_cpufreq_soc_data { unsigned long efuse_shift; unsigned long rev_offset; bool multi_regulator; +/* Backward compatibility hack: Might have missing syscon */ +#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1 +/* Backward compatibility hack: new syscon size is 1 register wide */ +#define TI_QUIRK_SYSCON_IS_SINGLE_REG 0x2 + u8 quirks; }; struct ti_cpufreq_data { @@ -112,6 +147,49 @@ static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data, return BIT(efuse); } +static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data, + unsigned long efuse) +{ + unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP; + + switch (efuse) { + case AM62P5_EFUSE_U_MPU_OPP: + case AM62P5_EFUSE_S_MPU_OPP: + calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP; + fallthrough; + case AM62P5_EFUSE_O_MPU_OPP: + calculated_efuse |= AM62P5_SUPPORT_O_MPU_OPP; + } + + return calculated_efuse; +} + +static unsigned long am62a7_efuse_xlate(struct ti_cpufreq_data *opp_data, + unsigned long efuse) +{ + unsigned long calculated_efuse = AM62A7_SUPPORT_N_MPU_OPP; + + switch (efuse) { + case AM62A7_EFUSE_V_MPU_OPP: + case AM62A7_EFUSE_U_MPU_OPP: + case AM62A7_EFUSE_T_MPU_OPP: + case AM62A7_EFUSE_S_MPU_OPP: + calculated_efuse |= AM62A7_SUPPORT_V_MPU_OPP; + fallthrough; + case AM62A7_EFUSE_R_MPU_OPP: + case AM62A7_EFUSE_Q_MPU_OPP: + case AM62A7_EFUSE_P_MPU_OPP: + case AM62A7_EFUSE_O_MPU_OPP: + calculated_efuse |= AM62A7_SUPPORT_R_MPU_OPP; + fallthrough; + case AM62A7_EFUSE_N_MPU_OPP: + case AM62A7_EFUSE_M_MPU_OPP: + calculated_efuse |= AM62A7_SUPPORT_N_MPU_OPP; + } + + return calculated_efuse; +} + static unsigned long am625_efuse_xlate(struct ti_cpufreq_data *opp_data, unsigned long efuse) { @@ -182,6 +260,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = { .efuse_mask = BIT(3), .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = false, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, }; /* @@ -209,6 +288,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = { .efuse_mask = BIT(9), .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = true, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, }; /* @@ -223,6 +303,14 @@ static struct ti_cpufreq_soc_data am3517_soc_data = { .efuse_mask = 0, .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = false, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, +}; + +static const struct soc_device_attribute k3_cpufreq_soc[] = { + { .family = "AM62X", .revision = "SR1.0" }, + { .family = "AM62AX", .revision = "SR1.0" }, + { .family = "AM62PX", .revision = "SR1.0" }, + { /* sentinel */ } }; static struct ti_cpufreq_soc_data am625_soc_data = { @@ -230,7 +318,23 @@ static struct ti_cpufreq_soc_data am625_soc_data = { .efuse_offset = 0x0018, .efuse_mask = 0x07c0, .efuse_shift = 0x6, - .rev_offset = 0x0014, + .multi_regulator = false, + 
.quirks = TI_QUIRK_SYSCON_IS_SINGLE_REG, +}; + +static struct ti_cpufreq_soc_data am62a7_soc_data = { + .efuse_xlate = am62a7_efuse_xlate, + .efuse_offset = 0x0, + .efuse_mask = 0x07c0, + .efuse_shift = 0x6, + .multi_regulator = false, +}; + +static struct ti_cpufreq_soc_data am62p5_soc_data = { + .efuse_xlate = am62p5_efuse_xlate, + .efuse_offset = 0x0, + .efuse_mask = 0x07c0, + .efuse_shift = 0x6, .multi_regulator = false, }; @@ -250,7 +354,11 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data, ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset, &efuse); - if (ret == -EIO) { + + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_IS_SINGLE_REG && ret == -EIO) + ret = regmap_read(opp_data->syscon, 0x0, &efuse); + + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { /* not a syscon register! */ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + opp_data->soc_data->efuse_offset, 4); @@ -288,10 +396,20 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data, struct device *dev = opp_data->cpu_dev; u32 revision; int ret; + if (soc_device_match(k3_cpufreq_soc)) { + /* + * Since the SR is 1.0, hard code the revision_value as + * 0x1 here. This way we avoid re using the same register + * that is giving us required information inside socinfo + * anyway. + */ + *revision_value = 0x1; + goto done; + } ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset, &revision); - if (ret == -EIO) { + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { /* not a syscon register! */ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + opp_data->soc_data->rev_offset, 4); @@ -310,6 +428,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data, *revision_value = BIT((revision >> REVISION_SHIFT) & REVISION_MASK); +done: return 0; } @@ -329,7 +448,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data) return 0; } -static const struct of_device_id ti_cpufreq_of_match[] = { +static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = { { .compatible = "ti,am33xx", .data = &am3x_soc_data, }, { .compatible = "ti,am3517", .data = &am3517_soc_data, }, { .compatible = "ti,am43", .data = &am4x_soc_data, }, @@ -337,8 +456,8 @@ static const struct of_device_id ti_cpufreq_of_match[] = { { .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, }, { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, }, { .compatible = "ti,am625", .data = &am625_soc_data, }, - { .compatible = "ti,am62a7", .data = &am625_soc_data, }, - { .compatible = "ti,am62p5", .data = &am625_soc_data, }, + { .compatible = "ti,am62a7", .data = &am62a7_soc_data, }, + { .compatible = "ti,am62p5", .data = &am62p5_soc_data, }, /* legacy */ { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, }, { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, }, @@ -347,12 +466,10 @@ static const struct of_device_id ti_cpufreq_of_match[] = { static const struct of_device_id *ti_cpufreq_match_node(void) { - struct device_node *np; + struct device_node *np __free(device_node) = of_find_node_by_path("/"); const struct of_device_id *match; - np = of_find_node_by_path("/"); match = of_match_node(ti_cpufreq_of_match, np); - of_node_put(np); return match; } @@ -419,7 +536,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev) ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config); if (ret < 0) { - dev_err(opp_data->cpu_dev, "Failed to set OPP config\n"); + dev_err_probe(opp_data->cpu_dev, ret, 
"Failed to set OPP config\n"); goto fail_put_node; } diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 9ac4ea50b874..65fea47b82e6 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -447,7 +447,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) return 0; } -static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy) +static void ve_spc_cpufreq_exit(struct cpufreq_policy *policy) { struct device *cpu_dev; @@ -455,11 +455,10 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy) if (!cpu_dev) { pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu); - return -ENODEV; + return; } put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus); - return 0; } static struct cpufreq_driver ve_spc_cpufreq_driver = { @@ -472,7 +471,6 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = { .init = ve_spc_cpufreq_init, .exit = ve_spc_cpufreq_exit, .register_em = cpufreq_register_em_with_opp, - .attr = cpufreq_generic_attr, }; #ifdef CONFIG_BL_SWITCHER @@ -566,7 +564,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = { .name = "vexpress-spc-cpufreq", }, .probe = ve_spc_cpufreq_probe, - .remove_new = ve_spc_cpufreq_remove, + .remove = ve_spc_cpufreq_remove, }; module_platform_driver(ve_spc_cpufreq_platdrv); diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c new file mode 100644 index 000000000000..7dd1b0c263c7 --- /dev/null +++ b/drivers/cpufreq/virtual-cpufreq.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Google LLC + */ + +#include <linux/arch_topology.h> +#include <linux/cpufreq.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* + * CPU0..CPUn + * +-------------+-------------------------------+--------+-------+ + * | Register | Description | Offset | Len | + * +-------------+-------------------------------+--------+-------+ + * | cur_perf | read this register to get | 0x0 | 0x4 | + * | | the current perf (integer val | | | + * | | representing perf relative to | | | + * | | max performance) | | | + * | | that vCPU is running at | | | + * +-------------+-------------------------------+--------+-------+ + * | set_perf | write to this register to set | 0x4 | 0x4 | + * | | perf value of the vCPU | | | + * +-------------+-------------------------------+--------+-------+ + * | perftbl_len | number of entries in perf | 0x8 | 0x4 | + * | | table. A single entry in the | | | + * | | perf table denotes no table | | | + * | | and the entry contains | | | + * | | the maximum perf value | | | + * | | that this vCPU supports. | | | + * | | The guest can request any | | | + * | | value between 1 and max perf | | | + * | | when perftbls are not used. | | | + * +---------------------------------------------+--------+-------+ + * | perftbl_sel | write to this register to | 0xc | 0x4 | + * | | select perf table entry to | | | + * | | read from | | | + * +---------------------------------------------+--------+-------+ + * | perftbl_rd | read this register to get | 0x10 | 0x4 | + * | | perf value of the selected | | | + * | | entry based on perftbl_sel | | | + * +---------------------------------------------+--------+-------+ + * | perf_domain | performance domain number | 0x14 | 0x4 | + * | | that this vCPU belongs to. 
| | | + * | | vCPUs sharing the same perf | | | + * | | domain number are part of the | | | + * | | same performance domain. | | | + * +-------------+-------------------------------+--------+-------+ + */ + +#define REG_CUR_PERF_STATE_OFFSET 0x0 +#define REG_SET_PERF_STATE_OFFSET 0x4 +#define REG_PERFTBL_LEN_OFFSET 0x8 +#define REG_PERFTBL_SEL_OFFSET 0xc +#define REG_PERFTBL_RD_OFFSET 0x10 +#define REG_PERF_DOMAIN_OFFSET 0x14 +#define PER_CPU_OFFSET 0x1000 + +#define PERFTBL_MAX_ENTRIES 64U + +static void __iomem *base; +static DEFINE_PER_CPU(u32, perftbl_num_entries); + +static void virt_scale_freq_tick(void) +{ + int cpu = smp_processor_id(); + u32 max_freq = (u32)cpufreq_get_hw_max_freq(cpu); + u64 cur_freq; + unsigned long scale; + + cur_freq = (u64)readl_relaxed(base + cpu * PER_CPU_OFFSET + + REG_CUR_PERF_STATE_OFFSET); + + cur_freq <<= SCHED_CAPACITY_SHIFT; + scale = (unsigned long)div_u64(cur_freq, max_freq); + scale = min(scale, SCHED_CAPACITY_SCALE); + + this_cpu_write(arch_freq_scale, scale); +} + +static struct scale_freq_data virt_sfd = { + .source = SCALE_FREQ_SOURCE_VIRT, + .set_freq_scale = virt_scale_freq_tick, +}; + +static unsigned int virt_cpufreq_set_perf(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + writel_relaxed(target_freq, + base + policy->cpu * PER_CPU_OFFSET + REG_SET_PERF_STATE_OFFSET); + return 0; +} + +static unsigned int virt_cpufreq_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + virt_cpufreq_set_perf(policy, target_freq); + return target_freq; +} + +static u32 virt_cpufreq_get_perftbl_entry(int cpu, u32 idx) +{ + writel_relaxed(idx, base + cpu * PER_CPU_OFFSET + + REG_PERFTBL_SEL_OFFSET); + return readl_relaxed(base + cpu * PER_CPU_OFFSET + + REG_PERFTBL_RD_OFFSET); +} + +static int virt_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_freqs freqs; + int ret = 0; + + freqs.old = policy->cur; + freqs.new = target_freq; + + cpufreq_freq_transition_begin(policy, &freqs); + ret = virt_cpufreq_set_perf(policy, target_freq); + cpufreq_freq_transition_end(policy, &freqs, ret != 0); + + return ret; +} + +static int virt_cpufreq_get_sharing_cpus(struct cpufreq_policy *policy) +{ + u32 cur_perf_domain, perf_domain; + struct device *cpu_dev; + int cpu; + + cur_perf_domain = readl_relaxed(base + policy->cpu * + PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET); + + for_each_present_cpu(cpu) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + continue; + + perf_domain = readl_relaxed(base + cpu * + PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET); + + if (perf_domain == cur_perf_domain) + cpumask_set_cpu(cpu, policy->cpus); + } + + return 0; +} + +static int virt_cpufreq_get_freq_info(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *table; + u32 num_perftbl_entries, idx; + + num_perftbl_entries = per_cpu(perftbl_num_entries, policy->cpu); + + if (num_perftbl_entries == 1) { + policy->cpuinfo.min_freq = 1; + policy->cpuinfo.max_freq = virt_cpufreq_get_perftbl_entry(policy->cpu, 0); + + policy->min = policy->cpuinfo.min_freq; + policy->max = policy->cpuinfo.max_freq; + + policy->cur = policy->max; + return 0; + } + + table = kcalloc(num_perftbl_entries + 1, sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + for (idx = 0; idx < num_perftbl_entries; idx++) + table[idx].frequency = virt_cpufreq_get_perftbl_entry(policy->cpu, idx); + + table[idx].frequency = CPUFREQ_TABLE_END; + policy->freq_table = table; + + return 0; +} + +static int 
virt_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + struct device *cpu_dev; + int ret; + + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) + return -ENODEV; + + ret = virt_cpufreq_get_freq_info(policy); + if (ret) { + dev_warn(cpu_dev, "failed to get cpufreq info\n"); + return ret; + } + + ret = virt_cpufreq_get_sharing_cpus(policy); + if (ret) { + dev_warn(cpu_dev, "failed to get sharing cpumask\n"); + return ret; + } + + /* + * To simplify and improve latency of handling frequency requests on + * the host side, this ensures that the vCPU thread triggering the MMIO + * abort is the same thread whose performance constraints (Ex. uclamp + * settings) need to be updated. This simplifies the VMM (Virtual + * Machine Manager) having to find the correct vCPU thread and/or + * facing permission issues when configuring other threads. + */ + policy->dvfs_possible_from_any_cpu = false; + policy->fast_switch_possible = true; + + /* + * Using the default SCALE_FREQ_SOURCE_CPUFREQ is insufficient since + * the actual physical CPU frequency may not match requested frequency + * from the vCPU thread due to frequency update latencies or other + * inputs to the physical CPU frequency selection. This additional FIE + * source allows for more accurate freq_scale updates and only takes + * effect if another FIE source such as AMUs have not been registered. + */ + topology_set_scale_freq_source(&virt_sfd, policy->cpus); + + return 0; +} + +static void virt_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_VIRT, policy->related_cpus); + kfree(policy->freq_table); +} + +static int virt_cpufreq_online(struct cpufreq_policy *policy) +{ + /* Nothing to restore. */ + return 0; +} + +static int virt_cpufreq_offline(struct cpufreq_policy *policy) +{ + /* Dummy offline() to avoid exit() being called and freeing resources. 
*/ + return 0; +} + +static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy) +{ + if (policy->freq_table) + return cpufreq_frequency_table_verify(policy, policy->freq_table); + + cpufreq_verify_within_cpu_limits(policy); + return 0; +} + +static struct cpufreq_driver cpufreq_virt_driver = { + .name = "virt-cpufreq", + .init = virt_cpufreq_cpu_init, + .exit = virt_cpufreq_cpu_exit, + .online = virt_cpufreq_online, + .offline = virt_cpufreq_offline, + .verify = virt_cpufreq_verify_policy, + .target = virt_cpufreq_target, + .fast_switch = virt_cpufreq_fast_switch, +}; + +static int virt_cpufreq_driver_probe(struct platform_device *pdev) +{ + u32 num_perftbl_entries; + int ret, cpu; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + for_each_possible_cpu(cpu) { + num_perftbl_entries = readl_relaxed(base + cpu * PER_CPU_OFFSET + + REG_PERFTBL_LEN_OFFSET); + + if (!num_perftbl_entries || num_perftbl_entries > PERFTBL_MAX_ENTRIES) + return -ENODEV; + + per_cpu(perftbl_num_entries, cpu) = num_perftbl_entries; + } + + ret = cpufreq_register_driver(&cpufreq_virt_driver); + if (ret) { + dev_err(&pdev->dev, "Virtual CPUFreq driver failed to register: %d\n", ret); + return ret; + } + + dev_dbg(&pdev->dev, "Virtual CPUFreq driver initialized\n"); + return 0; +} + +static void virt_cpufreq_driver_remove(struct platform_device *pdev) +{ + cpufreq_unregister_driver(&cpufreq_virt_driver); +} + +static const struct of_device_id virt_cpufreq_match[] = { + { .compatible = "qemu,virtual-cpufreq", .data = NULL}, + {} +}; +MODULE_DEVICE_TABLE(of, virt_cpufreq_match); + +static struct platform_driver virt_cpufreq_driver = { + .probe = virt_cpufreq_driver_probe, + .remove = virt_cpufreq_driver_remove, + .driver = { + .name = "virt-cpufreq", + .of_match_table = virt_cpufreq_match, + }, +}; + +static int __init virt_cpufreq_init(void) +{ + return platform_driver_register(&virt_cpufreq_driver); +} +postcore_initcall(virt_cpufreq_init); + +static void __exit virt_cpufreq_exit(void) +{ + platform_driver_unregister(&virt_cpufreq_driver); +} +module_exit(virt_cpufreq_exit); + +MODULE_DESCRIPTION("Virtual cpufreq driver"); +MODULE_LICENSE("GPL");
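
Editor's note: the register table at the top of virtual-cpufreq.c defines the per-vCPU MMIO contract between guest and host. As a rough illustration only, the sketch below models how a host-side VMM might back those registers; the struct and function names (vcpu_freq_dev, vcpu_freq_read, vcpu_freq_write) are invented for this example and are not taken from the patch or from any real VMM.

#include <stdint.h>

#define REG_CUR_PERF     0x0
#define REG_SET_PERF     0x4
#define REG_PERFTBL_LEN  0x8
#define REG_PERFTBL_SEL  0xc
#define REG_PERFTBL_RD   0x10
#define REG_PERF_DOMAIN  0x14
#define PERFTBL_MAX      64

struct vcpu_freq_dev {
	uint32_t cur_perf;                /* last performance level granted to the vCPU */
	uint32_t perftbl_sel;             /* table index last selected by the guest */
	uint32_t perftbl_len;             /* 1 == "no table", entry 0 holds the max perf */
	uint32_t perf_domain;             /* vCPUs sharing this id share one cpufreq policy */
	uint32_t perftbl[PERFTBL_MAX];
};

/* Guest load from an offset inside this vCPU's 0x1000-byte register window. */
static uint32_t vcpu_freq_read(struct vcpu_freq_dev *d, uint32_t off)
{
	switch (off) {
	case REG_CUR_PERF:    return d->cur_perf;
	case REG_PERFTBL_LEN: return d->perftbl_len;
	case REG_PERFTBL_RD:
		return d->perftbl_sel < d->perftbl_len ? d->perftbl[d->perftbl_sel] : 0;
	case REG_PERF_DOMAIN: return d->perf_domain;
	default:              return 0;
	}
}

/* Guest store; set_perf is only a hint, so a real host may clamp or defer it. */
static void vcpu_freq_write(struct vcpu_freq_dev *d, uint32_t off, uint32_t val)
{
	switch (off) {
	case REG_SET_PERF:
		d->cur_perf = val;        /* feed into vCPU thread scheduling / host DVFS */
		break;
	case REG_PERFTBL_SEL:
		d->perftbl_sel = val;
		break;
	}
}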
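
Editor's note: virt_scale_freq_tick() derives the frequency-invariance scale as the value read from cur_perf, shifted left by SCHED_CAPACITY_SHIFT and divided by the maximum frequency, then clamped to SCHED_CAPACITY_SCALE. A standalone userspace sketch of that arithmetic, using made-up frequency values, shows the intended result:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	/* Made-up numbers: the vCPU currently runs at 1.2 GHz out of a 2.4 GHz max. */
	uint64_t cur_freq = 1200000;   /* kHz, as read back from cur_perf */
	uint32_t max_freq = 2400000;   /* kHz, per-CPU maximum frequency */
	unsigned long scale;

	scale = (unsigned long)((cur_freq << SCHED_CAPACITY_SHIFT) / max_freq);
	if (scale > SCHED_CAPACITY_SCALE)
		scale = SCHED_CAPACITY_SCALE;

	/* Prints 512 out of 1024: the vCPU is treated as running at half capacity. */
	printf("arch_freq_scale = %lu / %lu\n", scale, SCHED_CAPACITY_SCALE);
	return 0;
}

With these inputs the scale comes out to 512, so Per-Entity Load Tracking in the guest accounts for the vCPU contributing half of its maximum capacity per tick.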